hexsha
stringlengths 40
40
| size
int64 4
1.05M
| content
stringlengths 4
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
1d201e131abf7f7e716f04138ccc9dabb4509eb4
| 7,687 |
#[doc = "Writer for register SWR"]
pub type W = crate::W<u32, super::SWR>;
#[doc = "Register SWR `reset()`'s with value 0"]
impl crate::ResetValue for super::SWR {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Write proxy for field `RPM`"]
pub struct RPM_W<'a> {
w: &'a mut W,
}
impl<'a> RPM_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
#[doc = "Write proxy for field `ROM`"]
pub struct ROM_W<'a> {
w: &'a mut W,
}
impl<'a> ROM_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
self.w
}
}
#[doc = "Write proxy for field `RCM1U`"]
pub struct RCM1U_W<'a> {
w: &'a mut W,
}
impl<'a> RCM1U_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
#[doc = "Write proxy for field `RCM1D`"]
pub struct RCM1D_W<'a> {
w: &'a mut W,
}
impl<'a> RCM1D_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
self.w
}
}
#[doc = "Write proxy for field `RCM2U`"]
pub struct RCM2U_W<'a> {
w: &'a mut W,
}
impl<'a> RCM2U_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
self.w
}
}
#[doc = "Write proxy for field `RCM2D`"]
pub struct RCM2D_W<'a> {
w: &'a mut W,
}
impl<'a> RCM2D_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
self.w
}
}
#[doc = "Write proxy for field `RE0A`"]
pub struct RE0A_W<'a> {
w: &'a mut W,
}
impl<'a> RE0A_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
self.w
}
}
#[doc = "Write proxy for field `RE1A`"]
pub struct RE1A_W<'a> {
w: &'a mut W,
}
impl<'a> RE1A_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
self.w
}
}
#[doc = "Write proxy for field `RE2A`"]
pub struct RE2A_W<'a> {
w: &'a mut W,
}
impl<'a> RE2A_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
self.w
}
}
#[doc = "Write proxy for field `RTRPF`"]
pub struct RTRPF_W<'a> {
w: &'a mut W,
}
impl<'a> RTRPF_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11);
self.w
}
}
impl W {
#[doc = "Bit 0 - Period match while counting up clear"]
#[inline(always)]
pub fn rpm(&mut self) -> RPM_W {
RPM_W { w: self }
}
#[doc = "Bit 1 - One match while counting down clear"]
#[inline(always)]
pub fn rom(&mut self) -> ROM_W {
ROM_W { w: self }
}
#[doc = "Bit 2 - Channel 1 Compare match while counting up clear"]
#[inline(always)]
pub fn rcm1u(&mut self) -> RCM1U_W {
RCM1U_W { w: self }
}
#[doc = "Bit 3 - Channel 1 Compare match while counting down clear"]
#[inline(always)]
pub fn rcm1d(&mut self) -> RCM1D_W {
RCM1D_W { w: self }
}
#[doc = "Bit 4 - Channel 2 Compare match while counting up clear"]
#[inline(always)]
pub fn rcm2u(&mut self) -> RCM2U_W {
RCM2U_W { w: self }
}
#[doc = "Bit 5 - Channel 2 Compare match while counting down clear"]
#[inline(always)]
pub fn rcm2d(&mut self) -> RCM2D_W {
RCM2D_W { w: self }
}
#[doc = "Bit 8 - Event 0 detection clear"]
#[inline(always)]
pub fn re0a(&mut self) -> RE0A_W {
RE0A_W { w: self }
}
#[doc = "Bit 9 - Event 1 detection clear"]
#[inline(always)]
pub fn re1a(&mut self) -> RE1A_W {
RE1A_W { w: self }
}
#[doc = "Bit 10 - Event 2 detection clear"]
#[inline(always)]
pub fn re2a(&mut self) -> RE2A_W {
RE2A_W { w: self }
}
#[doc = "Bit 11 - Trap Flag status clear"]
#[inline(always)]
pub fn rtrpf(&mut self) -> RTRPF_W {
RTRPF_W { w: self }
}
}
| 27.162544 | 86 | 0.518278 |
f91a9065484f9911d3ef9f69378759fe38536db3
| 163,413 |
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateVocabularyFilterOutput {
/// <p>The name of the updated vocabulary filter.</p>
pub vocabulary_filter_name: std::option::Option<std::string::String>,
/// <p>The language code of the words in the vocabulary filter.</p>
pub language_code: std::option::Option<crate::model::LanguageCode>,
/// <p>The date and time that the vocabulary filter was updated.</p>
pub last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl UpdateVocabularyFilterOutput {
/// <p>The name of the updated vocabulary filter.</p>
pub fn vocabulary_filter_name(&self) -> std::option::Option<&str> {
self.vocabulary_filter_name.as_deref()
}
/// <p>The language code of the words in the vocabulary filter.</p>
pub fn language_code(&self) -> std::option::Option<&crate::model::LanguageCode> {
self.language_code.as_ref()
}
/// <p>The date and time that the vocabulary filter was updated.</p>
pub fn last_modified_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.last_modified_time.as_ref()
}
}
impl std::fmt::Debug for UpdateVocabularyFilterOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("UpdateVocabularyFilterOutput");
formatter.field("vocabulary_filter_name", &self.vocabulary_filter_name);
formatter.field("language_code", &self.language_code);
formatter.field("last_modified_time", &self.last_modified_time);
formatter.finish()
}
}
/// See [`UpdateVocabularyFilterOutput`](crate::output::UpdateVocabularyFilterOutput)
pub mod update_vocabulary_filter_output {
/// A builder for [`UpdateVocabularyFilterOutput`](crate::output::UpdateVocabularyFilterOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) vocabulary_filter_name: std::option::Option<std::string::String>,
pub(crate) language_code: std::option::Option<crate::model::LanguageCode>,
pub(crate) last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl Builder {
/// <p>The name of the updated vocabulary filter.</p>
pub fn vocabulary_filter_name(mut self, input: impl Into<std::string::String>) -> Self {
self.vocabulary_filter_name = Some(input.into());
self
}
/// <p>The name of the updated vocabulary filter.</p>
pub fn set_vocabulary_filter_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.vocabulary_filter_name = input;
self
}
/// <p>The language code of the words in the vocabulary filter.</p>
pub fn language_code(mut self, input: crate::model::LanguageCode) -> Self {
self.language_code = Some(input);
self
}
/// <p>The language code of the words in the vocabulary filter.</p>
pub fn set_language_code(
mut self,
input: std::option::Option<crate::model::LanguageCode>,
) -> Self {
self.language_code = input;
self
}
/// <p>The date and time that the vocabulary filter was updated.</p>
pub fn last_modified_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.last_modified_time = Some(input);
self
}
/// <p>The date and time that the vocabulary filter was updated.</p>
pub fn set_last_modified_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.last_modified_time = input;
self
}
/// Consumes the builder and constructs a [`UpdateVocabularyFilterOutput`](crate::output::UpdateVocabularyFilterOutput)
pub fn build(self) -> crate::output::UpdateVocabularyFilterOutput {
crate::output::UpdateVocabularyFilterOutput {
vocabulary_filter_name: self.vocabulary_filter_name,
language_code: self.language_code,
last_modified_time: self.last_modified_time,
}
}
}
}
impl UpdateVocabularyFilterOutput {
/// Creates a new builder-style object to manufacture [`UpdateVocabularyFilterOutput`](crate::output::UpdateVocabularyFilterOutput)
pub fn builder() -> crate::output::update_vocabulary_filter_output::Builder {
crate::output::update_vocabulary_filter_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateVocabularyOutput {
/// <p>The name of the vocabulary that was updated.</p>
pub vocabulary_name: std::option::Option<std::string::String>,
/// <p>The language code of the vocabulary entries.</p>
pub language_code: std::option::Option<crate::model::LanguageCode>,
/// <p>The date and time that the vocabulary was updated.</p>
pub last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The processing state of the vocabulary. When the <code>VocabularyState</code> field contains <code>READY</code> the vocabulary is ready to be used in a <code>StartTranscriptionJob</code> request.</p>
pub vocabulary_state: std::option::Option<crate::model::VocabularyState>,
}
impl UpdateVocabularyOutput {
/// <p>The name of the vocabulary that was updated.</p>
pub fn vocabulary_name(&self) -> std::option::Option<&str> {
self.vocabulary_name.as_deref()
}
/// <p>The language code of the vocabulary entries.</p>
pub fn language_code(&self) -> std::option::Option<&crate::model::LanguageCode> {
self.language_code.as_ref()
}
/// <p>The date and time that the vocabulary was updated.</p>
pub fn last_modified_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.last_modified_time.as_ref()
}
/// <p>The processing state of the vocabulary. When the <code>VocabularyState</code> field contains <code>READY</code> the vocabulary is ready to be used in a <code>StartTranscriptionJob</code> request.</p>
pub fn vocabulary_state(&self) -> std::option::Option<&crate::model::VocabularyState> {
self.vocabulary_state.as_ref()
}
}
impl std::fmt::Debug for UpdateVocabularyOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("UpdateVocabularyOutput");
formatter.field("vocabulary_name", &self.vocabulary_name);
formatter.field("language_code", &self.language_code);
formatter.field("last_modified_time", &self.last_modified_time);
formatter.field("vocabulary_state", &self.vocabulary_state);
formatter.finish()
}
}
/// See [`UpdateVocabularyOutput`](crate::output::UpdateVocabularyOutput)
pub mod update_vocabulary_output {
/// A builder for [`UpdateVocabularyOutput`](crate::output::UpdateVocabularyOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) vocabulary_name: std::option::Option<std::string::String>,
pub(crate) language_code: std::option::Option<crate::model::LanguageCode>,
pub(crate) last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) vocabulary_state: std::option::Option<crate::model::VocabularyState>,
}
impl Builder {
/// <p>The name of the vocabulary that was updated.</p>
pub fn vocabulary_name(mut self, input: impl Into<std::string::String>) -> Self {
self.vocabulary_name = Some(input.into());
self
}
/// <p>The name of the vocabulary that was updated.</p>
pub fn set_vocabulary_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.vocabulary_name = input;
self
}
/// <p>The language code of the vocabulary entries.</p>
pub fn language_code(mut self, input: crate::model::LanguageCode) -> Self {
self.language_code = Some(input);
self
}
/// <p>The language code of the vocabulary entries.</p>
pub fn set_language_code(
mut self,
input: std::option::Option<crate::model::LanguageCode>,
) -> Self {
self.language_code = input;
self
}
/// <p>The date and time that the vocabulary was updated.</p>
pub fn last_modified_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.last_modified_time = Some(input);
self
}
/// <p>The date and time that the vocabulary was updated.</p>
pub fn set_last_modified_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.last_modified_time = input;
self
}
/// <p>The processing state of the vocabulary. When the <code>VocabularyState</code> field contains <code>READY</code> the vocabulary is ready to be used in a <code>StartTranscriptionJob</code> request.</p>
pub fn vocabulary_state(mut self, input: crate::model::VocabularyState) -> Self {
self.vocabulary_state = Some(input);
self
}
/// <p>The processing state of the vocabulary. When the <code>VocabularyState</code> field contains <code>READY</code> the vocabulary is ready to be used in a <code>StartTranscriptionJob</code> request.</p>
pub fn set_vocabulary_state(
mut self,
input: std::option::Option<crate::model::VocabularyState>,
) -> Self {
self.vocabulary_state = input;
self
}
/// Consumes the builder and constructs a [`UpdateVocabularyOutput`](crate::output::UpdateVocabularyOutput)
pub fn build(self) -> crate::output::UpdateVocabularyOutput {
crate::output::UpdateVocabularyOutput {
vocabulary_name: self.vocabulary_name,
language_code: self.language_code,
last_modified_time: self.last_modified_time,
vocabulary_state: self.vocabulary_state,
}
}
}
}
impl UpdateVocabularyOutput {
/// Creates a new builder-style object to manufacture [`UpdateVocabularyOutput`](crate::output::UpdateVocabularyOutput)
pub fn builder() -> crate::output::update_vocabulary_output::Builder {
crate::output::update_vocabulary_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateMedicalVocabularyOutput {
/// <p>The name of the updated vocabulary.</p>
pub vocabulary_name: std::option::Option<std::string::String>,
/// <p>The language code for the language of the text file used to update the custom vocabulary. US English (en-US) is the only language supported in Amazon Transcribe Medical.</p>
pub language_code: std::option::Option<crate::model::LanguageCode>,
/// <p>The date and time that the vocabulary was updated.</p>
pub last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The processing state of the update to the vocabulary. When the <code>VocabularyState</code> field is <code>READY</code>, the vocabulary is ready to be used in a <code>StartMedicalTranscriptionJob</code> request.</p>
pub vocabulary_state: std::option::Option<crate::model::VocabularyState>,
}
impl UpdateMedicalVocabularyOutput {
/// <p>The name of the updated vocabulary.</p>
pub fn vocabulary_name(&self) -> std::option::Option<&str> {
self.vocabulary_name.as_deref()
}
/// <p>The language code for the language of the text file used to update the custom vocabulary. US English (en-US) is the only language supported in Amazon Transcribe Medical.</p>
pub fn language_code(&self) -> std::option::Option<&crate::model::LanguageCode> {
self.language_code.as_ref()
}
/// <p>The date and time that the vocabulary was updated.</p>
pub fn last_modified_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.last_modified_time.as_ref()
}
/// <p>The processing state of the update to the vocabulary. When the <code>VocabularyState</code> field is <code>READY</code>, the vocabulary is ready to be used in a <code>StartMedicalTranscriptionJob</code> request.</p>
pub fn vocabulary_state(&self) -> std::option::Option<&crate::model::VocabularyState> {
self.vocabulary_state.as_ref()
}
}
impl std::fmt::Debug for UpdateMedicalVocabularyOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("UpdateMedicalVocabularyOutput");
formatter.field("vocabulary_name", &self.vocabulary_name);
formatter.field("language_code", &self.language_code);
formatter.field("last_modified_time", &self.last_modified_time);
formatter.field("vocabulary_state", &self.vocabulary_state);
formatter.finish()
}
}
/// See [`UpdateMedicalVocabularyOutput`](crate::output::UpdateMedicalVocabularyOutput)
pub mod update_medical_vocabulary_output {
/// A builder for [`UpdateMedicalVocabularyOutput`](crate::output::UpdateMedicalVocabularyOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) vocabulary_name: std::option::Option<std::string::String>,
pub(crate) language_code: std::option::Option<crate::model::LanguageCode>,
pub(crate) last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) vocabulary_state: std::option::Option<crate::model::VocabularyState>,
}
impl Builder {
/// <p>The name of the updated vocabulary.</p>
pub fn vocabulary_name(mut self, input: impl Into<std::string::String>) -> Self {
self.vocabulary_name = Some(input.into());
self
}
/// <p>The name of the updated vocabulary.</p>
pub fn set_vocabulary_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.vocabulary_name = input;
self
}
/// <p>The language code for the language of the text file used to update the custom vocabulary. US English (en-US) is the only language supported in Amazon Transcribe Medical.</p>
pub fn language_code(mut self, input: crate::model::LanguageCode) -> Self {
self.language_code = Some(input);
self
}
/// <p>The language code for the language of the text file used to update the custom vocabulary. US English (en-US) is the only language supported in Amazon Transcribe Medical.</p>
pub fn set_language_code(
mut self,
input: std::option::Option<crate::model::LanguageCode>,
) -> Self {
self.language_code = input;
self
}
/// <p>The date and time that the vocabulary was updated.</p>
pub fn last_modified_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.last_modified_time = Some(input);
self
}
/// <p>The date and time that the vocabulary was updated.</p>
pub fn set_last_modified_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.last_modified_time = input;
self
}
/// <p>The processing state of the update to the vocabulary. When the <code>VocabularyState</code> field is <code>READY</code>, the vocabulary is ready to be used in a <code>StartMedicalTranscriptionJob</code> request.</p>
pub fn vocabulary_state(mut self, input: crate::model::VocabularyState) -> Self {
self.vocabulary_state = Some(input);
self
}
/// <p>The processing state of the update to the vocabulary. When the <code>VocabularyState</code> field is <code>READY</code>, the vocabulary is ready to be used in a <code>StartMedicalTranscriptionJob</code> request.</p>
pub fn set_vocabulary_state(
mut self,
input: std::option::Option<crate::model::VocabularyState>,
) -> Self {
self.vocabulary_state = input;
self
}
/// Consumes the builder and constructs a [`UpdateMedicalVocabularyOutput`](crate::output::UpdateMedicalVocabularyOutput)
pub fn build(self) -> crate::output::UpdateMedicalVocabularyOutput {
crate::output::UpdateMedicalVocabularyOutput {
vocabulary_name: self.vocabulary_name,
language_code: self.language_code,
last_modified_time: self.last_modified_time,
vocabulary_state: self.vocabulary_state,
}
}
}
}
impl UpdateMedicalVocabularyOutput {
/// Creates a new builder-style object to manufacture [`UpdateMedicalVocabularyOutput`](crate::output::UpdateMedicalVocabularyOutput)
pub fn builder() -> crate::output::update_medical_vocabulary_output::Builder {
crate::output::update_medical_vocabulary_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateCallAnalyticsCategoryOutput {
/// <p>The attributes describing the analytics category. You can see information such as the rules that you've used to update the category and when the category was originally created. </p>
pub category_properties: std::option::Option<crate::model::CategoryProperties>,
}
impl UpdateCallAnalyticsCategoryOutput {
/// <p>The attributes describing the analytics category. You can see information such as the rules that you've used to update the category and when the category was originally created. </p>
pub fn category_properties(&self) -> std::option::Option<&crate::model::CategoryProperties> {
self.category_properties.as_ref()
}
}
impl std::fmt::Debug for UpdateCallAnalyticsCategoryOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("UpdateCallAnalyticsCategoryOutput");
formatter.field("category_properties", &self.category_properties);
formatter.finish()
}
}
/// See [`UpdateCallAnalyticsCategoryOutput`](crate::output::UpdateCallAnalyticsCategoryOutput)
pub mod update_call_analytics_category_output {
/// A builder for [`UpdateCallAnalyticsCategoryOutput`](crate::output::UpdateCallAnalyticsCategoryOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) category_properties: std::option::Option<crate::model::CategoryProperties>,
}
impl Builder {
/// <p>The attributes describing the analytics category. You can see information such as the rules that you've used to update the category and when the category was originally created. </p>
pub fn category_properties(mut self, input: crate::model::CategoryProperties) -> Self {
self.category_properties = Some(input);
self
}
/// <p>The attributes describing the analytics category. You can see information such as the rules that you've used to update the category and when the category was originally created. </p>
pub fn set_category_properties(
mut self,
input: std::option::Option<crate::model::CategoryProperties>,
) -> Self {
self.category_properties = input;
self
}
/// Consumes the builder and constructs a [`UpdateCallAnalyticsCategoryOutput`](crate::output::UpdateCallAnalyticsCategoryOutput)
pub fn build(self) -> crate::output::UpdateCallAnalyticsCategoryOutput {
crate::output::UpdateCallAnalyticsCategoryOutput {
category_properties: self.category_properties,
}
}
}
}
impl UpdateCallAnalyticsCategoryOutput {
/// Creates a new builder-style object to manufacture [`UpdateCallAnalyticsCategoryOutput`](crate::output::UpdateCallAnalyticsCategoryOutput)
pub fn builder() -> crate::output::update_call_analytics_category_output::Builder {
crate::output::update_call_analytics_category_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UntagResourceOutput {}
impl std::fmt::Debug for UntagResourceOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("UntagResourceOutput");
formatter.finish()
}
}
/// See [`UntagResourceOutput`](crate::output::UntagResourceOutput)
pub mod untag_resource_output {
/// A builder for [`UntagResourceOutput`](crate::output::UntagResourceOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`UntagResourceOutput`](crate::output::UntagResourceOutput)
pub fn build(self) -> crate::output::UntagResourceOutput {
crate::output::UntagResourceOutput {}
}
}
}
impl UntagResourceOutput {
/// Creates a new builder-style object to manufacture [`UntagResourceOutput`](crate::output::UntagResourceOutput)
pub fn builder() -> crate::output::untag_resource_output::Builder {
crate::output::untag_resource_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct TagResourceOutput {}
impl std::fmt::Debug for TagResourceOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("TagResourceOutput");
formatter.finish()
}
}
/// See [`TagResourceOutput`](crate::output::TagResourceOutput)
pub mod tag_resource_output {
/// A builder for [`TagResourceOutput`](crate::output::TagResourceOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`TagResourceOutput`](crate::output::TagResourceOutput)
pub fn build(self) -> crate::output::TagResourceOutput {
crate::output::TagResourceOutput {}
}
}
}
impl TagResourceOutput {
/// Creates a new builder-style object to manufacture [`TagResourceOutput`](crate::output::TagResourceOutput)
pub fn builder() -> crate::output::tag_resource_output::Builder {
crate::output::tag_resource_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StartTranscriptionJobOutput {
/// <p>An object containing details of the asynchronous transcription job.</p>
pub transcription_job: std::option::Option<crate::model::TranscriptionJob>,
}
impl StartTranscriptionJobOutput {
/// <p>An object containing details of the asynchronous transcription job.</p>
pub fn transcription_job(&self) -> std::option::Option<&crate::model::TranscriptionJob> {
self.transcription_job.as_ref()
}
}
impl std::fmt::Debug for StartTranscriptionJobOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("StartTranscriptionJobOutput");
formatter.field("transcription_job", &self.transcription_job);
formatter.finish()
}
}
/// See [`StartTranscriptionJobOutput`](crate::output::StartTranscriptionJobOutput)
pub mod start_transcription_job_output {
/// A builder for [`StartTranscriptionJobOutput`](crate::output::StartTranscriptionJobOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) transcription_job: std::option::Option<crate::model::TranscriptionJob>,
}
impl Builder {
/// <p>An object containing details of the asynchronous transcription job.</p>
pub fn transcription_job(mut self, input: crate::model::TranscriptionJob) -> Self {
self.transcription_job = Some(input);
self
}
/// <p>An object containing details of the asynchronous transcription job.</p>
pub fn set_transcription_job(
mut self,
input: std::option::Option<crate::model::TranscriptionJob>,
) -> Self {
self.transcription_job = input;
self
}
/// Consumes the builder and constructs a [`StartTranscriptionJobOutput`](crate::output::StartTranscriptionJobOutput)
pub fn build(self) -> crate::output::StartTranscriptionJobOutput {
crate::output::StartTranscriptionJobOutput {
transcription_job: self.transcription_job,
}
}
}
}
impl StartTranscriptionJobOutput {
/// Creates a new builder-style object to manufacture [`StartTranscriptionJobOutput`](crate::output::StartTranscriptionJobOutput)
pub fn builder() -> crate::output::start_transcription_job_output::Builder {
crate::output::start_transcription_job_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StartMedicalTranscriptionJobOutput {
/// <p>A batch job submitted to transcribe medical speech to text.</p>
pub medical_transcription_job: std::option::Option<crate::model::MedicalTranscriptionJob>,
}
impl StartMedicalTranscriptionJobOutput {
/// <p>A batch job submitted to transcribe medical speech to text.</p>
pub fn medical_transcription_job(
&self,
) -> std::option::Option<&crate::model::MedicalTranscriptionJob> {
self.medical_transcription_job.as_ref()
}
}
impl std::fmt::Debug for StartMedicalTranscriptionJobOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("StartMedicalTranscriptionJobOutput");
formatter.field("medical_transcription_job", &self.medical_transcription_job);
formatter.finish()
}
}
/// See [`StartMedicalTranscriptionJobOutput`](crate::output::StartMedicalTranscriptionJobOutput)
pub mod start_medical_transcription_job_output {
/// A builder for [`StartMedicalTranscriptionJobOutput`](crate::output::StartMedicalTranscriptionJobOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) medical_transcription_job:
std::option::Option<crate::model::MedicalTranscriptionJob>,
}
impl Builder {
/// <p>A batch job submitted to transcribe medical speech to text.</p>
pub fn medical_transcription_job(
mut self,
input: crate::model::MedicalTranscriptionJob,
) -> Self {
self.medical_transcription_job = Some(input);
self
}
/// <p>A batch job submitted to transcribe medical speech to text.</p>
pub fn set_medical_transcription_job(
mut self,
input: std::option::Option<crate::model::MedicalTranscriptionJob>,
) -> Self {
self.medical_transcription_job = input;
self
}
/// Consumes the builder and constructs a [`StartMedicalTranscriptionJobOutput`](crate::output::StartMedicalTranscriptionJobOutput)
pub fn build(self) -> crate::output::StartMedicalTranscriptionJobOutput {
crate::output::StartMedicalTranscriptionJobOutput {
medical_transcription_job: self.medical_transcription_job,
}
}
}
}
impl StartMedicalTranscriptionJobOutput {
    /// Creates a new builder-style object to manufacture [`StartMedicalTranscriptionJobOutput`](crate::output::StartMedicalTranscriptionJobOutput)
    pub fn builder() -> crate::output::start_medical_transcription_job_output::Builder {
        // Derived `Default`; the declared return type drives inference.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output type carrying the details of the call analytics job that was started.
pub struct StartCallAnalyticsJobOutput {
    /// <p>An object containing the details of the asynchronous call analytics job.</p>
    pub call_analytics_job: std::option::Option<crate::model::CallAnalyticsJob>,
}
impl StartCallAnalyticsJobOutput {
    /// <p>An object containing the details of the asynchronous call analytics job.</p>
    pub fn call_analytics_job(&self) -> std::option::Option<&crate::model::CallAnalyticsJob> {
        // Borrow the stored job, if any.
        let job = &self.call_analytics_job;
        job.as_ref()
    }
}
impl std::fmt::Debug for StartCallAnalyticsJobOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained debug-struct builder; same output as the statement form.
        f.debug_struct("StartCallAnalyticsJobOutput")
            .field("call_analytics_job", &self.call_analytics_job)
            .finish()
    }
}
/// See [`StartCallAnalyticsJobOutput`](crate::output::StartCallAnalyticsJobOutput)
pub mod start_call_analytics_job_output {
    /// A builder for [`StartCallAnalyticsJobOutput`](crate::output::StartCallAnalyticsJobOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) call_analytics_job: std::option::Option<crate::model::CallAnalyticsJob>,
    }
    impl Builder {
        /// <p>An object containing the details of the asynchronous call analytics job.</p>
        pub fn call_analytics_job(mut self, input: crate::model::CallAnalyticsJob) -> Self {
            self.call_analytics_job = std::option::Option::Some(input);
            self
        }
        /// <p>An object containing the details of the asynchronous call analytics job.</p>
        pub fn set_call_analytics_job(
            mut self,
            input: std::option::Option<crate::model::CallAnalyticsJob>,
        ) -> Self {
            self.call_analytics_job = input;
            self
        }
        /// Consumes the builder and constructs a [`StartCallAnalyticsJobOutput`](crate::output::StartCallAnalyticsJobOutput)
        pub fn build(self) -> crate::output::StartCallAnalyticsJobOutput {
            // Move the single field straight into the output struct.
            let Self { call_analytics_job } = self;
            crate::output::StartCallAnalyticsJobOutput { call_analytics_job }
        }
    }
}
impl StartCallAnalyticsJobOutput {
    /// Creates a new builder-style object to manufacture [`StartCallAnalyticsJobOutput`](crate::output::StartCallAnalyticsJobOutput)
    pub fn builder() -> crate::output::start_call_analytics_job_output::Builder {
        // Derived `Default`; the declared return type drives inference.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// One page of vocabulary-filter results plus the token needed to fetch the next page.
pub struct ListVocabularyFiltersOutput {
    /// <p>The <code>ListVocabularyFilters</code> operation returns a page of collections at a time. The maximum size of the page is set by the <code>MaxResults</code> parameter. If there are more jobs in the list than the page size, Amazon Transcribe returns the <code>NextPage</code> token. Include the token in the next request to the <code>ListVocabularyFilters</code> operation to return in the next page of jobs.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>The list of vocabulary filters. It contains at most <code>MaxResults</code> number of filters. If there are more filters, call the <code>ListVocabularyFilters</code> operation again with the <code>NextToken</code> parameter in the request set to the value of the <code>NextToken</code> field in the response.</p>
    pub vocabulary_filters: std::option::Option<std::vec::Vec<crate::model::VocabularyFilterInfo>>,
}
impl ListVocabularyFiltersOutput {
    /// <p>The <code>ListVocabularyFilters</code> operation returns a page of collections at a time. The maximum size of the page is set by the <code>MaxResults</code> parameter. If there are more jobs in the list than the page size, Amazon Transcribe returns the <code>NextPage</code> token. Include the token in the next request to the <code>ListVocabularyFilters</code> operation to return in the next page of jobs.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        // Equivalent to `as_deref`: borrow the owned String as `&str`.
        self.next_token.as_ref().map(|token| token.as_str())
    }
    /// <p>The list of vocabulary filters. It contains at most <code>MaxResults</code> number of filters. If there are more filters, call the <code>ListVocabularyFilters</code> operation again with the <code>NextToken</code> parameter in the request set to the value of the <code>NextToken</code> field in the response.</p>
    pub fn vocabulary_filters(&self) -> std::option::Option<&[crate::model::VocabularyFilterInfo]> {
        // Equivalent to `as_deref`: borrow the owned Vec as a slice.
        self.vocabulary_filters.as_ref().map(|filters| filters.as_slice())
    }
}
impl std::fmt::Debug for ListVocabularyFiltersOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained debug-struct builder; same output as the statement form.
        f.debug_struct("ListVocabularyFiltersOutput")
            .field("next_token", &self.next_token)
            .field("vocabulary_filters", &self.vocabulary_filters)
            .finish()
    }
}
/// See [`ListVocabularyFiltersOutput`](crate::output::ListVocabularyFiltersOutput)
pub mod list_vocabulary_filters_output {
    /// A builder for [`ListVocabularyFiltersOutput`](crate::output::ListVocabularyFiltersOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) vocabulary_filters:
            std::option::Option<std::vec::Vec<crate::model::VocabularyFilterInfo>>,
    }
    impl Builder {
        /// <p>The <code>ListVocabularyFilters</code> operation returns a page of collections at a time. The maximum size of the page is set by the <code>MaxResults</code> parameter. If there are more jobs in the list than the page size, Amazon Transcribe returns the <code>NextPage</code> token. Include the token in the next request to the <code>ListVocabularyFilters</code> operation to return in the next page of jobs.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = std::option::Option::Some(input.into());
            self
        }
        /// <p>The <code>ListVocabularyFilters</code> operation returns a page of collections at a time. The maximum size of the page is set by the <code>MaxResults</code> parameter. If there are more jobs in the list than the page size, Amazon Transcribe returns the <code>NextPage</code> token. Include the token in the next request to the <code>ListVocabularyFilters</code> operation to return in the next page of jobs.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `vocabulary_filters`.
        ///
        /// To override the contents of this collection use [`set_vocabulary_filters`](Self::set_vocabulary_filters).
        ///
        /// <p>The list of vocabulary filters. It contains at most <code>MaxResults</code> number of filters. If there are more filters, call the <code>ListVocabularyFilters</code> operation again with the <code>NextToken</code> parameter in the request set to the value of the <code>NextToken</code> field in the response.</p>
        pub fn vocabulary_filters(mut self, input: crate::model::VocabularyFilterInfo) -> Self {
            // Lazily create the backing Vec on first append, then push in place.
            self.vocabulary_filters
                .get_or_insert_with(std::vec::Vec::new)
                .push(input);
            self
        }
        /// <p>The list of vocabulary filters. It contains at most <code>MaxResults</code> number of filters. If there are more filters, call the <code>ListVocabularyFilters</code> operation again with the <code>NextToken</code> parameter in the request set to the value of the <code>NextToken</code> field in the response.</p>
        pub fn set_vocabulary_filters(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::VocabularyFilterInfo>>,
        ) -> Self {
            self.vocabulary_filters = input;
            self
        }
        /// Consumes the builder and constructs a [`ListVocabularyFiltersOutput`](crate::output::ListVocabularyFiltersOutput)
        pub fn build(self) -> crate::output::ListVocabularyFiltersOutput {
            // Destructure so every builder field is moved out exactly once.
            let Self {
                next_token,
                vocabulary_filters,
            } = self;
            crate::output::ListVocabularyFiltersOutput {
                next_token,
                vocabulary_filters,
            }
        }
    }
}
impl ListVocabularyFiltersOutput {
    /// Creates a new builder-style object to manufacture [`ListVocabularyFiltersOutput`](crate::output::ListVocabularyFiltersOutput)
    pub fn builder() -> crate::output::list_vocabulary_filters_output::Builder {
        // Derived `Default`; the declared return type drives inference.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// One page of vocabulary results: the requested state, a pagination token, and the matching vocabularies.
pub struct ListVocabulariesOutput {
    /// <p>The requested vocabulary state.</p>
    pub status: std::option::Option<crate::model::VocabularyState>,
    /// <p>The <code>ListVocabularies</code> operation returns a page of vocabularies at a time. The maximum size of the page is set in the <code>MaxResults</code> parameter. If there are more jobs in the list than will fit on the page, Amazon Transcribe returns the <code>NextPage</code> token. To return in the next page of jobs, include the token in the next request to the <code>ListVocabularies</code> operation.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>A list of objects that describe the vocabularies that match the search criteria in the request.</p>
    pub vocabularies: std::option::Option<std::vec::Vec<crate::model::VocabularyInfo>>,
}
impl ListVocabulariesOutput {
    /// <p>The requested vocabulary state.</p>
    pub fn status(&self) -> std::option::Option<&crate::model::VocabularyState> {
        // Borrow the stored state, if any.
        let status = &self.status;
        status.as_ref()
    }
    /// <p>The <code>ListVocabularies</code> operation returns a page of vocabularies at a time. The maximum size of the page is set in the <code>MaxResults</code> parameter. If there are more jobs in the list than will fit on the page, Amazon Transcribe returns the <code>NextPage</code> token. To return in the next page of jobs, include the token in the next request to the <code>ListVocabularies</code> operation.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        // Equivalent to `as_deref`: borrow the owned String as `&str`.
        self.next_token.as_ref().map(|token| token.as_str())
    }
    /// <p>A list of objects that describe the vocabularies that match the search criteria in the request.</p>
    pub fn vocabularies(&self) -> std::option::Option<&[crate::model::VocabularyInfo]> {
        // Equivalent to `as_deref`: borrow the owned Vec as a slice.
        self.vocabularies.as_ref().map(|list| list.as_slice())
    }
}
impl std::fmt::Debug for ListVocabulariesOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained debug-struct builder; same output as the statement form.
        f.debug_struct("ListVocabulariesOutput")
            .field("status", &self.status)
            .field("next_token", &self.next_token)
            .field("vocabularies", &self.vocabularies)
            .finish()
    }
}
/// See [`ListVocabulariesOutput`](crate::output::ListVocabulariesOutput)
pub mod list_vocabularies_output {
    /// A builder for [`ListVocabulariesOutput`](crate::output::ListVocabulariesOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) status: std::option::Option<crate::model::VocabularyState>,
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) vocabularies: std::option::Option<std::vec::Vec<crate::model::VocabularyInfo>>,
    }
    impl Builder {
        /// <p>The requested vocabulary state.</p>
        pub fn status(mut self, input: crate::model::VocabularyState) -> Self {
            self.status = std::option::Option::Some(input);
            self
        }
        /// <p>The requested vocabulary state.</p>
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::VocabularyState>,
        ) -> Self {
            self.status = input;
            self
        }
        /// <p>The <code>ListVocabularies</code> operation returns a page of vocabularies at a time. The maximum size of the page is set in the <code>MaxResults</code> parameter. If there are more jobs in the list than will fit on the page, Amazon Transcribe returns the <code>NextPage</code> token. To return in the next page of jobs, include the token in the next request to the <code>ListVocabularies</code> operation.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = std::option::Option::Some(input.into());
            self
        }
        /// <p>The <code>ListVocabularies</code> operation returns a page of vocabularies at a time. The maximum size of the page is set in the <code>MaxResults</code> parameter. If there are more jobs in the list than will fit on the page, Amazon Transcribe returns the <code>NextPage</code> token. To return in the next page of jobs, include the token in the next request to the <code>ListVocabularies</code> operation.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `vocabularies`.
        ///
        /// To override the contents of this collection use [`set_vocabularies`](Self::set_vocabularies).
        ///
        /// <p>A list of objects that describe the vocabularies that match the search criteria in the request.</p>
        pub fn vocabularies(mut self, input: crate::model::VocabularyInfo) -> Self {
            // Lazily create the backing Vec on first append, then push in place.
            self.vocabularies
                .get_or_insert_with(std::vec::Vec::new)
                .push(input);
            self
        }
        /// <p>A list of objects that describe the vocabularies that match the search criteria in the request.</p>
        pub fn set_vocabularies(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::VocabularyInfo>>,
        ) -> Self {
            self.vocabularies = input;
            self
        }
        /// Consumes the builder and constructs a [`ListVocabulariesOutput`](crate::output::ListVocabulariesOutput)
        pub fn build(self) -> crate::output::ListVocabulariesOutput {
            // Destructure so every builder field is moved out exactly once.
            let Self {
                status,
                next_token,
                vocabularies,
            } = self;
            crate::output::ListVocabulariesOutput {
                status,
                next_token,
                vocabularies,
            }
        }
    }
}
impl ListVocabulariesOutput {
    /// Creates a new builder-style object to manufacture [`ListVocabulariesOutput`](crate::output::ListVocabulariesOutput)
    pub fn builder() -> crate::output::list_vocabularies_output::Builder {
        // Derived `Default`; the declared return type drives inference.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// One page of transcription-job summaries, echoing the requested status filter
/// and carrying the pagination token for the next page.
pub struct ListTranscriptionJobsOutput {
    /// <p>The requested status of the jobs returned.</p>
    pub status: std::option::Option<crate::model::TranscriptionJobStatus>,
    /// <p>The <code>ListTranscriptionJobs</code> operation returns a page of jobs at a time. The maximum size of the page is set by the <code>MaxResults</code> parameter. If there are more jobs in the list than the page size, Amazon Transcribe returns the <code>NextPage</code> token. Include the token in the next request to the <code>ListTranscriptionJobs</code> operation to return in the next page of jobs.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>A list of objects containing summary information for a transcription job.</p>
    pub transcription_job_summaries:
        std::option::Option<std::vec::Vec<crate::model::TranscriptionJobSummary>>,
}
impl ListTranscriptionJobsOutput {
    /// <p>The requested status of the jobs returned.</p>
    pub fn status(&self) -> std::option::Option<&crate::model::TranscriptionJobStatus> {
        // Borrow the stored status, if any.
        let status = &self.status;
        status.as_ref()
    }
    /// <p>The <code>ListTranscriptionJobs</code> operation returns a page of jobs at a time. The maximum size of the page is set by the <code>MaxResults</code> parameter. If there are more jobs in the list than the page size, Amazon Transcribe returns the <code>NextPage</code> token. Include the token in the next request to the <code>ListTranscriptionJobs</code> operation to return in the next page of jobs.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        // Equivalent to `as_deref`: borrow the owned String as `&str`.
        self.next_token.as_ref().map(|token| token.as_str())
    }
    /// <p>A list of objects containing summary information for a transcription job.</p>
    pub fn transcription_job_summaries(
        &self,
    ) -> std::option::Option<&[crate::model::TranscriptionJobSummary]> {
        // Equivalent to `as_deref`: borrow the owned Vec as a slice.
        self.transcription_job_summaries
            .as_ref()
            .map(|summaries| summaries.as_slice())
    }
}
impl std::fmt::Debug for ListTranscriptionJobsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained debug-struct builder; same output as the statement form.
        f.debug_struct("ListTranscriptionJobsOutput")
            .field("status", &self.status)
            .field("next_token", &self.next_token)
            .field(
                "transcription_job_summaries",
                &self.transcription_job_summaries,
            )
            .finish()
    }
}
/// See [`ListTranscriptionJobsOutput`](crate::output::ListTranscriptionJobsOutput)
pub mod list_transcription_jobs_output {
    /// A builder for [`ListTranscriptionJobsOutput`](crate::output::ListTranscriptionJobsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) status: std::option::Option<crate::model::TranscriptionJobStatus>,
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) transcription_job_summaries:
            std::option::Option<std::vec::Vec<crate::model::TranscriptionJobSummary>>,
    }
    impl Builder {
        /// <p>The requested status of the jobs returned.</p>
        pub fn status(mut self, input: crate::model::TranscriptionJobStatus) -> Self {
            self.status = std::option::Option::Some(input);
            self
        }
        /// <p>The requested status of the jobs returned.</p>
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::TranscriptionJobStatus>,
        ) -> Self {
            self.status = input;
            self
        }
        /// <p>The <code>ListTranscriptionJobs</code> operation returns a page of jobs at a time. The maximum size of the page is set by the <code>MaxResults</code> parameter. If there are more jobs in the list than the page size, Amazon Transcribe returns the <code>NextPage</code> token. Include the token in the next request to the <code>ListTranscriptionJobs</code> operation to return in the next page of jobs.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = std::option::Option::Some(input.into());
            self
        }
        /// <p>The <code>ListTranscriptionJobs</code> operation returns a page of jobs at a time. The maximum size of the page is set by the <code>MaxResults</code> parameter. If there are more jobs in the list than the page size, Amazon Transcribe returns the <code>NextPage</code> token. Include the token in the next request to the <code>ListTranscriptionJobs</code> operation to return in the next page of jobs.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `transcription_job_summaries`.
        ///
        /// To override the contents of this collection use [`set_transcription_job_summaries`](Self::set_transcription_job_summaries).
        ///
        /// <p>A list of objects containing summary information for a transcription job.</p>
        pub fn transcription_job_summaries(
            mut self,
            input: crate::model::TranscriptionJobSummary,
        ) -> Self {
            // Lazily create the backing Vec on first append, then push in place.
            self.transcription_job_summaries
                .get_or_insert_with(std::vec::Vec::new)
                .push(input);
            self
        }
        /// <p>A list of objects containing summary information for a transcription job.</p>
        pub fn set_transcription_job_summaries(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::TranscriptionJobSummary>>,
        ) -> Self {
            self.transcription_job_summaries = input;
            self
        }
        /// Consumes the builder and constructs a [`ListTranscriptionJobsOutput`](crate::output::ListTranscriptionJobsOutput)
        pub fn build(self) -> crate::output::ListTranscriptionJobsOutput {
            // Destructure so every builder field is moved out exactly once.
            let Self {
                status,
                next_token,
                transcription_job_summaries,
            } = self;
            crate::output::ListTranscriptionJobsOutput {
                status,
                next_token,
                transcription_job_summaries,
            }
        }
    }
}
impl ListTranscriptionJobsOutput {
    /// Creates a new builder-style object to manufacture [`ListTranscriptionJobsOutput`](crate::output::ListTranscriptionJobsOutput)
    pub fn builder() -> crate::output::list_transcription_jobs_output::Builder {
        // Derived `Default`; the declared return type drives inference.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// The tags associated with the queried resource, together with the ARN they belong to.
pub struct ListTagsForResourceOutput {
    /// <p>Lists all tags associated with the given Amazon Resource Name (ARN). </p>
    pub resource_arn: std::option::Option<std::string::String>,
    /// <p>Lists all tags associated with the given transcription job, vocabulary, or resource.</p>
    pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl ListTagsForResourceOutput {
    /// <p>Lists all tags associated with the given Amazon Resource Name (ARN). </p>
    pub fn resource_arn(&self) -> std::option::Option<&str> {
        // Equivalent to `as_deref`: borrow the owned String as `&str`.
        self.resource_arn.as_ref().map(|arn| arn.as_str())
    }
    /// <p>Lists all tags associated with the given transcription job, vocabulary, or resource.</p>
    pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        // Equivalent to `as_deref`: borrow the owned Vec as a slice.
        self.tags.as_ref().map(|tags| tags.as_slice())
    }
}
impl std::fmt::Debug for ListTagsForResourceOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained debug-struct builder; same output as the statement form.
        f.debug_struct("ListTagsForResourceOutput")
            .field("resource_arn", &self.resource_arn)
            .field("tags", &self.tags)
            .finish()
    }
}
/// See [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
pub mod list_tags_for_resource_output {
    /// A builder for [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) resource_arn: std::option::Option<std::string::String>,
        pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    }
    impl Builder {
        /// <p>Lists all tags associated with the given Amazon Resource Name (ARN). </p>
        pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.resource_arn = std::option::Option::Some(input.into());
            self
        }
        /// <p>Lists all tags associated with the given Amazon Resource Name (ARN). </p>
        pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.resource_arn = input;
            self
        }
        /// Appends an item to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p>Lists all tags associated with the given transcription job, vocabulary, or resource.</p>
        pub fn tags(mut self, input: crate::model::Tag) -> Self {
            // Lazily create the backing Vec on first append, then push in place.
            self.tags.get_or_insert_with(std::vec::Vec::new).push(input);
            self
        }
        /// <p>Lists all tags associated with the given transcription job, vocabulary, or resource.</p>
        pub fn set_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.tags = input;
            self
        }
        /// Consumes the builder and constructs a [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
        pub fn build(self) -> crate::output::ListTagsForResourceOutput {
            // Destructure so every builder field is moved out exactly once.
            let Self { resource_arn, tags } = self;
            crate::output::ListTagsForResourceOutput { resource_arn, tags }
        }
    }
}
impl ListTagsForResourceOutput {
    /// Creates a new builder-style object to manufacture [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
    pub fn builder() -> crate::output::list_tags_for_resource_output::Builder {
        // Derived `Default`; the declared return type drives inference.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// One page of medical vocabulary results: the requested state, a pagination token,
/// and the matching vocabularies.
pub struct ListMedicalVocabulariesOutput {
    /// <p>The requested vocabulary state.</p>
    pub status: std::option::Option<crate::model::VocabularyState>,
    /// <p>The <code>ListMedicalVocabularies</code> operation returns a page of vocabularies at a time. You set the maximum number of vocabularies to return on a page with the <code>MaxResults</code> parameter. If there are more jobs in the list will fit on a page, Amazon Transcribe Medical returns the <code>NextPage</code> token. To return the next page of vocabularies, include the token in the next request to the <code>ListMedicalVocabularies</code> operation .</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>A list of objects that describe the vocabularies that match your search criteria.</p>
    pub vocabularies: std::option::Option<std::vec::Vec<crate::model::VocabularyInfo>>,
}
impl ListMedicalVocabulariesOutput {
    /// <p>The requested vocabulary state.</p>
    pub fn status(&self) -> std::option::Option<&crate::model::VocabularyState> {
        // Borrow the stored state, if any.
        let status = &self.status;
        status.as_ref()
    }
    /// <p>The <code>ListMedicalVocabularies</code> operation returns a page of vocabularies at a time. You set the maximum number of vocabularies to return on a page with the <code>MaxResults</code> parameter. If there are more jobs in the list will fit on a page, Amazon Transcribe Medical returns the <code>NextPage</code> token. To return the next page of vocabularies, include the token in the next request to the <code>ListMedicalVocabularies</code> operation .</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        // Equivalent to `as_deref`: borrow the owned String as `&str`.
        self.next_token.as_ref().map(|token| token.as_str())
    }
    /// <p>A list of objects that describe the vocabularies that match your search criteria.</p>
    pub fn vocabularies(&self) -> std::option::Option<&[crate::model::VocabularyInfo]> {
        // Equivalent to `as_deref`: borrow the owned Vec as a slice.
        self.vocabularies.as_ref().map(|list| list.as_slice())
    }
}
impl std::fmt::Debug for ListMedicalVocabulariesOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained debug-struct builder; same output as the statement form.
        f.debug_struct("ListMedicalVocabulariesOutput")
            .field("status", &self.status)
            .field("next_token", &self.next_token)
            .field("vocabularies", &self.vocabularies)
            .finish()
    }
}
/// See [`ListMedicalVocabulariesOutput`](crate::output::ListMedicalVocabulariesOutput)
pub mod list_medical_vocabularies_output {
    /// A builder for [`ListMedicalVocabulariesOutput`](crate::output::ListMedicalVocabulariesOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) status: std::option::Option<crate::model::VocabularyState>,
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) vocabularies: std::option::Option<std::vec::Vec<crate::model::VocabularyInfo>>,
    }
    impl Builder {
        /// <p>The requested vocabulary state.</p>
        pub fn status(mut self, input: crate::model::VocabularyState) -> Self {
            self.status = std::option::Option::Some(input);
            self
        }
        /// <p>The requested vocabulary state.</p>
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::VocabularyState>,
        ) -> Self {
            self.status = input;
            self
        }
        /// <p>The <code>ListMedicalVocabularies</code> operation returns a page of vocabularies at a time. You set the maximum number of vocabularies to return on a page with the <code>MaxResults</code> parameter. If there are more jobs in the list will fit on a page, Amazon Transcribe Medical returns the <code>NextPage</code> token. To return the next page of vocabularies, include the token in the next request to the <code>ListMedicalVocabularies</code> operation .</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = std::option::Option::Some(input.into());
            self
        }
        /// <p>The <code>ListMedicalVocabularies</code> operation returns a page of vocabularies at a time. You set the maximum number of vocabularies to return on a page with the <code>MaxResults</code> parameter. If there are more jobs in the list will fit on a page, Amazon Transcribe Medical returns the <code>NextPage</code> token. To return the next page of vocabularies, include the token in the next request to the <code>ListMedicalVocabularies</code> operation .</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `vocabularies`.
        ///
        /// To override the contents of this collection use [`set_vocabularies`](Self::set_vocabularies).
        ///
        /// <p>A list of objects that describe the vocabularies that match your search criteria.</p>
        pub fn vocabularies(mut self, input: crate::model::VocabularyInfo) -> Self {
            // Lazily create the backing Vec on first append, then push in place.
            self.vocabularies
                .get_or_insert_with(std::vec::Vec::new)
                .push(input);
            self
        }
        /// <p>A list of objects that describe the vocabularies that match your search criteria.</p>
        pub fn set_vocabularies(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::VocabularyInfo>>,
        ) -> Self {
            self.vocabularies = input;
            self
        }
        /// Consumes the builder and constructs a [`ListMedicalVocabulariesOutput`](crate::output::ListMedicalVocabulariesOutput)
        pub fn build(self) -> crate::output::ListMedicalVocabulariesOutput {
            // Destructure so every builder field is moved out exactly once.
            let Self {
                status,
                next_token,
                vocabularies,
            } = self;
            crate::output::ListMedicalVocabulariesOutput {
                status,
                next_token,
                vocabularies,
            }
        }
    }
}
impl ListMedicalVocabulariesOutput {
    /// Creates a new builder-style object to manufacture [`ListMedicalVocabulariesOutput`](crate::output::ListMedicalVocabulariesOutput)
    pub fn builder() -> crate::output::list_medical_vocabularies_output::Builder {
        // Derived `Default`; the declared return type drives inference.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// One page of medical transcription job summaries, echoing the requested status
/// filter and carrying the pagination token for the next page.
pub struct ListMedicalTranscriptionJobsOutput {
    /// <p>The requested status of the medical transcription jobs returned.</p>
    pub status: std::option::Option<crate::model::TranscriptionJobStatus>,
    /// <p>The <code>ListMedicalTranscriptionJobs</code> operation returns a page of jobs at a time. The maximum size of the page is set by the <code>MaxResults</code> parameter. If the number of jobs exceeds what can fit on a page, Amazon Transcribe Medical returns the <code>NextPage</code> token. Include the token in the next request to the <code>ListMedicalTranscriptionJobs</code> operation to return in the next page of jobs.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>A list of objects containing summary information for a transcription job.</p>
    pub medical_transcription_job_summaries:
        std::option::Option<std::vec::Vec<crate::model::MedicalTranscriptionJobSummary>>,
}
impl ListMedicalTranscriptionJobsOutput {
    /// <p>The requested status of the medical transcription jobs returned.</p>
    pub fn status(&self) -> std::option::Option<&crate::model::TranscriptionJobStatus> {
        // Borrow the stored status, if any.
        let status = &self.status;
        status.as_ref()
    }
    /// <p>The <code>ListMedicalTranscriptionJobs</code> operation returns a page of jobs at a time. The maximum size of the page is set by the <code>MaxResults</code> parameter. If the number of jobs exceeds what can fit on a page, Amazon Transcribe Medical returns the <code>NextPage</code> token. Include the token in the next request to the <code>ListMedicalTranscriptionJobs</code> operation to return in the next page of jobs.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        // Equivalent to `as_deref`: borrow the owned String as `&str`.
        self.next_token.as_ref().map(|token| token.as_str())
    }
    /// <p>A list of objects containing summary information for a transcription job.</p>
    pub fn medical_transcription_job_summaries(
        &self,
    ) -> std::option::Option<&[crate::model::MedicalTranscriptionJobSummary]> {
        // Equivalent to `as_deref`: borrow the owned Vec as a slice.
        self.medical_transcription_job_summaries
            .as_ref()
            .map(|summaries| summaries.as_slice())
    }
}
impl std::fmt::Debug for ListMedicalTranscriptionJobsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained debug-struct builder; same output as the statement form.
        f.debug_struct("ListMedicalTranscriptionJobsOutput")
            .field("status", &self.status)
            .field("next_token", &self.next_token)
            .field(
                "medical_transcription_job_summaries",
                &self.medical_transcription_job_summaries,
            )
            .finish()
    }
}
/// See [`ListMedicalTranscriptionJobsOutput`](crate::output::ListMedicalTranscriptionJobsOutput)
pub mod list_medical_transcription_jobs_output {
    /// A builder for [`ListMedicalTranscriptionJobsOutput`](crate::output::ListMedicalTranscriptionJobsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) status: std::option::Option<crate::model::TranscriptionJobStatus>,
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) medical_transcription_job_summaries:
            std::option::Option<std::vec::Vec<crate::model::MedicalTranscriptionJobSummary>>,
    }
    impl Builder {
        /// Sets the requested status of the medical transcription jobs returned.
        pub fn status(self, input: crate::model::TranscriptionJobStatus) -> Self {
            self.set_status(Some(input))
        }
        /// Sets or clears the requested status of the medical transcription jobs returned.
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::TranscriptionJobStatus>,
        ) -> Self {
            self.status = input;
            self
        }
        /// Sets the pagination token returned when more jobs remain than fit on one page
        /// (page size is bounded by <code>MaxResults</code>).
        pub fn next_token(self, input: impl Into<std::string::String>) -> Self {
            self.set_next_token(Some(input.into()))
        }
        /// Sets or clears the pagination token.
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `medical_transcription_job_summaries`.
        ///
        /// To override the contents of this collection use
        /// [`set_medical_transcription_job_summaries`](Self::set_medical_transcription_job_summaries).
        pub fn medical_transcription_job_summaries(
            mut self,
            input: crate::model::MedicalTranscriptionJobSummary,
        ) -> Self {
            // Create the backing Vec lazily on the first append.
            self.medical_transcription_job_summaries
                .get_or_insert_with(Vec::new)
                .push(input);
            self
        }
        /// Replaces the whole collection of job summaries.
        pub fn set_medical_transcription_job_summaries(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::MedicalTranscriptionJobSummary>>,
        ) -> Self {
            self.medical_transcription_job_summaries = input;
            self
        }
        /// Consumes the builder and constructs a [`ListMedicalTranscriptionJobsOutput`](crate::output::ListMedicalTranscriptionJobsOutput)
        pub fn build(self) -> crate::output::ListMedicalTranscriptionJobsOutput {
            crate::output::ListMedicalTranscriptionJobsOutput {
                status: self.status,
                next_token: self.next_token,
                medical_transcription_job_summaries: self.medical_transcription_job_summaries,
            }
        }
    }
}
impl ListMedicalTranscriptionJobsOutput {
    /// Creates a new builder-style object to manufacture [`ListMedicalTranscriptionJobsOutput`](crate::output::ListMedicalTranscriptionJobsOutput)
    pub fn builder() -> crate::output::list_medical_transcription_jobs_output::Builder {
        // The return type pins the concrete builder; `Default` yields an all-`None` instance.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output returned by the `ListLanguageModels` operation: one page of custom
/// language models plus an optional pagination token.
pub struct ListLanguageModelsOutput {
    /// <p>The operation returns a page of jobs at a time. The maximum size of the list is set by the MaxResults parameter. If there are more language models in the list than the page size, Amazon Transcribe returns the <code>NextPage</code> token. Include the token in the next request to the operation to return the next page of language models.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>A list of objects containing information about custom language models.</p>
    pub models: std::option::Option<std::vec::Vec<crate::model::LanguageModel>>,
}
impl ListLanguageModelsOutput {
    /// <p>Pagination token. Returned when there are more language models than fit on
    /// one page (page size is bounded by MaxResults); include it in the next request
    /// to fetch the next page.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_ref().map(|s| s.as_str())
    }
    /// <p>The custom language models on this page, if any.</p>
    pub fn models(&self) -> std::option::Option<&[crate::model::LanguageModel]> {
        self.models.as_ref().map(|v| v.as_slice())
    }
}
impl std::fmt::Debug for ListLanguageModelsOutput {
    /// Renders every field through a chained `debug_struct` builder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ListLanguageModelsOutput")
            .field("next_token", &self.next_token)
            .field("models", &self.models)
            .finish()
    }
}
/// See [`ListLanguageModelsOutput`](crate::output::ListLanguageModelsOutput)
pub mod list_language_models_output {
    /// A builder for [`ListLanguageModelsOutput`](crate::output::ListLanguageModelsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) models: std::option::Option<std::vec::Vec<crate::model::LanguageModel>>,
    }
    impl Builder {
        /// Sets the pagination token returned when more language models remain than
        /// fit on one page.
        pub fn next_token(self, input: impl Into<std::string::String>) -> Self {
            self.set_next_token(Some(input.into()))
        }
        /// Sets or clears the pagination token.
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `models`.
        ///
        /// To override the contents of this collection use [`set_models`](Self::set_models).
        pub fn models(mut self, input: crate::model::LanguageModel) -> Self {
            // Create the backing Vec lazily on the first append.
            self.models.get_or_insert_with(Vec::new).push(input);
            self
        }
        /// Replaces the whole collection of language models.
        pub fn set_models(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::LanguageModel>>,
        ) -> Self {
            self.models = input;
            self
        }
        /// Consumes the builder and constructs a [`ListLanguageModelsOutput`](crate::output::ListLanguageModelsOutput)
        pub fn build(self) -> crate::output::ListLanguageModelsOutput {
            crate::output::ListLanguageModelsOutput {
                next_token: self.next_token,
                models: self.models,
            }
        }
    }
}
impl ListLanguageModelsOutput {
    /// Creates a new builder-style object to manufacture [`ListLanguageModelsOutput`](crate::output::ListLanguageModelsOutput)
    pub fn builder() -> crate::output::list_language_models_output::Builder {
        // The return type pins the concrete builder; `Default` yields an all-`None` instance.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output returned by the `ListCallAnalyticsJobs` operation: one page of call
/// analytics job summaries, the status filter that was applied, and an optional
/// pagination token.
pub struct ListCallAnalyticsJobsOutput {
    /// <p>When specified, returns only call analytics jobs with that status. Jobs are ordered by creation date, with the most recent jobs returned first. If you don't specify a status, Amazon Transcribe returns all transcription jobs ordered by creation date.</p>
    pub status: std::option::Option<crate::model::CallAnalyticsJobStatus>,
    /// <p>The operation returns a page of jobs at a time. The maximum size of the page is set by the <code>MaxResults</code> parameter. If there are more jobs in the list than the page size, Amazon Transcribe returns the <code>NextPage</code> token. Include the token in your next request to the operation to return next page of jobs.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>A list of objects containing summary information for a transcription job.</p>
    pub call_analytics_job_summaries:
        std::option::Option<std::vec::Vec<crate::model::CallAnalyticsJobSummary>>,
}
impl ListCallAnalyticsJobsOutput {
    /// <p>The status filter applied to the listing, if one was specified in the
    /// request; only jobs with this status were returned.</p>
    pub fn status(&self) -> std::option::Option<&crate::model::CallAnalyticsJobStatus> {
        self.status.as_ref()
    }
    /// <p>Pagination token. Returned when there are more jobs than fit on one page
    /// (page size is bounded by <code>MaxResults</code>); include it in your next
    /// request to fetch the next page.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_ref().map(|s| s.as_str())
    }
    /// <p>Summary information for each call analytics job on this page.</p>
    pub fn call_analytics_job_summaries(
        &self,
    ) -> std::option::Option<&[crate::model::CallAnalyticsJobSummary]> {
        self.call_analytics_job_summaries
            .as_ref()
            .map(|v| v.as_slice())
    }
}
impl std::fmt::Debug for ListCallAnalyticsJobsOutput {
    /// Renders every field through a chained `debug_struct` builder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ListCallAnalyticsJobsOutput")
            .field("status", &self.status)
            .field("next_token", &self.next_token)
            .field(
                "call_analytics_job_summaries",
                &self.call_analytics_job_summaries,
            )
            .finish()
    }
}
/// See [`ListCallAnalyticsJobsOutput`](crate::output::ListCallAnalyticsJobsOutput)
pub mod list_call_analytics_jobs_output {
    /// A builder for [`ListCallAnalyticsJobsOutput`](crate::output::ListCallAnalyticsJobsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) status: std::option::Option<crate::model::CallAnalyticsJobStatus>,
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) call_analytics_job_summaries:
            std::option::Option<std::vec::Vec<crate::model::CallAnalyticsJobSummary>>,
    }
    impl Builder {
        /// Sets the status filter applied to the listing.
        pub fn status(self, input: crate::model::CallAnalyticsJobStatus) -> Self {
            self.set_status(Some(input))
        }
        /// Sets or clears the status filter applied to the listing.
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::CallAnalyticsJobStatus>,
        ) -> Self {
            self.status = input;
            self
        }
        /// Sets the pagination token returned when more jobs remain than fit on one page.
        pub fn next_token(self, input: impl Into<std::string::String>) -> Self {
            self.set_next_token(Some(input.into()))
        }
        /// Sets or clears the pagination token.
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `call_analytics_job_summaries`.
        ///
        /// To override the contents of this collection use
        /// [`set_call_analytics_job_summaries`](Self::set_call_analytics_job_summaries).
        pub fn call_analytics_job_summaries(
            mut self,
            input: crate::model::CallAnalyticsJobSummary,
        ) -> Self {
            // Create the backing Vec lazily on the first append.
            self.call_analytics_job_summaries
                .get_or_insert_with(Vec::new)
                .push(input);
            self
        }
        /// Replaces the whole collection of job summaries.
        pub fn set_call_analytics_job_summaries(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::CallAnalyticsJobSummary>>,
        ) -> Self {
            self.call_analytics_job_summaries = input;
            self
        }
        /// Consumes the builder and constructs a [`ListCallAnalyticsJobsOutput`](crate::output::ListCallAnalyticsJobsOutput)
        pub fn build(self) -> crate::output::ListCallAnalyticsJobsOutput {
            crate::output::ListCallAnalyticsJobsOutput {
                status: self.status,
                next_token: self.next_token,
                call_analytics_job_summaries: self.call_analytics_job_summaries,
            }
        }
    }
}
impl ListCallAnalyticsJobsOutput {
    /// Creates a new builder-style object to manufacture [`ListCallAnalyticsJobsOutput`](crate::output::ListCallAnalyticsJobsOutput)
    pub fn builder() -> crate::output::list_call_analytics_jobs_output::Builder {
        // The return type pins the concrete builder; `Default` yields an all-`None` instance.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output returned by the `ListCallAnalyticsCategories` operation: one page of
/// analytics categories plus an optional pagination token.
pub struct ListCallAnalyticsCategoriesOutput {
    /// <p>The operation returns a page of jobs at a time. The maximum size of the list is set by the <code>MaxResults</code> parameter. If there are more categories in the list than the page size, Amazon Transcribe returns the <code>NextPage</code> token. Include the token in the next request to the operation to return the next page of analytics categories.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>A list of objects containing information about analytics categories.</p>
    pub categories: std::option::Option<std::vec::Vec<crate::model::CategoryProperties>>,
}
impl ListCallAnalyticsCategoriesOutput {
    /// <p>Pagination token. Returned when there are more categories than fit on one
    /// page (page size is bounded by <code>MaxResults</code>); include it in the next
    /// request to fetch the next page of analytics categories.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_ref().map(|s| s.as_str())
    }
    /// <p>The analytics categories on this page, if any.</p>
    pub fn categories(&self) -> std::option::Option<&[crate::model::CategoryProperties]> {
        self.categories.as_ref().map(|v| v.as_slice())
    }
}
impl std::fmt::Debug for ListCallAnalyticsCategoriesOutput {
    /// Renders every field through a chained `debug_struct` builder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ListCallAnalyticsCategoriesOutput")
            .field("next_token", &self.next_token)
            .field("categories", &self.categories)
            .finish()
    }
}
/// See [`ListCallAnalyticsCategoriesOutput`](crate::output::ListCallAnalyticsCategoriesOutput)
pub mod list_call_analytics_categories_output {
    /// A builder for [`ListCallAnalyticsCategoriesOutput`](crate::output::ListCallAnalyticsCategoriesOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) categories: std::option::Option<std::vec::Vec<crate::model::CategoryProperties>>,
    }
    impl Builder {
        /// Sets the pagination token returned when more categories remain than fit
        /// on one page.
        pub fn next_token(self, input: impl Into<std::string::String>) -> Self {
            self.set_next_token(Some(input.into()))
        }
        /// Sets or clears the pagination token.
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `categories`.
        ///
        /// To override the contents of this collection use [`set_categories`](Self::set_categories).
        pub fn categories(mut self, input: crate::model::CategoryProperties) -> Self {
            // Create the backing Vec lazily on the first append.
            self.categories.get_or_insert_with(Vec::new).push(input);
            self
        }
        /// Replaces the whole collection of analytics categories.
        pub fn set_categories(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::CategoryProperties>>,
        ) -> Self {
            self.categories = input;
            self
        }
        /// Consumes the builder and constructs a [`ListCallAnalyticsCategoriesOutput`](crate::output::ListCallAnalyticsCategoriesOutput)
        pub fn build(self) -> crate::output::ListCallAnalyticsCategoriesOutput {
            crate::output::ListCallAnalyticsCategoriesOutput {
                next_token: self.next_token,
                categories: self.categories,
            }
        }
    }
}
impl ListCallAnalyticsCategoriesOutput {
    /// Creates a new builder-style object to manufacture [`ListCallAnalyticsCategoriesOutput`](crate::output::ListCallAnalyticsCategoriesOutput)
    pub fn builder() -> crate::output::list_call_analytics_categories_output::Builder {
        // The return type pins the concrete builder; `Default` yields an all-`None` instance.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output returned by the `GetVocabularyFilter` operation.
pub struct GetVocabularyFilterOutput {
    /// <p>The name of the vocabulary filter.</p>
    pub vocabulary_filter_name: std::option::Option<std::string::String>,
    /// <p>The language code of the words in the vocabulary filter.</p>
    pub language_code: std::option::Option<crate::model::LanguageCode>,
    /// <p>The date and time that the contents of the vocabulary filter were updated.</p>
    pub last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The URI of the list of words in the vocabulary filter. You can use this URI to get the list of words.</p>
    pub download_uri: std::option::Option<std::string::String>,
}
impl GetVocabularyFilterOutput {
    /// <p>Name of the vocabulary filter.</p>
    pub fn vocabulary_filter_name(&self) -> std::option::Option<&str> {
        self.vocabulary_filter_name.as_ref().map(|s| s.as_str())
    }
    /// <p>Language code of the words in the vocabulary filter.</p>
    pub fn language_code(&self) -> std::option::Option<&crate::model::LanguageCode> {
        self.language_code.as_ref()
    }
    /// <p>Date and time that the contents of the vocabulary filter were updated.</p>
    pub fn last_modified_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.last_modified_time.as_ref()
    }
    /// <p>URI of the list of words in the vocabulary filter; use it to fetch the
    /// list of words.</p>
    pub fn download_uri(&self) -> std::option::Option<&str> {
        self.download_uri.as_ref().map(|s| s.as_str())
    }
}
impl std::fmt::Debug for GetVocabularyFilterOutput {
    /// Renders every field through a chained `debug_struct` builder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GetVocabularyFilterOutput")
            .field("vocabulary_filter_name", &self.vocabulary_filter_name)
            .field("language_code", &self.language_code)
            .field("last_modified_time", &self.last_modified_time)
            .field("download_uri", &self.download_uri)
            .finish()
    }
}
/// See [`GetVocabularyFilterOutput`](crate::output::GetVocabularyFilterOutput)
pub mod get_vocabulary_filter_output {
    /// A builder for [`GetVocabularyFilterOutput`](crate::output::GetVocabularyFilterOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) vocabulary_filter_name: std::option::Option<std::string::String>,
        pub(crate) language_code: std::option::Option<crate::model::LanguageCode>,
        pub(crate) last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) download_uri: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Sets the name of the vocabulary filter.
        pub fn vocabulary_filter_name(self, input: impl Into<std::string::String>) -> Self {
            self.set_vocabulary_filter_name(Some(input.into()))
        }
        /// Sets or clears the name of the vocabulary filter.
        pub fn set_vocabulary_filter_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.vocabulary_filter_name = input;
            self
        }
        /// Sets the language code of the words in the vocabulary filter.
        pub fn language_code(self, input: crate::model::LanguageCode) -> Self {
            self.set_language_code(Some(input))
        }
        /// Sets or clears the language code of the words in the vocabulary filter.
        pub fn set_language_code(
            mut self,
            input: std::option::Option<crate::model::LanguageCode>,
        ) -> Self {
            self.language_code = input;
            self
        }
        /// Sets the date and time that the contents of the vocabulary filter were updated.
        pub fn last_modified_time(self, input: aws_smithy_types::DateTime) -> Self {
            self.set_last_modified_time(Some(input))
        }
        /// Sets or clears the last-modified timestamp.
        pub fn set_last_modified_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.last_modified_time = input;
            self
        }
        /// Sets the URI of the list of words in the vocabulary filter.
        pub fn download_uri(self, input: impl Into<std::string::String>) -> Self {
            self.set_download_uri(Some(input.into()))
        }
        /// Sets or clears the download URI.
        pub fn set_download_uri(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.download_uri = input;
            self
        }
        /// Consumes the builder and constructs a [`GetVocabularyFilterOutput`](crate::output::GetVocabularyFilterOutput)
        pub fn build(self) -> crate::output::GetVocabularyFilterOutput {
            crate::output::GetVocabularyFilterOutput {
                vocabulary_filter_name: self.vocabulary_filter_name,
                language_code: self.language_code,
                last_modified_time: self.last_modified_time,
                download_uri: self.download_uri,
            }
        }
    }
}
impl GetVocabularyFilterOutput {
    /// Creates a new builder-style object to manufacture [`GetVocabularyFilterOutput`](crate::output::GetVocabularyFilterOutput)
    pub fn builder() -> crate::output::get_vocabulary_filter_output::Builder {
        // The return type pins the concrete builder; `Default` yields an all-`None` instance.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output returned by the `GetVocabulary` operation.
pub struct GetVocabularyOutput {
    /// <p>The name of the vocabulary to return.</p>
    pub vocabulary_name: std::option::Option<std::string::String>,
    /// <p>The language code of the vocabulary entries.</p>
    pub language_code: std::option::Option<crate::model::LanguageCode>,
    /// <p>The processing state of the vocabulary.</p>
    pub vocabulary_state: std::option::Option<crate::model::VocabularyState>,
    /// <p>The date and time that the vocabulary was last modified.</p>
    pub last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>If the <code>VocabularyState</code> field is <code>FAILED</code>, this field contains information about why the job failed.</p>
    pub failure_reason: std::option::Option<std::string::String>,
    /// <p>The S3 location where the vocabulary is stored. Use this URI to get the contents of the vocabulary. The URI is available for a limited time.</p>
    pub download_uri: std::option::Option<std::string::String>,
}
impl GetVocabularyOutput {
    /// <p>Name of the returned vocabulary.</p>
    pub fn vocabulary_name(&self) -> std::option::Option<&str> {
        self.vocabulary_name.as_ref().map(|s| s.as_str())
    }
    /// <p>Language code of the vocabulary entries.</p>
    pub fn language_code(&self) -> std::option::Option<&crate::model::LanguageCode> {
        self.language_code.as_ref()
    }
    /// <p>Processing state of the vocabulary.</p>
    pub fn vocabulary_state(&self) -> std::option::Option<&crate::model::VocabularyState> {
        self.vocabulary_state.as_ref()
    }
    /// <p>Date and time that the vocabulary was last modified.</p>
    pub fn last_modified_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.last_modified_time.as_ref()
    }
    /// <p>When the <code>VocabularyState</code> field is <code>FAILED</code>, this
    /// contains information about why the job failed.</p>
    pub fn failure_reason(&self) -> std::option::Option<&str> {
        self.failure_reason.as_ref().map(|s| s.as_str())
    }
    /// <p>S3 location where the vocabulary is stored; use this URI (available for a
    /// limited time) to get the contents of the vocabulary.</p>
    pub fn download_uri(&self) -> std::option::Option<&str> {
        self.download_uri.as_ref().map(|s| s.as_str())
    }
}
impl std::fmt::Debug for GetVocabularyOutput {
    /// Renders every field through a chained `debug_struct` builder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GetVocabularyOutput")
            .field("vocabulary_name", &self.vocabulary_name)
            .field("language_code", &self.language_code)
            .field("vocabulary_state", &self.vocabulary_state)
            .field("last_modified_time", &self.last_modified_time)
            .field("failure_reason", &self.failure_reason)
            .field("download_uri", &self.download_uri)
            .finish()
    }
}
/// See [`GetVocabularyOutput`](crate::output::GetVocabularyOutput)
pub mod get_vocabulary_output {
    /// A builder for [`GetVocabularyOutput`](crate::output::GetVocabularyOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) vocabulary_name: std::option::Option<std::string::String>,
        pub(crate) language_code: std::option::Option<crate::model::LanguageCode>,
        pub(crate) vocabulary_state: std::option::Option<crate::model::VocabularyState>,
        pub(crate) last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) failure_reason: std::option::Option<std::string::String>,
        pub(crate) download_uri: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Sets the name of the vocabulary to return.
        pub fn vocabulary_name(self, input: impl Into<std::string::String>) -> Self {
            self.set_vocabulary_name(Some(input.into()))
        }
        /// Sets or clears the name of the vocabulary to return.
        pub fn set_vocabulary_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.vocabulary_name = input;
            self
        }
        /// Sets the language code of the vocabulary entries.
        pub fn language_code(self, input: crate::model::LanguageCode) -> Self {
            self.set_language_code(Some(input))
        }
        /// Sets or clears the language code of the vocabulary entries.
        pub fn set_language_code(
            mut self,
            input: std::option::Option<crate::model::LanguageCode>,
        ) -> Self {
            self.language_code = input;
            self
        }
        /// Sets the processing state of the vocabulary.
        pub fn vocabulary_state(self, input: crate::model::VocabularyState) -> Self {
            self.set_vocabulary_state(Some(input))
        }
        /// Sets or clears the processing state of the vocabulary.
        pub fn set_vocabulary_state(
            mut self,
            input: std::option::Option<crate::model::VocabularyState>,
        ) -> Self {
            self.vocabulary_state = input;
            self
        }
        /// Sets the date and time that the vocabulary was last modified.
        pub fn last_modified_time(self, input: aws_smithy_types::DateTime) -> Self {
            self.set_last_modified_time(Some(input))
        }
        /// Sets or clears the last-modified timestamp.
        pub fn set_last_modified_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.last_modified_time = input;
            self
        }
        /// Sets the failure description used when the vocabulary state is `FAILED`.
        pub fn failure_reason(self, input: impl Into<std::string::String>) -> Self {
            self.set_failure_reason(Some(input.into()))
        }
        /// Sets or clears the failure description.
        pub fn set_failure_reason(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.failure_reason = input;
            self
        }
        /// Sets the S3 URI from which the vocabulary contents can be downloaded.
        pub fn download_uri(self, input: impl Into<std::string::String>) -> Self {
            self.set_download_uri(Some(input.into()))
        }
        /// Sets or clears the download URI.
        pub fn set_download_uri(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.download_uri = input;
            self
        }
        /// Consumes the builder and constructs a [`GetVocabularyOutput`](crate::output::GetVocabularyOutput)
        pub fn build(self) -> crate::output::GetVocabularyOutput {
            crate::output::GetVocabularyOutput {
                vocabulary_name: self.vocabulary_name,
                language_code: self.language_code,
                vocabulary_state: self.vocabulary_state,
                last_modified_time: self.last_modified_time,
                failure_reason: self.failure_reason,
                download_uri: self.download_uri,
            }
        }
    }
}
impl GetVocabularyOutput {
    /// Creates a new builder-style object to manufacture [`GetVocabularyOutput`](crate::output::GetVocabularyOutput)
    pub fn builder() -> crate::output::get_vocabulary_output::Builder {
        // The return type pins the concrete builder; `Default` yields an all-`None` instance.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output returned by the `GetTranscriptionJob` operation.
pub struct GetTranscriptionJobOutput {
    /// <p>An object that contains the results of the transcription job.</p>
    pub transcription_job: std::option::Option<crate::model::TranscriptionJob>,
}
impl GetTranscriptionJobOutput {
    /// <p>The object that carries the results of the transcription job, if present.</p>
    pub fn transcription_job(&self) -> std::option::Option<&crate::model::TranscriptionJob> {
        self.transcription_job.as_ref()
    }
}
impl std::fmt::Debug for GetTranscriptionJobOutput {
    /// Renders the single field through a chained `debug_struct` builder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GetTranscriptionJobOutput")
            .field("transcription_job", &self.transcription_job)
            .finish()
    }
}
/// See [`GetTranscriptionJobOutput`](crate::output::GetTranscriptionJobOutput)
pub mod get_transcription_job_output {
    /// A builder for [`GetTranscriptionJobOutput`](crate::output::GetTranscriptionJobOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) transcription_job: std::option::Option<crate::model::TranscriptionJob>,
    }
    impl Builder {
        /// Sets the object that contains the results of the transcription job.
        pub fn transcription_job(self, input: crate::model::TranscriptionJob) -> Self {
            self.set_transcription_job(Some(input))
        }
        /// Sets or clears the transcription-job results object.
        pub fn set_transcription_job(
            mut self,
            input: std::option::Option<crate::model::TranscriptionJob>,
        ) -> Self {
            self.transcription_job = input;
            self
        }
        /// Consumes the builder and constructs a [`GetTranscriptionJobOutput`](crate::output::GetTranscriptionJobOutput)
        pub fn build(self) -> crate::output::GetTranscriptionJobOutput {
            crate::output::GetTranscriptionJobOutput {
                transcription_job: self.transcription_job,
            }
        }
    }
}
impl GetTranscriptionJobOutput {
/// Creates a new builder-style object to manufacture [`GetTranscriptionJobOutput`](crate::output::GetTranscriptionJobOutput)
pub fn builder() -> crate::output::get_transcription_job_output::Builder {
crate::output::get_transcription_job_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::cmp::PartialEq, std::clone::Clone)]
pub struct GetMedicalVocabularyOutput {
    /// <p>The name of the vocabulary returned by Amazon Transcribe Medical.</p>
    pub vocabulary_name: std::option::Option<std::string::String>,
    /// <p>The valid language code for your vocabulary entries.</p>
    pub language_code: std::option::Option<crate::model::LanguageCode>,
    /// <p>The processing state of the vocabulary. If the <code>VocabularyState</code> is <code>READY</code> then you can use it in the <code>StartMedicalTranscriptionJob</code> operation.</p>
    pub vocabulary_state: std::option::Option<crate::model::VocabularyState>,
    /// <p>The date and time that the vocabulary was last modified with a text file different from the one that was previously used.</p>
    pub last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>If the <code>VocabularyState</code> is <code>FAILED</code>, this field contains information about why the job failed.</p>
    pub failure_reason: std::option::Option<std::string::String>,
    /// <p>The location in Amazon S3 where the vocabulary is stored. Use this URI to get the contents of the vocabulary. You can download your vocabulary from the URI for a limited time.</p>
    pub download_uri: std::option::Option<std::string::String>,
}
impl GetMedicalVocabularyOutput {
    /// <p>The name of the vocabulary returned by Amazon Transcribe Medical.</p>
    pub fn vocabulary_name(&self) -> std::option::Option<&str> {
        self.vocabulary_name.as_ref().map(|name| name.as_str())
    }
    /// <p>The valid language code for your vocabulary entries.</p>
    pub fn language_code(&self) -> std::option::Option<&crate::model::LanguageCode> {
        match &self.language_code {
            Some(code) => Some(code),
            None => None,
        }
    }
    /// <p>The processing state of the vocabulary. If the <code>VocabularyState</code> is <code>READY</code> then you can use it in the <code>StartMedicalTranscriptionJob</code> operation.</p>
    pub fn vocabulary_state(&self) -> std::option::Option<&crate::model::VocabularyState> {
        match &self.vocabulary_state {
            Some(state) => Some(state),
            None => None,
        }
    }
    /// <p>The date and time that the vocabulary was last modified with a text file different from the one that was previously used.</p>
    pub fn last_modified_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        match &self.last_modified_time {
            Some(time) => Some(time),
            None => None,
        }
    }
    /// <p>If the <code>VocabularyState</code> is <code>FAILED</code>, this field contains information about why the job failed.</p>
    pub fn failure_reason(&self) -> std::option::Option<&str> {
        self.failure_reason.as_ref().map(|reason| reason.as_str())
    }
    /// <p>The location in Amazon S3 where the vocabulary is stored. Use this URI to get the contents of the vocabulary. You can download your vocabulary from the URI for a limited time.</p>
    pub fn download_uri(&self) -> std::option::Option<&str> {
        self.download_uri.as_ref().map(|uri| uri.as_str())
    }
}
impl std::fmt::Debug for GetMedicalVocabularyOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GetMedicalVocabularyOutput")
            .field("vocabulary_name", &self.vocabulary_name)
            .field("language_code", &self.language_code)
            .field("vocabulary_state", &self.vocabulary_state)
            .field("last_modified_time", &self.last_modified_time)
            .field("failure_reason", &self.failure_reason)
            .field("download_uri", &self.download_uri)
            .finish()
    }
}
/// See [`GetMedicalVocabularyOutput`](crate::output::GetMedicalVocabularyOutput)
pub mod get_medical_vocabulary_output {
    /// A builder for [`GetMedicalVocabularyOutput`](crate::output::GetMedicalVocabularyOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) vocabulary_name: std::option::Option<std::string::String>,
        pub(crate) language_code: std::option::Option<crate::model::LanguageCode>,
        pub(crate) vocabulary_state: std::option::Option<crate::model::VocabularyState>,
        pub(crate) last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) failure_reason: std::option::Option<std::string::String>,
        pub(crate) download_uri: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The name of the vocabulary returned by Amazon Transcribe Medical.</p>
        pub fn vocabulary_name(self, input: impl Into<std::string::String>) -> Self {
            self.set_vocabulary_name(std::option::Option::Some(input.into()))
        }
        /// <p>The name of the vocabulary returned by Amazon Transcribe Medical.</p>
        pub fn set_vocabulary_name(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                vocabulary_name: input,
                ..self
            }
        }
        /// <p>The valid language code for your vocabulary entries.</p>
        pub fn language_code(self, input: crate::model::LanguageCode) -> Self {
            self.set_language_code(std::option::Option::Some(input))
        }
        /// <p>The valid language code for your vocabulary entries.</p>
        pub fn set_language_code(
            self,
            input: std::option::Option<crate::model::LanguageCode>,
        ) -> Self {
            Self {
                language_code: input,
                ..self
            }
        }
        /// <p>The processing state of the vocabulary. If the <code>VocabularyState</code> is <code>READY</code> then you can use it in the <code>StartMedicalTranscriptionJob</code> operation.</p>
        pub fn vocabulary_state(self, input: crate::model::VocabularyState) -> Self {
            self.set_vocabulary_state(std::option::Option::Some(input))
        }
        /// <p>The processing state of the vocabulary. If the <code>VocabularyState</code> is <code>READY</code> then you can use it in the <code>StartMedicalTranscriptionJob</code> operation.</p>
        pub fn set_vocabulary_state(
            self,
            input: std::option::Option<crate::model::VocabularyState>,
        ) -> Self {
            Self {
                vocabulary_state: input,
                ..self
            }
        }
        /// <p>The date and time that the vocabulary was last modified with a text file different from the one that was previously used.</p>
        pub fn last_modified_time(self, input: aws_smithy_types::DateTime) -> Self {
            self.set_last_modified_time(std::option::Option::Some(input))
        }
        /// <p>The date and time that the vocabulary was last modified with a text file different from the one that was previously used.</p>
        pub fn set_last_modified_time(
            self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            Self {
                last_modified_time: input,
                ..self
            }
        }
        /// <p>If the <code>VocabularyState</code> is <code>FAILED</code>, this field contains information about why the job failed.</p>
        pub fn failure_reason(self, input: impl Into<std::string::String>) -> Self {
            self.set_failure_reason(std::option::Option::Some(input.into()))
        }
        /// <p>If the <code>VocabularyState</code> is <code>FAILED</code>, this field contains information about why the job failed.</p>
        pub fn set_failure_reason(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                failure_reason: input,
                ..self
            }
        }
        /// <p>The location in Amazon S3 where the vocabulary is stored. Use this URI to get the contents of the vocabulary. You can download your vocabulary from the URI for a limited time.</p>
        pub fn download_uri(self, input: impl Into<std::string::String>) -> Self {
            self.set_download_uri(std::option::Option::Some(input.into()))
        }
        /// <p>The location in Amazon S3 where the vocabulary is stored. Use this URI to get the contents of the vocabulary. You can download your vocabulary from the URI for a limited time.</p>
        pub fn set_download_uri(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                download_uri: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`GetMedicalVocabularyOutput`](crate::output::GetMedicalVocabularyOutput)
        pub fn build(self) -> crate::output::GetMedicalVocabularyOutput {
            let Self {
                vocabulary_name,
                language_code,
                vocabulary_state,
                last_modified_time,
                failure_reason,
                download_uri,
            } = self;
            crate::output::GetMedicalVocabularyOutput {
                vocabulary_name,
                language_code,
                vocabulary_state,
                last_modified_time,
                failure_reason,
                download_uri,
            }
        }
    }
}
impl GetMedicalVocabularyOutput {
    /// Creates a new builder-style object to manufacture [`GetMedicalVocabularyOutput`](crate::output::GetMedicalVocabularyOutput)
    pub fn builder() -> crate::output::get_medical_vocabulary_output::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::cmp::PartialEq, std::clone::Clone)]
pub struct GetMedicalTranscriptionJobOutput {
    /// <p>An object that contains the results of the medical transcription job.</p>
    pub medical_transcription_job: std::option::Option<crate::model::MedicalTranscriptionJob>,
}
impl GetMedicalTranscriptionJobOutput {
    /// <p>An object that contains the results of the medical transcription job.</p>
    pub fn medical_transcription_job(
        &self,
    ) -> std::option::Option<&crate::model::MedicalTranscriptionJob> {
        // Borrow the optional job rather than moving it out.
        match &self.medical_transcription_job {
            Some(job) => Some(job),
            None => None,
        }
    }
}
impl std::fmt::Debug for GetMedicalTranscriptionJobOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GetMedicalTranscriptionJobOutput")
            .field("medical_transcription_job", &self.medical_transcription_job)
            .finish()
    }
}
/// See [`GetMedicalTranscriptionJobOutput`](crate::output::GetMedicalTranscriptionJobOutput)
pub mod get_medical_transcription_job_output {
    /// A builder for [`GetMedicalTranscriptionJobOutput`](crate::output::GetMedicalTranscriptionJobOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) medical_transcription_job:
            std::option::Option<crate::model::MedicalTranscriptionJob>,
    }
    impl Builder {
        /// <p>An object that contains the results of the medical transcription job.</p>
        pub fn medical_transcription_job(
            self,
            input: crate::model::MedicalTranscriptionJob,
        ) -> Self {
            // Convenience wrapper around the Option-taking setter.
            self.set_medical_transcription_job(std::option::Option::Some(input))
        }
        /// <p>An object that contains the results of the medical transcription job.</p>
        pub fn set_medical_transcription_job(
            self,
            input: std::option::Option<crate::model::MedicalTranscriptionJob>,
        ) -> Self {
            Self {
                medical_transcription_job: input,
            }
        }
        /// Consumes the builder and constructs a [`GetMedicalTranscriptionJobOutput`](crate::output::GetMedicalTranscriptionJobOutput)
        pub fn build(self) -> crate::output::GetMedicalTranscriptionJobOutput {
            let Self {
                medical_transcription_job,
            } = self;
            crate::output::GetMedicalTranscriptionJobOutput {
                medical_transcription_job,
            }
        }
    }
}
impl GetMedicalTranscriptionJobOutput {
    /// Creates a new builder-style object to manufacture [`GetMedicalTranscriptionJobOutput`](crate::output::GetMedicalTranscriptionJobOutput)
    pub fn builder() -> crate::output::get_medical_transcription_job_output::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::cmp::PartialEq, std::clone::Clone)]
pub struct GetCallAnalyticsJobOutput {
    /// <p>An object that contains the results of your call analytics job.</p>
    pub call_analytics_job: std::option::Option<crate::model::CallAnalyticsJob>,
}
impl GetCallAnalyticsJobOutput {
    /// <p>An object that contains the results of your call analytics job.</p>
    pub fn call_analytics_job(&self) -> std::option::Option<&crate::model::CallAnalyticsJob> {
        // Borrow the optional job rather than moving it out.
        match &self.call_analytics_job {
            Some(job) => Some(job),
            None => None,
        }
    }
}
impl std::fmt::Debug for GetCallAnalyticsJobOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GetCallAnalyticsJobOutput")
            .field("call_analytics_job", &self.call_analytics_job)
            .finish()
    }
}
/// See [`GetCallAnalyticsJobOutput`](crate::output::GetCallAnalyticsJobOutput)
pub mod get_call_analytics_job_output {
    /// A builder for [`GetCallAnalyticsJobOutput`](crate::output::GetCallAnalyticsJobOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) call_analytics_job: std::option::Option<crate::model::CallAnalyticsJob>,
    }
    impl Builder {
        /// <p>An object that contains the results of your call analytics job.</p>
        pub fn call_analytics_job(self, input: crate::model::CallAnalyticsJob) -> Self {
            // Convenience wrapper around the Option-taking setter.
            self.set_call_analytics_job(std::option::Option::Some(input))
        }
        /// <p>An object that contains the results of your call analytics job.</p>
        pub fn set_call_analytics_job(
            self,
            input: std::option::Option<crate::model::CallAnalyticsJob>,
        ) -> Self {
            Self {
                call_analytics_job: input,
            }
        }
        /// Consumes the builder and constructs a [`GetCallAnalyticsJobOutput`](crate::output::GetCallAnalyticsJobOutput)
        pub fn build(self) -> crate::output::GetCallAnalyticsJobOutput {
            let Self { call_analytics_job } = self;
            crate::output::GetCallAnalyticsJobOutput { call_analytics_job }
        }
    }
}
impl GetCallAnalyticsJobOutput {
    /// Creates a new builder-style object to manufacture [`GetCallAnalyticsJobOutput`](crate::output::GetCallAnalyticsJobOutput)
    pub fn builder() -> crate::output::get_call_analytics_job_output::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::cmp::PartialEq, std::clone::Clone)]
pub struct GetCallAnalyticsCategoryOutput {
    /// <p>The rules you've defined for a category.</p>
    pub category_properties: std::option::Option<crate::model::CategoryProperties>,
}
impl GetCallAnalyticsCategoryOutput {
    /// <p>The rules you've defined for a category.</p>
    pub fn category_properties(&self) -> std::option::Option<&crate::model::CategoryProperties> {
        // Borrow the optional properties rather than moving them out.
        match &self.category_properties {
            Some(properties) => Some(properties),
            None => None,
        }
    }
}
impl std::fmt::Debug for GetCallAnalyticsCategoryOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GetCallAnalyticsCategoryOutput")
            .field("category_properties", &self.category_properties)
            .finish()
    }
}
/// See [`GetCallAnalyticsCategoryOutput`](crate::output::GetCallAnalyticsCategoryOutput)
pub mod get_call_analytics_category_output {
    /// A builder for [`GetCallAnalyticsCategoryOutput`](crate::output::GetCallAnalyticsCategoryOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) category_properties: std::option::Option<crate::model::CategoryProperties>,
    }
    impl Builder {
        /// <p>The rules you've defined for a category.</p>
        pub fn category_properties(self, input: crate::model::CategoryProperties) -> Self {
            // Convenience wrapper around the Option-taking setter.
            self.set_category_properties(std::option::Option::Some(input))
        }
        /// <p>The rules you've defined for a category.</p>
        pub fn set_category_properties(
            self,
            input: std::option::Option<crate::model::CategoryProperties>,
        ) -> Self {
            Self {
                category_properties: input,
            }
        }
        /// Consumes the builder and constructs a [`GetCallAnalyticsCategoryOutput`](crate::output::GetCallAnalyticsCategoryOutput)
        pub fn build(self) -> crate::output::GetCallAnalyticsCategoryOutput {
            let Self {
                category_properties,
            } = self;
            crate::output::GetCallAnalyticsCategoryOutput {
                category_properties,
            }
        }
    }
}
impl GetCallAnalyticsCategoryOutput {
    /// Creates a new builder-style object to manufacture [`GetCallAnalyticsCategoryOutput`](crate::output::GetCallAnalyticsCategoryOutput)
    pub fn builder() -> crate::output::get_call_analytics_category_output::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::cmp::PartialEq, std::clone::Clone)]
pub struct DescribeLanguageModelOutput {
    /// <p>The name of the custom language model you requested more information about.</p>
    pub language_model: std::option::Option<crate::model::LanguageModel>,
}
impl DescribeLanguageModelOutput {
    /// <p>The name of the custom language model you requested more information about.</p>
    pub fn language_model(&self) -> std::option::Option<&crate::model::LanguageModel> {
        // Borrow the optional model rather than moving it out.
        match &self.language_model {
            Some(model) => Some(model),
            None => None,
        }
    }
}
impl std::fmt::Debug for DescribeLanguageModelOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("DescribeLanguageModelOutput")
            .field("language_model", &self.language_model)
            .finish()
    }
}
/// See [`DescribeLanguageModelOutput`](crate::output::DescribeLanguageModelOutput)
pub mod describe_language_model_output {
    /// A builder for [`DescribeLanguageModelOutput`](crate::output::DescribeLanguageModelOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) language_model: std::option::Option<crate::model::LanguageModel>,
    }
    impl Builder {
        /// <p>The name of the custom language model you requested more information about.</p>
        pub fn language_model(self, input: crate::model::LanguageModel) -> Self {
            // Convenience wrapper around the Option-taking setter.
            self.set_language_model(std::option::Option::Some(input))
        }
        /// <p>The name of the custom language model you requested more information about.</p>
        pub fn set_language_model(
            self,
            input: std::option::Option<crate::model::LanguageModel>,
        ) -> Self {
            Self {
                language_model: input,
            }
        }
        /// Consumes the builder and constructs a [`DescribeLanguageModelOutput`](crate::output::DescribeLanguageModelOutput)
        pub fn build(self) -> crate::output::DescribeLanguageModelOutput {
            let Self { language_model } = self;
            crate::output::DescribeLanguageModelOutput { language_model }
        }
    }
}
impl DescribeLanguageModelOutput {
    /// Creates a new builder-style object to manufacture [`DescribeLanguageModelOutput`](crate::output::DescribeLanguageModelOutput)
    pub fn builder() -> crate::output::describe_language_model_output::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::cmp::PartialEq, std::clone::Clone)]
pub struct DeleteVocabularyFilterOutput {}
impl std::fmt::Debug for DeleteVocabularyFilterOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // No fields to print; the operation returns an empty structure.
        f.debug_struct("DeleteVocabularyFilterOutput").finish()
    }
}
/// See [`DeleteVocabularyFilterOutput`](crate::output::DeleteVocabularyFilterOutput)
pub mod delete_vocabulary_filter_output {
    /// A builder for [`DeleteVocabularyFilterOutput`](crate::output::DeleteVocabularyFilterOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`DeleteVocabularyFilterOutput`](crate::output::DeleteVocabularyFilterOutput)
        pub fn build(self) -> crate::output::DeleteVocabularyFilterOutput {
            crate::output::DeleteVocabularyFilterOutput {}
        }
    }
}
impl DeleteVocabularyFilterOutput {
    /// Creates a new builder-style object to manufacture [`DeleteVocabularyFilterOutput`](crate::output::DeleteVocabularyFilterOutput)
    pub fn builder() -> crate::output::delete_vocabulary_filter_output::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::cmp::PartialEq, std::clone::Clone)]
pub struct DeleteVocabularyOutput {}
impl std::fmt::Debug for DeleteVocabularyOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // No fields to print; the operation returns an empty structure.
        f.debug_struct("DeleteVocabularyOutput").finish()
    }
}
/// See [`DeleteVocabularyOutput`](crate::output::DeleteVocabularyOutput)
pub mod delete_vocabulary_output {
    /// A builder for [`DeleteVocabularyOutput`](crate::output::DeleteVocabularyOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`DeleteVocabularyOutput`](crate::output::DeleteVocabularyOutput)
        pub fn build(self) -> crate::output::DeleteVocabularyOutput {
            crate::output::DeleteVocabularyOutput {}
        }
    }
}
impl DeleteVocabularyOutput {
    /// Creates a new builder-style object to manufacture [`DeleteVocabularyOutput`](crate::output::DeleteVocabularyOutput)
    pub fn builder() -> crate::output::delete_vocabulary_output::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::cmp::PartialEq, std::clone::Clone)]
pub struct DeleteTranscriptionJobOutput {}
impl std::fmt::Debug for DeleteTranscriptionJobOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // No fields to print; the operation returns an empty structure.
        f.debug_struct("DeleteTranscriptionJobOutput").finish()
    }
}
/// See [`DeleteTranscriptionJobOutput`](crate::output::DeleteTranscriptionJobOutput)
pub mod delete_transcription_job_output {
    /// A builder for [`DeleteTranscriptionJobOutput`](crate::output::DeleteTranscriptionJobOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`DeleteTranscriptionJobOutput`](crate::output::DeleteTranscriptionJobOutput)
        pub fn build(self) -> crate::output::DeleteTranscriptionJobOutput {
            crate::output::DeleteTranscriptionJobOutput {}
        }
    }
}
impl DeleteTranscriptionJobOutput {
    /// Creates a new builder-style object to manufacture [`DeleteTranscriptionJobOutput`](crate::output::DeleteTranscriptionJobOutput)
    pub fn builder() -> crate::output::delete_transcription_job_output::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::cmp::PartialEq, std::clone::Clone)]
pub struct DeleteMedicalVocabularyOutput {}
impl std::fmt::Debug for DeleteMedicalVocabularyOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // No fields to print; the operation returns an empty structure.
        f.debug_struct("DeleteMedicalVocabularyOutput").finish()
    }
}
/// See [`DeleteMedicalVocabularyOutput`](crate::output::DeleteMedicalVocabularyOutput)
pub mod delete_medical_vocabulary_output {
    /// A builder for [`DeleteMedicalVocabularyOutput`](crate::output::DeleteMedicalVocabularyOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`DeleteMedicalVocabularyOutput`](crate::output::DeleteMedicalVocabularyOutput)
        pub fn build(self) -> crate::output::DeleteMedicalVocabularyOutput {
            crate::output::DeleteMedicalVocabularyOutput {}
        }
    }
}
impl DeleteMedicalVocabularyOutput {
    /// Creates a new builder-style object to manufacture [`DeleteMedicalVocabularyOutput`](crate::output::DeleteMedicalVocabularyOutput)
    pub fn builder() -> crate::output::delete_medical_vocabulary_output::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::cmp::PartialEq, std::clone::Clone)]
pub struct DeleteMedicalTranscriptionJobOutput {}
impl std::fmt::Debug for DeleteMedicalTranscriptionJobOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // No fields to print; the operation returns an empty structure.
        f.debug_struct("DeleteMedicalTranscriptionJobOutput").finish()
    }
}
/// See [`DeleteMedicalTranscriptionJobOutput`](crate::output::DeleteMedicalTranscriptionJobOutput)
pub mod delete_medical_transcription_job_output {
    /// A builder for [`DeleteMedicalTranscriptionJobOutput`](crate::output::DeleteMedicalTranscriptionJobOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`DeleteMedicalTranscriptionJobOutput`](crate::output::DeleteMedicalTranscriptionJobOutput)
        pub fn build(self) -> crate::output::DeleteMedicalTranscriptionJobOutput {
            crate::output::DeleteMedicalTranscriptionJobOutput {}
        }
    }
}
impl DeleteMedicalTranscriptionJobOutput {
    /// Creates a new builder-style object to manufacture [`DeleteMedicalTranscriptionJobOutput`](crate::output::DeleteMedicalTranscriptionJobOutput)
    pub fn builder() -> crate::output::delete_medical_transcription_job_output::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::cmp::PartialEq, std::clone::Clone)]
pub struct DeleteLanguageModelOutput {}
impl std::fmt::Debug for DeleteLanguageModelOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // No fields to print; the operation returns an empty structure.
        f.debug_struct("DeleteLanguageModelOutput").finish()
    }
}
/// See [`DeleteLanguageModelOutput`](crate::output::DeleteLanguageModelOutput)
pub mod delete_language_model_output {
    /// A builder for [`DeleteLanguageModelOutput`](crate::output::DeleteLanguageModelOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`DeleteLanguageModelOutput`](crate::output::DeleteLanguageModelOutput)
        pub fn build(self) -> crate::output::DeleteLanguageModelOutput {
            crate::output::DeleteLanguageModelOutput {}
        }
    }
}
impl DeleteLanguageModelOutput {
    /// Creates a new builder-style object to manufacture [`DeleteLanguageModelOutput`](crate::output::DeleteLanguageModelOutput)
    pub fn builder() -> crate::output::delete_language_model_output::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::cmp::PartialEq, std::clone::Clone)]
pub struct DeleteCallAnalyticsJobOutput {}
impl std::fmt::Debug for DeleteCallAnalyticsJobOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // No fields to print; the operation returns an empty structure.
        f.debug_struct("DeleteCallAnalyticsJobOutput").finish()
    }
}
/// See [`DeleteCallAnalyticsJobOutput`](crate::output::DeleteCallAnalyticsJobOutput)
pub mod delete_call_analytics_job_output {
    /// A builder for [`DeleteCallAnalyticsJobOutput`](crate::output::DeleteCallAnalyticsJobOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`DeleteCallAnalyticsJobOutput`](crate::output::DeleteCallAnalyticsJobOutput)
        pub fn build(self) -> crate::output::DeleteCallAnalyticsJobOutput {
            crate::output::DeleteCallAnalyticsJobOutput {}
        }
    }
}
impl DeleteCallAnalyticsJobOutput {
    /// Creates a new builder-style object to manufacture [`DeleteCallAnalyticsJobOutput`](crate::output::DeleteCallAnalyticsJobOutput)
    pub fn builder() -> crate::output::delete_call_analytics_job_output::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::cmp::PartialEq, std::clone::Clone)]
pub struct DeleteCallAnalyticsCategoryOutput {}
impl std::fmt::Debug for DeleteCallAnalyticsCategoryOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // No fields to print; the operation returns an empty structure.
        f.debug_struct("DeleteCallAnalyticsCategoryOutput").finish()
    }
}
/// See [`DeleteCallAnalyticsCategoryOutput`](crate::output::DeleteCallAnalyticsCategoryOutput)
pub mod delete_call_analytics_category_output {
    /// A builder for [`DeleteCallAnalyticsCategoryOutput`](crate::output::DeleteCallAnalyticsCategoryOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`DeleteCallAnalyticsCategoryOutput`](crate::output::DeleteCallAnalyticsCategoryOutput)
        pub fn build(self) -> crate::output::DeleteCallAnalyticsCategoryOutput {
            crate::output::DeleteCallAnalyticsCategoryOutput {}
        }
    }
}
impl DeleteCallAnalyticsCategoryOutput {
    /// Creates a new builder-style object to manufacture [`DeleteCallAnalyticsCategoryOutput`](crate::output::DeleteCallAnalyticsCategoryOutput)
    pub fn builder() -> crate::output::delete_call_analytics_category_output::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateVocabularyFilterOutput {
/// <p>The name of the vocabulary filter.</p>
pub vocabulary_filter_name: std::option::Option<std::string::String>,
/// <p>The language code of the words in the collection.</p>
pub language_code: std::option::Option<crate::model::LanguageCode>,
/// <p>The date and time that the vocabulary filter was modified.</p>
pub last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl CreateVocabularyFilterOutput {
/// <p>The name of the vocabulary filter.</p>
pub fn vocabulary_filter_name(&self) -> std::option::Option<&str> {
self.vocabulary_filter_name.as_deref()
}
/// <p>The language code of the words in the collection.</p>
pub fn language_code(&self) -> std::option::Option<&crate::model::LanguageCode> {
self.language_code.as_ref()
}
/// <p>The date and time that the vocabulary filter was modified.</p>
pub fn last_modified_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.last_modified_time.as_ref()
}
}
impl std::fmt::Debug for CreateVocabularyFilterOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateVocabularyFilterOutput");
formatter.field("vocabulary_filter_name", &self.vocabulary_filter_name);
formatter.field("language_code", &self.language_code);
formatter.field("last_modified_time", &self.last_modified_time);
formatter.finish()
}
}
/// See [`CreateVocabularyFilterOutput`](crate::output::CreateVocabularyFilterOutput)
pub mod create_vocabulary_filter_output {
    /// A builder for [`CreateVocabularyFilterOutput`](crate::output::CreateVocabularyFilterOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) vocabulary_filter_name: std::option::Option<std::string::String>,
        pub(crate) language_code: std::option::Option<crate::model::LanguageCode>,
        pub(crate) last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
    }
    impl Builder {
        /// <p>The name of the vocabulary filter.</p>
        pub fn vocabulary_filter_name(self, input: impl Into<std::string::String>) -> Self {
            Self {
                vocabulary_filter_name: Some(input.into()),
                ..self
            }
        }
        /// <p>The name of the vocabulary filter.</p>
        pub fn set_vocabulary_filter_name(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                vocabulary_filter_name: input,
                ..self
            }
        }
        /// <p>The language code of the words in the collection.</p>
        pub fn language_code(self, input: crate::model::LanguageCode) -> Self {
            Self {
                language_code: Some(input),
                ..self
            }
        }
        /// <p>The language code of the words in the collection.</p>
        pub fn set_language_code(
            self,
            input: std::option::Option<crate::model::LanguageCode>,
        ) -> Self {
            Self {
                language_code: input,
                ..self
            }
        }
        /// <p>The date and time that the vocabulary filter was modified.</p>
        pub fn last_modified_time(self, input: aws_smithy_types::DateTime) -> Self {
            Self {
                last_modified_time: Some(input),
                ..self
            }
        }
        /// <p>The date and time that the vocabulary filter was modified.</p>
        pub fn set_last_modified_time(
            self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            Self {
                last_modified_time: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`CreateVocabularyFilterOutput`](crate::output::CreateVocabularyFilterOutput)
        pub fn build(self) -> crate::output::CreateVocabularyFilterOutput {
            // Destructure so a newly added builder field cannot be silently dropped.
            let Self {
                vocabulary_filter_name,
                language_code,
                last_modified_time,
            } = self;
            crate::output::CreateVocabularyFilterOutput {
                vocabulary_filter_name,
                language_code,
                last_modified_time,
            }
        }
    }
}
impl CreateVocabularyFilterOutput {
    /// Creates a new builder-style object to manufacture [`CreateVocabularyFilterOutput`](crate::output::CreateVocabularyFilterOutput)
    pub fn builder() -> crate::output::create_vocabulary_filter_output::Builder {
        // Builder derives `Default`; the return type drives inference here.
        Default::default()
    }
}
/// Output type for the `CreateVocabulary` operation; every field is optional.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateVocabularyOutput {
    /// <p>The name of the vocabulary.</p>
    pub vocabulary_name: std::option::Option<std::string::String>,
    /// <p>The language code of the vocabulary entries.</p>
    pub language_code: std::option::Option<crate::model::LanguageCode>,
    /// <p>The processing state of the vocabulary. When the <code>VocabularyState</code> field contains <code>READY</code> the vocabulary is ready to be used in a <code>StartTranscriptionJob</code> request.</p>
    pub vocabulary_state: std::option::Option<crate::model::VocabularyState>,
    /// <p>The date and time that the vocabulary was created.</p>
    pub last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>If the <code>VocabularyState</code> field is <code>FAILED</code>, this field contains information about why the job failed.</p>
    pub failure_reason: std::option::Option<std::string::String>,
}
impl CreateVocabularyOutput {
    /// <p>The name of the vocabulary.</p>
    pub fn vocabulary_name(&self) -> std::option::Option<&str> {
        self.vocabulary_name.as_ref().map(|name| name.as_str())
    }
    /// <p>The language code of the vocabulary entries.</p>
    pub fn language_code(&self) -> std::option::Option<&crate::model::LanguageCode> {
        Option::as_ref(&self.language_code)
    }
    /// <p>The processing state of the vocabulary. When the <code>VocabularyState</code> field contains <code>READY</code> the vocabulary is ready to be used in a <code>StartTranscriptionJob</code> request.</p>
    pub fn vocabulary_state(&self) -> std::option::Option<&crate::model::VocabularyState> {
        Option::as_ref(&self.vocabulary_state)
    }
    /// <p>The date and time that the vocabulary was created.</p>
    pub fn last_modified_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        Option::as_ref(&self.last_modified_time)
    }
    /// <p>If the <code>VocabularyState</code> field is <code>FAILED</code>, this field contains information about why the job failed.</p>
    pub fn failure_reason(&self) -> std::option::Option<&str> {
        self.failure_reason.as_ref().map(|reason| reason.as_str())
    }
}
impl std::fmt::Debug for CreateVocabularyOutput {
    // Field order matches the struct declaration for predictable output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("CreateVocabularyOutput")
            .field("vocabulary_name", &self.vocabulary_name)
            .field("language_code", &self.language_code)
            .field("vocabulary_state", &self.vocabulary_state)
            .field("last_modified_time", &self.last_modified_time)
            .field("failure_reason", &self.failure_reason)
            .finish()
    }
}
/// See [`CreateVocabularyOutput`](crate::output::CreateVocabularyOutput)
pub mod create_vocabulary_output {
    /// A builder for [`CreateVocabularyOutput`](crate::output::CreateVocabularyOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) vocabulary_name: std::option::Option<std::string::String>,
        pub(crate) language_code: std::option::Option<crate::model::LanguageCode>,
        pub(crate) vocabulary_state: std::option::Option<crate::model::VocabularyState>,
        pub(crate) last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) failure_reason: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The name of the vocabulary.</p>
        pub fn vocabulary_name(self, input: impl Into<std::string::String>) -> Self {
            Self {
                vocabulary_name: Some(input.into()),
                ..self
            }
        }
        /// <p>The name of the vocabulary.</p>
        pub fn set_vocabulary_name(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                vocabulary_name: input,
                ..self
            }
        }
        /// <p>The language code of the vocabulary entries.</p>
        pub fn language_code(self, input: crate::model::LanguageCode) -> Self {
            Self {
                language_code: Some(input),
                ..self
            }
        }
        /// <p>The language code of the vocabulary entries.</p>
        pub fn set_language_code(
            self,
            input: std::option::Option<crate::model::LanguageCode>,
        ) -> Self {
            Self {
                language_code: input,
                ..self
            }
        }
        /// <p>The processing state of the vocabulary. When the <code>VocabularyState</code> field contains <code>READY</code> the vocabulary is ready to be used in a <code>StartTranscriptionJob</code> request.</p>
        pub fn vocabulary_state(self, input: crate::model::VocabularyState) -> Self {
            Self {
                vocabulary_state: Some(input),
                ..self
            }
        }
        /// <p>The processing state of the vocabulary. When the <code>VocabularyState</code> field contains <code>READY</code> the vocabulary is ready to be used in a <code>StartTranscriptionJob</code> request.</p>
        pub fn set_vocabulary_state(
            self,
            input: std::option::Option<crate::model::VocabularyState>,
        ) -> Self {
            Self {
                vocabulary_state: input,
                ..self
            }
        }
        /// <p>The date and time that the vocabulary was created.</p>
        pub fn last_modified_time(self, input: aws_smithy_types::DateTime) -> Self {
            Self {
                last_modified_time: Some(input),
                ..self
            }
        }
        /// <p>The date and time that the vocabulary was created.</p>
        pub fn set_last_modified_time(
            self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            Self {
                last_modified_time: input,
                ..self
            }
        }
        /// <p>If the <code>VocabularyState</code> field is <code>FAILED</code>, this field contains information about why the job failed.</p>
        pub fn failure_reason(self, input: impl Into<std::string::String>) -> Self {
            Self {
                failure_reason: Some(input.into()),
                ..self
            }
        }
        /// <p>If the <code>VocabularyState</code> field is <code>FAILED</code>, this field contains information about why the job failed.</p>
        pub fn set_failure_reason(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                failure_reason: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`CreateVocabularyOutput`](crate::output::CreateVocabularyOutput)
        pub fn build(self) -> crate::output::CreateVocabularyOutput {
            // Destructure so a newly added builder field cannot be silently dropped.
            let Self {
                vocabulary_name,
                language_code,
                vocabulary_state,
                last_modified_time,
                failure_reason,
            } = self;
            crate::output::CreateVocabularyOutput {
                vocabulary_name,
                language_code,
                vocabulary_state,
                last_modified_time,
                failure_reason,
            }
        }
    }
}
impl CreateVocabularyOutput {
    /// Creates a new builder-style object to manufacture [`CreateVocabularyOutput`](crate::output::CreateVocabularyOutput)
    pub fn builder() -> crate::output::create_vocabulary_output::Builder {
        // Builder derives `Default`; the return type drives inference here.
        Default::default()
    }
}
/// Output type for the `CreateMedicalVocabulary` operation; every field is optional.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateMedicalVocabularyOutput {
    /// <p>The name of the vocabulary. The name must be unique within an Amazon Web Services account and is case sensitive.</p>
    pub vocabulary_name: std::option::Option<std::string::String>,
    /// <p>The language code for the entries in your custom vocabulary. US English (en-US) is the only valid language code for Amazon Transcribe Medical.</p>
    pub language_code: std::option::Option<crate::model::LanguageCode>,
    /// <p>The processing state of your custom vocabulary in Amazon Transcribe Medical. If the state is <code>READY</code>, you can use the vocabulary in a <code>StartMedicalTranscriptionJob</code> request.</p>
    pub vocabulary_state: std::option::Option<crate::model::VocabularyState>,
    /// <p>The date and time that you created the vocabulary.</p>
    pub last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>If the <code>VocabularyState</code> field is <code>FAILED</code>, this field contains information about why the job failed.</p>
    pub failure_reason: std::option::Option<std::string::String>,
}
impl CreateMedicalVocabularyOutput {
    /// <p>The name of the vocabulary. The name must be unique within an Amazon Web Services account and is case sensitive.</p>
    pub fn vocabulary_name(&self) -> std::option::Option<&str> {
        self.vocabulary_name.as_ref().map(|name| name.as_str())
    }
    /// <p>The language code for the entries in your custom vocabulary. US English (en-US) is the only valid language code for Amazon Transcribe Medical.</p>
    pub fn language_code(&self) -> std::option::Option<&crate::model::LanguageCode> {
        Option::as_ref(&self.language_code)
    }
    /// <p>The processing state of your custom vocabulary in Amazon Transcribe Medical. If the state is <code>READY</code>, you can use the vocabulary in a <code>StartMedicalTranscriptionJob</code> request.</p>
    pub fn vocabulary_state(&self) -> std::option::Option<&crate::model::VocabularyState> {
        Option::as_ref(&self.vocabulary_state)
    }
    /// <p>The date and time that you created the vocabulary.</p>
    pub fn last_modified_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        Option::as_ref(&self.last_modified_time)
    }
    /// <p>If the <code>VocabularyState</code> field is <code>FAILED</code>, this field contains information about why the job failed.</p>
    pub fn failure_reason(&self) -> std::option::Option<&str> {
        self.failure_reason.as_ref().map(|reason| reason.as_str())
    }
}
impl std::fmt::Debug for CreateMedicalVocabularyOutput {
    // Field order matches the struct declaration for predictable output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("CreateMedicalVocabularyOutput")
            .field("vocabulary_name", &self.vocabulary_name)
            .field("language_code", &self.language_code)
            .field("vocabulary_state", &self.vocabulary_state)
            .field("last_modified_time", &self.last_modified_time)
            .field("failure_reason", &self.failure_reason)
            .finish()
    }
}
/// See [`CreateMedicalVocabularyOutput`](crate::output::CreateMedicalVocabularyOutput)
pub mod create_medical_vocabulary_output {
    /// A builder for [`CreateMedicalVocabularyOutput`](crate::output::CreateMedicalVocabularyOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) vocabulary_name: std::option::Option<std::string::String>,
        pub(crate) language_code: std::option::Option<crate::model::LanguageCode>,
        pub(crate) vocabulary_state: std::option::Option<crate::model::VocabularyState>,
        pub(crate) last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) failure_reason: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The name of the vocabulary. The name must be unique within an Amazon Web Services account and is case sensitive.</p>
        pub fn vocabulary_name(self, input: impl Into<std::string::String>) -> Self {
            Self {
                vocabulary_name: Some(input.into()),
                ..self
            }
        }
        /// <p>The name of the vocabulary. The name must be unique within an Amazon Web Services account and is case sensitive.</p>
        pub fn set_vocabulary_name(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                vocabulary_name: input,
                ..self
            }
        }
        /// <p>The language code for the entries in your custom vocabulary. US English (en-US) is the only valid language code for Amazon Transcribe Medical.</p>
        pub fn language_code(self, input: crate::model::LanguageCode) -> Self {
            Self {
                language_code: Some(input),
                ..self
            }
        }
        /// <p>The language code for the entries in your custom vocabulary. US English (en-US) is the only valid language code for Amazon Transcribe Medical.</p>
        pub fn set_language_code(
            self,
            input: std::option::Option<crate::model::LanguageCode>,
        ) -> Self {
            Self {
                language_code: input,
                ..self
            }
        }
        /// <p>The processing state of your custom vocabulary in Amazon Transcribe Medical. If the state is <code>READY</code>, you can use the vocabulary in a <code>StartMedicalTranscriptionJob</code> request.</p>
        pub fn vocabulary_state(self, input: crate::model::VocabularyState) -> Self {
            Self {
                vocabulary_state: Some(input),
                ..self
            }
        }
        /// <p>The processing state of your custom vocabulary in Amazon Transcribe Medical. If the state is <code>READY</code>, you can use the vocabulary in a <code>StartMedicalTranscriptionJob</code> request.</p>
        pub fn set_vocabulary_state(
            self,
            input: std::option::Option<crate::model::VocabularyState>,
        ) -> Self {
            Self {
                vocabulary_state: input,
                ..self
            }
        }
        /// <p>The date and time that you created the vocabulary.</p>
        pub fn last_modified_time(self, input: aws_smithy_types::DateTime) -> Self {
            Self {
                last_modified_time: Some(input),
                ..self
            }
        }
        /// <p>The date and time that you created the vocabulary.</p>
        pub fn set_last_modified_time(
            self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            Self {
                last_modified_time: input,
                ..self
            }
        }
        /// <p>If the <code>VocabularyState</code> field is <code>FAILED</code>, this field contains information about why the job failed.</p>
        pub fn failure_reason(self, input: impl Into<std::string::String>) -> Self {
            Self {
                failure_reason: Some(input.into()),
                ..self
            }
        }
        /// <p>If the <code>VocabularyState</code> field is <code>FAILED</code>, this field contains information about why the job failed.</p>
        pub fn set_failure_reason(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                failure_reason: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`CreateMedicalVocabularyOutput`](crate::output::CreateMedicalVocabularyOutput)
        pub fn build(self) -> crate::output::CreateMedicalVocabularyOutput {
            // Destructure so a newly added builder field cannot be silently dropped.
            let Self {
                vocabulary_name,
                language_code,
                vocabulary_state,
                last_modified_time,
                failure_reason,
            } = self;
            crate::output::CreateMedicalVocabularyOutput {
                vocabulary_name,
                language_code,
                vocabulary_state,
                last_modified_time,
                failure_reason,
            }
        }
    }
}
impl CreateMedicalVocabularyOutput {
    /// Creates a new builder-style object to manufacture [`CreateMedicalVocabularyOutput`](crate::output::CreateMedicalVocabularyOutput)
    pub fn builder() -> crate::output::create_medical_vocabulary_output::Builder {
        // Builder derives `Default`; the return type drives inference here.
        Default::default()
    }
}
/// Output type for the `CreateLanguageModel` operation; every field is optional.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateLanguageModelOutput {
    /// <p>The language code of the text you've used to create a custom language model.</p>
    pub language_code: std::option::Option<crate::model::ClmLanguageCode>,
    /// <p>The Amazon Transcribe standard language model, or base model you've used to create a custom language model.</p>
    pub base_model_name: std::option::Option<crate::model::BaseModelName>,
    /// <p>The name you've chosen for your custom language model.</p>
    pub model_name: std::option::Option<std::string::String>,
    /// <p>The data access role and Amazon S3 prefixes you've chosen to create your custom language model.</p>
    pub input_data_config: std::option::Option<crate::model::InputDataConfig>,
    /// <p>The status of the custom language model. When the status is <code>COMPLETED</code> the model is ready to use.</p>
    pub model_status: std::option::Option<crate::model::ModelStatus>,
}
impl CreateLanguageModelOutput {
    /// <p>The language code of the text you've used to create a custom language model.</p>
    pub fn language_code(&self) -> std::option::Option<&crate::model::ClmLanguageCode> {
        Option::as_ref(&self.language_code)
    }
    /// <p>The Amazon Transcribe standard language model, or base model you've used to create a custom language model.</p>
    pub fn base_model_name(&self) -> std::option::Option<&crate::model::BaseModelName> {
        Option::as_ref(&self.base_model_name)
    }
    /// <p>The name you've chosen for your custom language model.</p>
    pub fn model_name(&self) -> std::option::Option<&str> {
        self.model_name.as_ref().map(|name| name.as_str())
    }
    /// <p>The data access role and Amazon S3 prefixes you've chosen to create your custom language model.</p>
    pub fn input_data_config(&self) -> std::option::Option<&crate::model::InputDataConfig> {
        Option::as_ref(&self.input_data_config)
    }
    /// <p>The status of the custom language model. When the status is <code>COMPLETED</code> the model is ready to use.</p>
    pub fn model_status(&self) -> std::option::Option<&crate::model::ModelStatus> {
        Option::as_ref(&self.model_status)
    }
}
impl std::fmt::Debug for CreateLanguageModelOutput {
    // Field order matches the struct declaration for predictable output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("CreateLanguageModelOutput")
            .field("language_code", &self.language_code)
            .field("base_model_name", &self.base_model_name)
            .field("model_name", &self.model_name)
            .field("input_data_config", &self.input_data_config)
            .field("model_status", &self.model_status)
            .finish()
    }
}
/// See [`CreateLanguageModelOutput`](crate::output::CreateLanguageModelOutput)
pub mod create_language_model_output {
    /// A builder for [`CreateLanguageModelOutput`](crate::output::CreateLanguageModelOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) language_code: std::option::Option<crate::model::ClmLanguageCode>,
        pub(crate) base_model_name: std::option::Option<crate::model::BaseModelName>,
        pub(crate) model_name: std::option::Option<std::string::String>,
        pub(crate) input_data_config: std::option::Option<crate::model::InputDataConfig>,
        pub(crate) model_status: std::option::Option<crate::model::ModelStatus>,
    }
    impl Builder {
        /// <p>The language code of the text you've used to create a custom language model.</p>
        pub fn language_code(self, input: crate::model::ClmLanguageCode) -> Self {
            Self {
                language_code: Some(input),
                ..self
            }
        }
        /// <p>The language code of the text you've used to create a custom language model.</p>
        pub fn set_language_code(
            self,
            input: std::option::Option<crate::model::ClmLanguageCode>,
        ) -> Self {
            Self {
                language_code: input,
                ..self
            }
        }
        /// <p>The Amazon Transcribe standard language model, or base model you've used to create a custom language model.</p>
        pub fn base_model_name(self, input: crate::model::BaseModelName) -> Self {
            Self {
                base_model_name: Some(input),
                ..self
            }
        }
        /// <p>The Amazon Transcribe standard language model, or base model you've used to create a custom language model.</p>
        pub fn set_base_model_name(
            self,
            input: std::option::Option<crate::model::BaseModelName>,
        ) -> Self {
            Self {
                base_model_name: input,
                ..self
            }
        }
        /// <p>The name you've chosen for your custom language model.</p>
        pub fn model_name(self, input: impl Into<std::string::String>) -> Self {
            Self {
                model_name: Some(input.into()),
                ..self
            }
        }
        /// <p>The name you've chosen for your custom language model.</p>
        pub fn set_model_name(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                model_name: input,
                ..self
            }
        }
        /// <p>The data access role and Amazon S3 prefixes you've chosen to create your custom language model.</p>
        pub fn input_data_config(self, input: crate::model::InputDataConfig) -> Self {
            Self {
                input_data_config: Some(input),
                ..self
            }
        }
        /// <p>The data access role and Amazon S3 prefixes you've chosen to create your custom language model.</p>
        pub fn set_input_data_config(
            self,
            input: std::option::Option<crate::model::InputDataConfig>,
        ) -> Self {
            Self {
                input_data_config: input,
                ..self
            }
        }
        /// <p>The status of the custom language model. When the status is <code>COMPLETED</code> the model is ready to use.</p>
        pub fn model_status(self, input: crate::model::ModelStatus) -> Self {
            Self {
                model_status: Some(input),
                ..self
            }
        }
        /// <p>The status of the custom language model. When the status is <code>COMPLETED</code> the model is ready to use.</p>
        pub fn set_model_status(
            self,
            input: std::option::Option<crate::model::ModelStatus>,
        ) -> Self {
            Self {
                model_status: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`CreateLanguageModelOutput`](crate::output::CreateLanguageModelOutput)
        pub fn build(self) -> crate::output::CreateLanguageModelOutput {
            // Destructure so a newly added builder field cannot be silently dropped.
            let Self {
                language_code,
                base_model_name,
                model_name,
                input_data_config,
                model_status,
            } = self;
            crate::output::CreateLanguageModelOutput {
                language_code,
                base_model_name,
                model_name,
                input_data_config,
                model_status,
            }
        }
    }
}
impl CreateLanguageModelOutput {
    /// Creates a new builder-style object to manufacture [`CreateLanguageModelOutput`](crate::output::CreateLanguageModelOutput)
    pub fn builder() -> crate::output::create_language_model_output::Builder {
        // Builder derives `Default`; the return type drives inference here.
        Default::default()
    }
}
/// Output type for the `CreateCallAnalyticsCategory` operation; the single field is optional.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateCallAnalyticsCategoryOutput {
    /// <p>The rules and associated metadata used to create a category.</p>
    pub category_properties: std::option::Option<crate::model::CategoryProperties>,
}
impl CreateCallAnalyticsCategoryOutput {
    /// <p>The rules and associated metadata used to create a category.</p>
    pub fn category_properties(&self) -> std::option::Option<&crate::model::CategoryProperties> {
        Option::as_ref(&self.category_properties)
    }
}
impl std::fmt::Debug for CreateCallAnalyticsCategoryOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("CreateCallAnalyticsCategoryOutput")
            .field("category_properties", &self.category_properties)
            .finish()
    }
}
/// See [`CreateCallAnalyticsCategoryOutput`](crate::output::CreateCallAnalyticsCategoryOutput)
pub mod create_call_analytics_category_output {
    /// A builder for [`CreateCallAnalyticsCategoryOutput`](crate::output::CreateCallAnalyticsCategoryOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) category_properties: std::option::Option<crate::model::CategoryProperties>,
    }
    impl Builder {
        /// <p>The rules and associated metadata used to create a category.</p>
        pub fn category_properties(self, input: crate::model::CategoryProperties) -> Self {
            Self {
                category_properties: Some(input),
                ..self
            }
        }
        /// <p>The rules and associated metadata used to create a category.</p>
        pub fn set_category_properties(
            self,
            input: std::option::Option<crate::model::CategoryProperties>,
        ) -> Self {
            Self {
                category_properties: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`CreateCallAnalyticsCategoryOutput`](crate::output::CreateCallAnalyticsCategoryOutput)
        pub fn build(self) -> crate::output::CreateCallAnalyticsCategoryOutput {
            // Destructure so a newly added builder field cannot be silently dropped.
            let Self {
                category_properties,
            } = self;
            crate::output::CreateCallAnalyticsCategoryOutput {
                category_properties,
            }
        }
    }
}
impl CreateCallAnalyticsCategoryOutput {
    /// Creates a new builder-style object to manufacture [`CreateCallAnalyticsCategoryOutput`](crate::output::CreateCallAnalyticsCategoryOutput)
    pub fn builder() -> crate::output::create_call_analytics_category_output::Builder {
        // Builder derives `Default`; the return type drives inference here.
        Default::default()
    }
}
| 52.884466 | 475 | 0.672388 |
2650ddac93e8f19c761b2f05198c6926c780c6ad
| 9,365 |
// Declare one marker type per peripheral-signal kind via the `bobbin_mcu`
// `signal_type!` macro (signal name, marker type name). These markers
// (SigTx, SigRx, ...) are referenced by the periph_signal!/channel_signal!
// registrations below.
::bobbin_mcu::signal_type!(TX, SigTx);
::bobbin_mcu::signal_type!(RX, SigRx);
::bobbin_mcu::signal_type!(CTS, SigCts);
::bobbin_mcu::signal_type!(RTS, SigRts);
::bobbin_mcu::signal_type!(CK, SigCk);
::bobbin_mcu::signal_type!(I2C_SCL, SigI2cScl);
::bobbin_mcu::signal_type!(I2C_SDA, SigI2cSda);
::bobbin_mcu::signal_type!(I2C_SMBAL, SigI2cSmbal);
::bobbin_mcu::signal_type!(SPI_NSS, SigSpiNss);
::bobbin_mcu::signal_type!(SPI_MISO, SigSpiMiso);
::bobbin_mcu::signal_type!(SPI_MOSI, SigSpiMosi);
::bobbin_mcu::signal_type!(SPI_SCK, SigSpiSck);
::bobbin_mcu::signal_type!(ADC, SigAdc);
::bobbin_mcu::signal_type!(DAC, SigDac);
::bobbin_mcu::signal_type!(TIM, SigTim);
::bobbin_mcu::signal_type!(TIMN, SigTimn);
// IWDG (no signals registered)
// WWDG (no signals registered)
// CRC (no signals registered)
// EXTI (no signals registered)
// GPIO (no signals registered)
// USART: each U(S)ART/UART instance is associated with TX, RX, CTS, RTS
// and CK signal markers.
::bobbin_mcu::periph_signal!(super::usart::Usart1, SigTx);
::bobbin_mcu::periph_signal!(super::usart::Usart1, SigRx);
::bobbin_mcu::periph_signal!(super::usart::Usart1, SigCts);
::bobbin_mcu::periph_signal!(super::usart::Usart1, SigRts);
::bobbin_mcu::periph_signal!(super::usart::Usart1, SigCk);
::bobbin_mcu::periph_signal!(super::usart::Usart2, SigTx);
::bobbin_mcu::periph_signal!(super::usart::Usart2, SigRx);
::bobbin_mcu::periph_signal!(super::usart::Usart2, SigCts);
::bobbin_mcu::periph_signal!(super::usart::Usart2, SigRts);
::bobbin_mcu::periph_signal!(super::usart::Usart2, SigCk);
::bobbin_mcu::periph_signal!(super::usart::Usart3, SigTx);
::bobbin_mcu::periph_signal!(super::usart::Usart3, SigRx);
::bobbin_mcu::periph_signal!(super::usart::Usart3, SigCts);
::bobbin_mcu::periph_signal!(super::usart::Usart3, SigRts);
::bobbin_mcu::periph_signal!(super::usart::Usart3, SigCk);
::bobbin_mcu::periph_signal!(super::usart::Uart4, SigTx);
::bobbin_mcu::periph_signal!(super::usart::Uart4, SigRx);
::bobbin_mcu::periph_signal!(super::usart::Uart4, SigCts);
::bobbin_mcu::periph_signal!(super::usart::Uart4, SigRts);
::bobbin_mcu::periph_signal!(super::usart::Uart4, SigCk);
::bobbin_mcu::periph_signal!(super::usart::Uart5, SigTx);
::bobbin_mcu::periph_signal!(super::usart::Uart5, SigRx);
::bobbin_mcu::periph_signal!(super::usart::Uart5, SigCts);
::bobbin_mcu::periph_signal!(super::usart::Uart5, SigRts);
::bobbin_mcu::periph_signal!(super::usart::Uart5, SigCk);
// I2C: each instance carries SCL, SDA and SMBAL signal markers.
::bobbin_mcu::periph_signal!(super::i2c::I2c1, SigI2cScl);
::bobbin_mcu::periph_signal!(super::i2c::I2c1, SigI2cSda);
::bobbin_mcu::periph_signal!(super::i2c::I2c1, SigI2cSmbal);
::bobbin_mcu::periph_signal!(super::i2c::I2c2, SigI2cScl);
::bobbin_mcu::periph_signal!(super::i2c::I2c2, SigI2cSda);
::bobbin_mcu::periph_signal!(super::i2c::I2c2, SigI2cSmbal);
::bobbin_mcu::periph_signal!(super::i2c::I2c3, SigI2cScl);
::bobbin_mcu::periph_signal!(super::i2c::I2c3, SigI2cSda);
::bobbin_mcu::periph_signal!(super::i2c::I2c3, SigI2cSmbal);
// SPI: each instance carries NSS, MISO, MOSI and SCK signal markers.
::bobbin_mcu::periph_signal!(super::spi::Spi1, SigSpiNss);
::bobbin_mcu::periph_signal!(super::spi::Spi1, SigSpiMiso);
::bobbin_mcu::periph_signal!(super::spi::Spi1, SigSpiMosi);
::bobbin_mcu::periph_signal!(super::spi::Spi1, SigSpiSck);
::bobbin_mcu::periph_signal!(super::spi::Spi2, SigSpiNss);
::bobbin_mcu::periph_signal!(super::spi::Spi2, SigSpiMiso);
::bobbin_mcu::periph_signal!(super::spi::Spi2, SigSpiMosi);
::bobbin_mcu::periph_signal!(super::spi::Spi2, SigSpiSck);
::bobbin_mcu::periph_signal!(super::spi::Spi3, SigSpiNss);
::bobbin_mcu::periph_signal!(super::spi::Spi3, SigSpiMiso);
::bobbin_mcu::periph_signal!(super::spi::Spi3, SigSpiMosi);
::bobbin_mcu::periph_signal!(super::spi::Spi3, SigSpiSck);
// ADC: channels 1-15 of ADC1..ADC4 each carry the ADC signal marker.
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch1, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch2, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch3, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch4, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch5, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch6, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch7, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch8, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch9, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch10, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch11, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch12, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch13, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch14, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc1Ch15, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch1, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch2, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch3, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch4, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch5, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch6, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch7, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch8, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch9, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch10, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch11, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch12, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch13, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch14, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc2Ch15, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch1, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch2, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch3, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch4, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch5, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch6, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch7, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch8, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch9, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch10, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch11, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch12, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch13, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch14, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc3Ch15, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch1, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch2, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch3, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch4, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch5, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch6, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch7, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch8, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch9, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch10, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch11, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch12, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch13, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch14, SigAdc);
::bobbin_mcu::channel_signal!(super::adc::Adc4Ch15, SigAdc);
// C_ADC (no signals registered)
// DAC: both DAC channels carry the DAC signal marker.
::bobbin_mcu::channel_signal!(super::dac::DacCh1, SigDac);
::bobbin_mcu::channel_signal!(super::dac::DacCh2, SigDac);
// TIM_BAS (no signals registered)
// TIM_GEN: general-purpose timer channels; TIM16/TIM17 channel 1 also
// registers the complementary (TIMN) signal marker.
::bobbin_mcu::channel_signal!(super::tim_gen::Tim2Ch1, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim2Ch2, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim2Ch3, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim2Ch4, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim3Ch1, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim3Ch2, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim3Ch3, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim3Ch4, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim4Ch1, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim4Ch2, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim4Ch3, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim4Ch4, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim15Ch1, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim15Ch2, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim16Ch1, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim16Ch1, SigTimn);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim17Ch1, SigTim);
::bobbin_mcu::channel_signal!(super::tim_gen::Tim17Ch1, SigTimn);
// TIM_ADV: advanced timer channels (TIM1/TIM8/TIM20, channels 1-4).
::bobbin_mcu::channel_signal!(super::tim_adv::Tim1Ch1, SigTim);
::bobbin_mcu::channel_signal!(super::tim_adv::Tim1Ch2, SigTim);
::bobbin_mcu::channel_signal!(super::tim_adv::Tim1Ch3, SigTim);
::bobbin_mcu::channel_signal!(super::tim_adv::Tim1Ch4, SigTim);
::bobbin_mcu::channel_signal!(super::tim_adv::Tim8Ch1, SigTim);
::bobbin_mcu::channel_signal!(super::tim_adv::Tim8Ch2, SigTim);
::bobbin_mcu::channel_signal!(super::tim_adv::Tim8Ch3, SigTim);
::bobbin_mcu::channel_signal!(super::tim_adv::Tim8Ch4, SigTim);
::bobbin_mcu::channel_signal!(super::tim_adv::Tim20Ch1, SigTim);
::bobbin_mcu::channel_signal!(super::tim_adv::Tim20Ch2, SigTim);
::bobbin_mcu::channel_signal!(super::tim_adv::Tim20Ch3, SigTim);
::bobbin_mcu::channel_signal!(super::tim_adv::Tim20Ch4, SigTim);
// DMA (no signals registered)
| 31.112957 | 65 | 0.739776 |
5dc00c0de3e15a218f403fc2d01a36cc13a62500
| 2,585 |
extern crate gilrs;
#[macro_use]
extern crate glium;
extern crate cgmath;
extern crate image;
mod util;
mod input;
mod sprites;
mod collision;
mod scene;
mod display;
use glium::glutin;
use std::time::Duration;
// Physical window size in pixels.
const WINDOW_WIDTH: u32 = 800;
const WINDOW_HEIGHT: u32 = 800;
// Logical (virtual) resolution the game renders at before upscaling.
// NOTE(review): "WIDHT" is a typo for "WIDTH"; renaming would touch every
// use site (see `main`), so it is only flagged here.
const VIRTUAL_WIDHT: u32 = 200;
const VIRTUAL_HEIGHT: u32 = 200;
// Target frames per second for the fixed-rate main loop.
const FRAME_RATE: u64 = 60;
/// Builds the application window and its glium OpenGL facade.
///
/// Requests the latest available core-profile OpenGL context with vsync
/// enabled, sized to `WINDOW_WIDTH` x `WINDOW_HEIGHT`.
///
/// # Panics
/// Panics if the window or GL context cannot be created.
fn create_window() -> glium::backend::glutin_backend::GlutinFacade {
    use glium::DisplayBuild;
    glutin::WindowBuilder::new()
        .with_dimensions(WINDOW_WIDTH, WINDOW_HEIGHT)
        .with_gl(glutin::GlRequest::Latest)
        .with_gl_profile(glutin::GlProfile::Core)
        .with_vsync()
        .with_title("yavss")
        .build_glium()
        // Fixed typo in the diagnostic: "gluim" -> "glium".
        .expect("Can't create glium window")
}
/// Entry point: runs a fixed-rate game loop and prints an FPS report
/// every `FRAMES_TO_COUNT` frames.
fn main() {
    use std::time::Instant;
    let virtual_dimensions = (VIRTUAL_WIDHT, VIRTUAL_HEIGHT);
    // Target duration of one frame at FRAME_RATE fps.
    let frame_rate_loop_duration = Duration::from_millis(1_000u64 / FRAME_RATE);
    let sprites = sprites::SpritesData::new(virtual_dimensions);
    let mut scene = scene::Scene::new(&sprites);
    let window = create_window();
    let mut input_poller =
        input::InputPoller::new(window.get_window().expect("Can't get window ref"));
    let mut instant = Instant::now();
    let mut renderer = display::Renderer::new(&window, &sprites, virtual_dimensions);
    // FPS accounting state.
    let mut frame_counter = 0usize;
    let mut frame_counter_instant = Instant::now();
    const FRAMES_TO_COUNT: usize = 600;
    'main_loop: loop {
        // Frame limiter: if the previous frame finished early, sleep off the
        // remainder so the simulation ticks at roughly FRAME_RATE
        // (vsync should normally pace us; this is a fallback).
        let mut new_instant = Instant::now();
        let mut duration = new_instant - instant;
        if duration < frame_rate_loop_duration {
            std::thread::sleep(frame_rate_loop_duration - duration);
            new_instant = Instant::now();
            duration = new_instant - instant;
        }
        instant = new_instant;
        input_poller.poll_events();
        if input_poller.exit() {
            break 'main_loop;
        }
        // Advance the simulation by the real elapsed time, then draw.
        scene.tick(&input_poller, duration);
        renderer.render(&window, &scene);
        frame_counter += 1;
        if frame_counter >= FRAMES_TO_COUNT {
            let new_frame_instant = Instant::now();
            let duration = new_frame_instant - frame_counter_instant;
            // frames / elapsed-seconds; subsec_nanos converted to seconds.
            println!("{} fps, {} objects",
                     frame_counter as f32 /
                     (duration.as_secs() as f32 +
                      duration.subsec_nanos() as f32 / 1_000_000_000.0),
                     scene.total_objects());
            frame_counter = 0;
            frame_counter_instant = Instant::now();
        }
    }
}
| 31.52439 | 85 | 0.630948 |
7571dc400ae7291e6b731b0f3e8e0cd56a1cf088
| 154 |
extern crate bytes;
#[macro_use]
extern crate futures;
#[macro_use]
extern crate log;
pub mod codec;
pub mod framed;
mod framed_read;
mod framed_write;
| 12.833333 | 21 | 0.766234 |
fc10e7af36c524b3a45cd15b38c9862b203aaa8f
| 21,030 |
use crate::args;
use crate::output_type::OutputType;
use crate::utils::{build_value_repr, check_reserved_name, get_crate_name, get_rustdoc};
use inflector::Inflector;
use proc_macro::TokenStream;
use quote::quote;
use syn::{Block, Error, FnArg, ImplItem, ItemImpl, Pat, Result, ReturnType, Type, TypeReference};
/// Expands a GraphQL object attribute applied to an `impl` block.
///
/// Walks the impl's methods: methods carrying an `#[entity]` attribute become
/// federation entity resolvers, methods carrying `#[field]` become object
/// fields. The original `impl` block is re-emitted (with method bodies
/// rewritten to return `FieldResult`) together with generated `Type`,
/// `ObjectType` and `OutputValueType` implementations.
pub fn generate(object_args: &args::Object, item_impl: &mut ItemImpl) -> Result<TokenStream> {
    let crate_name = get_crate_name(object_args.internal);
    // Resolve `Self` to a type path; its last segment is the default GraphQL
    // type name. Non-path self types are rejected.
    let (self_ty, self_name) = match item_impl.self_ty.as_ref() {
        Type::Path(path) => (
            path,
            path.path
                .segments
                .last()
                .map(|s| s.ident.to_string())
                .unwrap(),
        ),
        _ => return Err(Error::new_spanned(&item_impl.self_ty, "Invalid type")),
    };
    let generics = &item_impl.generics;
    let where_clause = &item_impl.generics.where_clause;
    let extends = object_args.extends;
    // An explicit `name = "..."` on the attribute overrides the Rust name.
    let gql_typename = object_args
        .name
        .clone()
        .unwrap_or_else(|| self_name.clone());
    check_reserved_name(&gql_typename, object_args.internal)?;
    // Description: explicit `desc` wins, otherwise fall back to the rustdoc.
    let desc = object_args
        .desc
        .clone()
        .or_else(|| get_rustdoc(&item_impl.attrs).ok().flatten())
        .map(|s| quote! { Some(#s) })
        .unwrap_or_else(|| quote! {None});
    // Token streams accumulated per method and spliced into the output impls.
    let mut resolvers = Vec::new();
    let mut schema_fields = Vec::new();
    let mut find_entities = Vec::new();
    let mut add_keys = Vec::new();
    let mut create_entity_types = Vec::new();
    for item in &mut item_impl.items {
        if let ImplItem::Method(method) = item {
            // ---- `#[entity]` methods: federation entity resolvers ----
            if let Some(entity) = args::Entity::parse(&crate_name, &method.attrs)? {
                let ty = match &method.sig.output {
                    ReturnType::Type(_, ty) => OutputType::parse(ty)?,
                    ReturnType::Default => {
                        return Err(Error::new_spanned(&method.sig.output, "Missing type"))
                    }
                };
                // Collect (ident, type, attrs) for each key argument; a
                // `&Context` second argument suppresses the synthetic one.
                let mut create_ctx = true;
                let mut args = Vec::new();
                for (idx, arg) in method.sig.inputs.iter_mut().enumerate() {
                    if let FnArg::Receiver(receiver) = arg {
                        if idx != 0 {
                            return Err(Error::new_spanned(
                                receiver,
                                "The self receiver must be the first parameter.",
                            ));
                        }
                    } else if let FnArg::Typed(pat) = arg {
                        if idx == 0 {
                            return Err(Error::new_spanned(
                                pat,
                                "The self receiver must be the first parameter.",
                            ));
                        }
                        match (&*pat.pat, &*pat.ty) {
                            (Pat::Ident(arg_ident), Type::Path(arg_ty)) => {
                                args.push((
                                    arg_ident.clone(),
                                    arg_ty.clone(),
                                    args::Argument::parse(&crate_name, &pat.attrs)?,
                                ));
                                pat.attrs.clear();
                            }
                            (arg, Type::Reference(TypeReference { elem, .. })) => {
                                if let Type::Path(path) = elem.as_ref() {
                                    if idx != 1
                                        || path.path.segments.last().unwrap().ident != "Context"
                                    {
                                        return Err(Error::new_spanned(
                                            arg,
                                            "The Context must be the second argument.",
                                        ));
                                    } else {
                                        create_ctx = false;
                                    }
                                }
                            }
                            _ => return Err(Error::new_spanned(arg, "Invalid argument type.")),
                        }
                    }
                }
                // No user-supplied `&Context`: insert an ignored one so the
                // rewritten method keeps a uniform signature.
                if create_ctx {
                    let arg =
                        syn::parse2::<FnArg>(quote! { _: &#crate_name::Context<'_> }).unwrap();
                    method.sig.inputs.insert(1, arg);
                }
                let entity_type = ty.value_type();
                // Build, per key argument: a match pattern, an extractor from
                // the representation params, and the space-separated key list
                // registered with the schema.
                let mut key_pat = Vec::new();
                let mut key_getter = Vec::new();
                let mut use_keys = Vec::new();
                let mut keys = Vec::new();
                let mut keys_str = String::new();
                for (ident, ty, args::Argument { name, .. }) in &args {
                    let name = name
                        .clone()
                        .unwrap_or_else(|| ident.ident.to_string().to_camel_case());
                    if !keys_str.is_empty() {
                        keys_str.push(' ');
                    }
                    keys_str.push_str(&name);
                    key_pat.push(quote! {
                        Some(#ident)
                    });
                    key_getter.push(quote! {
                        params.get(#name).and_then(|value| {
                            let value: Option<#ty> = #crate_name::InputValueType::parse(value.clone()).ok();
                            value
                        })
                    });
                    keys.push(name);
                    use_keys.push(ident);
                }
                add_keys.push(quote! { registry.add_keys(&<#entity_type as #crate_name::Type>::type_name(), #keys_str); });
                create_entity_types.push(
                    quote! { <#entity_type as #crate_name::Type>::create_type_info(registry); },
                );
                let field_ident = &method.sig.ident;
                // Plain-value methods are rewritten to return FieldResult so
                // the generated resolver can use a uniform error path.
                if let OutputType::Value(inner_ty) = &ty {
                    let block = &method.block;
                    let new_block = quote!({
                        {
                            let value:#inner_ty = async move #block.await;
                            Ok(value)
                        }
                    });
                    method.block = syn::parse2::<Block>(new_block).expect("invalid block");
                    method.sig.output = syn::parse2::<ReturnType>(
                        quote! { -> #crate_name::FieldResult<#inner_ty> },
                    )
                    .expect("invalid result type");
                }
                let do_find = quote! { self.#field_ident(ctx, #(#use_keys),*).await.map_err(|err| err.into_error(ctx.position()))? };
                let guard = entity.guard.map(
                    |guard| quote! { #guard.check(ctx).await.map_err(|err| err.into_error(ctx.position()))?; },
                );
                // Paired with its key count so entries can later be sorted
                // most-specific (most keys) first.
                find_entities.push((
                    args.len(),
                    quote! {
                        if typename == &<#entity_type as #crate_name::Type>::type_name() {
                            if let (#(#key_pat),*) = (#(#key_getter),*) {
                                #guard
                                let ctx_obj = ctx.with_selection_set(&ctx.selection_set);
                                return #crate_name::OutputValueType::resolve(&#do_find, &ctx_obj, ctx.item).await;
                            }
                        }
                    },
                ));
                // Strip the consumed #[entity] attribute from the re-emitted method.
                method.attrs.remove(
                    method
                        .attrs
                        .iter()
                        .enumerate()
                        .find(|(_, a)| a.path.is_ident("entity"))
                        .map(|(idx, _)| idx)
                        .unwrap(),
                );
            // ---- `#[field]` methods: GraphQL object fields ----
            } else if let Some(field) = args::Field::parse(&crate_name, &method.attrs)? {
                if method.sig.asyncness.is_none() {
                    return Err(Error::new_spanned(&method, "Must be asynchronous"));
                }
                let field_name = field
                    .name
                    .clone()
                    .unwrap_or_else(|| method.sig.ident.to_string().to_camel_case());
                let field_desc = field
                    .desc
                    .as_ref()
                    .map(|s| quote! {Some(#s)})
                    .unwrap_or_else(|| quote! {None});
                let field_deprecation = field
                    .deprecation
                    .as_ref()
                    .map(|s| quote! {Some(#s)})
                    .unwrap_or_else(|| quote! {None});
                let external = field.external;
                let requires = match &field.requires {
                    Some(requires) => quote! { Some(#requires) },
                    None => quote! { None },
                };
                let provides = match &field.provides {
                    Some(provides) => quote! { Some(#provides) },
                    None => quote! { None },
                };
                let ty = match &method.sig.output {
                    ReturnType::Type(_, ty) => OutputType::parse(ty)?,
                    ReturnType::Default => {
                        return Err(Error::new_spanned(&method.sig.output, "Missing type"))
                    }
                };
                let cache_control = {
                    let public = field.cache_control.public;
                    let max_age = field.cache_control.max_age;
                    quote! {
                        #crate_name::CacheControl {
                            public: #public,
                            max_age: #max_age,
                        }
                    }
                };
                // Same argument-collection rules as the entity branch above.
                let mut create_ctx = true;
                let mut args = Vec::new();
                for (idx, arg) in method.sig.inputs.iter_mut().enumerate() {
                    if let FnArg::Receiver(receiver) = arg {
                        if idx != 0 {
                            return Err(Error::new_spanned(
                                receiver,
                                "The self receiver must be the first parameter.",
                            ));
                        }
                    } else if let FnArg::Typed(pat) = arg {
                        if idx == 0 {
                            return Err(Error::new_spanned(
                                pat,
                                "The self receiver must be the first parameter.",
                            ));
                        }
                        match (&*pat.pat, &*pat.ty) {
                            (Pat::Ident(arg_ident), Type::Path(arg_ty)) => {
                                args.push((
                                    arg_ident.clone(),
                                    arg_ty.clone(),
                                    args::Argument::parse(&crate_name, &pat.attrs)?,
                                ));
                                pat.attrs.clear();
                            }
                            (arg, Type::Reference(TypeReference { elem, .. })) => {
                                if let Type::Path(path) = elem.as_ref() {
                                    if idx != 1
                                        || path.path.segments.last().unwrap().ident != "Context"
                                    {
                                        return Err(Error::new_spanned(
                                            arg,
                                            "The Context must be the second argument.",
                                        ));
                                    }
                                    create_ctx = false;
                                }
                            }
                            _ => return Err(Error::new_spanned(arg, "Invalid argument type.")),
                        }
                    }
                }
                if create_ctx {
                    let arg =
                        syn::parse2::<FnArg>(quote! { _: &#crate_name::Context<'_> }).unwrap();
                    method.sig.inputs.insert(1, arg);
                }
                // Per argument: schema metadata, the identifier used to call
                // the method, and the code that pulls/validates it from ctx.
                let mut schema_args = Vec::new();
                let mut use_params = Vec::new();
                let mut get_params = Vec::new();
                for (
                    ident,
                    ty,
                    args::Argument {
                        name,
                        desc,
                        default,
                        validator,
                    },
                ) in args
                {
                    let name = name
                        .clone()
                        .unwrap_or_else(|| ident.ident.to_string().to_camel_case());
                    let desc = desc
                        .as_ref()
                        .map(|s| quote! {Some(#s)})
                        .unwrap_or_else(|| quote! {None});
                    let schema_default = default
                        .as_ref()
                        .map(|v| {
                            let s = v.to_string();
                            quote! {Some(#s)}
                        })
                        .unwrap_or_else(|| quote! {None});
                    schema_args.push(quote! {
                        args.insert(#name, #crate_name::registry::MetaInputValue {
                            name: #name,
                            description: #desc,
                            ty: <#ty as #crate_name::Type>::create_type_info(registry),
                            default_value: #schema_default,
                            validator: #validator,
                        });
                    });
                    use_params.push(quote! { #ident });
                    // Missing argument falls back to its declared default, or
                    // GraphQL null when no default was given.
                    let default = match &default {
                        Some(default) => {
                            let repr = build_value_repr(&crate_name, &default);
                            quote! {|| #repr }
                        }
                        None => {
                            quote! { || #crate_name::Value::Null }
                        }
                    };
                    get_params.push(quote! {
                        let #ident: #ty = ctx.param_value(#name, #default)?;
                    });
                }
                let schema_ty = ty.value_type();
                schema_fields.push(quote! {
                    fields.insert(#field_name.to_string(), #crate_name::registry::MetaField {
                        name: #field_name.to_string(),
                        description: #field_desc,
                        args: {
                            let mut args = #crate_name::indexmap::IndexMap::new();
                            #(#schema_args)*
                            args
                        },
                        ty: <#schema_ty as #crate_name::Type>::create_type_info(registry),
                        deprecation: #field_deprecation,
                        cache_control: #cache_control,
                        external: #external,
                        provides: #provides,
                        requires: #requires,
                    });
                });
                let field_ident = &method.sig.ident;
                // Same FieldResult rewrite as in the entity branch.
                if let OutputType::Value(inner_ty) = &ty {
                    let block = &method.block;
                    let new_block = quote!({
                        {
                            let value:#inner_ty = async move #block.await;
                            Ok(value)
                        }
                    });
                    method.block = syn::parse2::<Block>(new_block).expect("invalid block");
                    method.sig.output = syn::parse2::<ReturnType>(
                        quote! { -> #crate_name::FieldResult<#inner_ty> },
                    )
                    .expect("invalid result type");
                }
                let resolve_obj = quote! {
                    {
                        let res = self.#field_ident(ctx, #(#use_params),*).await;
                        res.map_err(|err| err.into_error_with_path(ctx.position(), ctx.path_node.as_ref().unwrap().to_json()))?
                    }
                };
                let guard = field
                    .guard
                    .map(|guard| quote! {
                        #guard.check(ctx).await
                            .map_err(|err| err.into_error_with_path(ctx.position(), ctx.path_node.as_ref().unwrap().to_json()))?;
                    });
                resolvers.push(quote! {
                    if ctx.name.node == #field_name {
                        use #crate_name::OutputValueType;
                        #(#get_params)*
                        #guard
                        let ctx_obj = ctx.with_selection_set(&ctx.selection_set);
                        return OutputValueType::resolve(&#resolve_obj, &ctx_obj, ctx.item).await;
                    }
                });
                // Strip the consumed #[field] attribute from the re-emitted method.
                if let Some((idx, _)) = method
                    .attrs
                    .iter()
                    .enumerate()
                    .find(|(_, a)| a.path.is_ident("field"))
                {
                    method.attrs.remove(idx);
                }
            // Method had neither attribute handled above; still drop a stray
            // #[field] marker if present so it does not leak into the output.
            } else if let Some((idx, _)) = method
                .attrs
                .iter()
                .enumerate()
                .find(|(_, a)| a.path.is_ident("field"))
            {
                method.attrs.remove(idx);
            }
        }
    }
    let cache_control = {
        let public = object_args.cache_control.public;
        let max_age = object_args.cache_control.max_age;
        quote! {
            #crate_name::CacheControl {
                public: #public,
                max_age: #max_age,
            }
        }
    };
    // Try the most specific entity resolvers (most key arguments) first.
    find_entities.sort_by(|(a, _), (b, _)| b.cmp(a));
    let find_entities_iter = find_entities.iter().map(|(_, code)| code);
    // Emit the rewritten impl plus the three generated trait impls.
    let expanded = quote! {
        #item_impl
        impl #generics #crate_name::Type for #self_ty #where_clause {
            fn type_name() -> std::borrow::Cow<'static, str> {
                std::borrow::Cow::Borrowed(#gql_typename)
            }
            fn create_type_info(registry: &mut #crate_name::registry::Registry) -> String {
                let ty = registry.create_type::<Self, _>(|registry| #crate_name::registry::MetaType::Object {
                    name: #gql_typename.to_string(),
                    description: #desc,
                    fields: {
                        let mut fields = #crate_name::indexmap::IndexMap::new();
                        #(#schema_fields)*
                        fields
                    },
                    cache_control: #cache_control,
                    extends: #extends,
                    keys: None,
                });
                #(#create_entity_types)*
                #(#add_keys)*
                ty
            }
        }
        #[#crate_name::async_trait::async_trait]
        impl#generics #crate_name::ObjectType for #self_ty #where_clause {
            async fn resolve_field(&self, ctx: &#crate_name::Context<'_>) -> #crate_name::Result<#crate_name::serde_json::Value> {
                #(#resolvers)*
                Err(#crate_name::QueryError::FieldNotFound {
                    field_name: ctx.name.to_string(),
                    object: #gql_typename.to_string(),
                }.into_error(ctx.position()))
            }
            async fn find_entity(&self, ctx: &#crate_name::Context<'_>, params: &#crate_name::Value) -> #crate_name::Result<#crate_name::serde_json::Value> {
                let params = match params {
                    #crate_name::Value::Object(params) => params,
                    _ => return Err(#crate_name::QueryError::EntityNotFound.into_error(ctx.position())),
                };
                let typename = if let Some(#crate_name::Value::String(typename)) = params.get("__typename") {
                    typename
                } else {
                    return Err(#crate_name::QueryError::TypeNameNotExists.into_error(ctx.position()));
                };
                #(#find_entities_iter)*
                Err(#crate_name::QueryError::EntityNotFound.into_error(ctx.position()))
            }
        }
        #[#crate_name::async_trait::async_trait]
        impl #generics #crate_name::OutputValueType for #self_ty #where_clause {
            async fn resolve(&self, ctx: &#crate_name::ContextSelectionSet<'_>, _field: &#crate_name::Positioned<#crate_name::parser::query::Field>) -> #crate_name::Result<#crate_name::serde_json::Value> {
                #crate_name::do_resolve(ctx, self).await
            }
        }
    };
    Ok(expanded.into())
}
| 42.743902 | 205 | 0.398716 |
33aee55b8c7387a6c461af49ecc9a04b3ec7d6e7
| 612 |
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Sums a vector's elements and checks the total.
///
/// Modernized: the pre-1.0 owned-vector literal `~[...]` no longer exists in
/// the language; `vec![...]` plus `Iterator::sum` is the idiomatic, compiling
/// equivalent with identical behavior.
pub fn main() {
    let xs = vec![1, 2, 3, 4, 5];
    let sum: i32 = xs.iter().sum();
    assert_eq!(sum, 15);
}
| 32.210526 | 68 | 0.665033 |
f88b79a849cfbfb02c3b8ab998efc8a4deea441d
| 1,024 |
//! Core definitions related to runtime errors.
use std::path::PathBuf;
use thiserror::Error;
use tower_lsp::lsp_types::*;
/// Runtime errors surfaced by the language server.
///
/// Display strings come from the `thiserror` `#[error]` attributes; the
/// clippy allow silences the shared `…Failed`/`…Not…` naming warning.
#[allow(clippy::enum_variant_names)]
#[derive(Debug, Error, PartialEq)]
pub(crate) enum Error {
    /// The LSP client has not completed initialization yet.
    #[error("client not initialized")]
    ClientNotInitialized,
    /// No open document is tracked under the given URI.
    #[error("document not found: {0}")]
    DocumentNotFound(Url),
    /// File extension does not map to a supported language.
    #[error("invalid language extension: {0}")]
    InvalidLanguageExtension(String),
    /// LSP `languageId` does not map to a supported language.
    #[error("invalid language id: {0}")]
    InvalidLanguageId(String),
    /// A value could not be viewed as `&str`.
    #[error("conversion to &str failed")]
    ToStrFailed,
    /// The path carries no usable extension.
    #[error("failed to get file extension for PathBuf: {0}")]
    PathExtensionFailed(PathBuf),
}
/// Newtype adapter carrying an [`anyhow::Error`] across the JSON-RPC boundary.
pub(crate) struct IntoJsonRpcError(pub(crate) anyhow::Error);

impl From<IntoJsonRpcError> for tower_lsp::jsonrpc::Error {
    /// Maps the wrapped error onto a JSON-RPC "internal error", attaching the
    /// rendered message as the error's `data` payload.
    fn from(error: IntoJsonRpcError) -> Self {
        let message = format!("{}", error.0);
        let mut rpc_error = tower_lsp::jsonrpc::Error::internal_error();
        // Serializing a plain String cannot fail, so unwrap is safe here.
        rpc_error.data = Some(serde_json::to_value(message).unwrap());
        rpc_error
    }
}
| 31.030303 | 85 | 0.681641 |
9187972ba9ff206fd63b9c8486c5acccc1fa3b4e
| 1,176 |
use ark_ff::{Field, PrimeField};
use ark_serialize::{buffer_bit_byte_size, CanonicalSerialize};
use ark_std::rand::Rng;
use ark_std::test_rng;
use crate::*;
use ark_algebra_test_templates::fields::*;
#[test]
fn test_fr() {
    // Field axioms, square roots and prime-field invariants for Fr.
    let mut rng = test_rng();
    let a: Fr = rng.gen();
    let b: Fr = rng.gen();
    field_test(a, b);
    sqrt_field_test(a);
    primefield_test::<Fr>();
}
#[test]
fn test_fq() {
    // Field axioms plus canonical-serialization round-trip for Fq.
    let mut rng = test_rng();
    let a: Fq = rng.gen();
    let b: Fq = rng.gen();
    field_test(a, b);
    primefield_test::<Fq>();
    sqrt_field_test(a);
    // The serialized byte length must match the size derived from the
    // field's bit width.
    let byte_size = a.serialized_size();
    let (_, buffer_size) = buffer_bit_byte_size(Fq::size_in_bits());
    assert_eq!(byte_size, buffer_size);
    field_serialization_test::<Fq>(byte_size);
}
#[test]
fn test_fq3() {
    // Cubic extension field: axioms, sqrt and Frobenius endomorphism.
    let mut rng = test_rng();
    let a: Fq3 = rng.gen();
    let b: Fq3 = rng.gen();
    field_test(a, b);
    sqrt_field_test(a);
    frobenius_test::<Fq3, _>(Fq::characteristic(), 13);
}
#[test]
fn test_fq6() {
    // Sextic extension field: axioms and Frobenius endomorphism.
    let mut rng = test_rng();
    let a: Fq6 = rng.gen();
    let b: Fq6 = rng.gen();
    field_test(a, b);
    frobenius_test::<Fq6, _>(Fq::characteristic(), 13);
}
| 22.188679 | 68 | 0.613095 |
7224ed0b9ea83fee7b0a4181341a34ae8d2f1678
| 1,000 |
use etcd_rs::*;
use std::convert::TryInto;
use tokio::stream::StreamExt;
/// A distributed mutex backed by a single etcd key.
pub struct Mutex {
    // Base name; the actual etcd key is "<name>-lock" (see `lock_name`).
    pub name: String
}
impl Mutex {
    /// The etcd key that guards this mutex.
    fn lock_name(&self) -> String {
        format!("{}-lock", self.name)
    }

    /// Acquires the lock, retrying (asynchronously) until it succeeds.
    ///
    /// Uses an etcd transaction: when the lock key's version equals 0 (the
    /// key does not exist), atomically create it; otherwise the transaction
    /// fails and we retry.
    ///
    /// NOTE(review): this is a busy retry loop with no backoff and no watch
    /// on the key, so contention hammers etcd — consider watching the key's
    /// deletion instead.
    ///
    /// # Errors
    /// Propagates any error from the etcd client.
    pub async fn acquire(&self, client: &Client) -> Result<()> {
        loop {
            // Version == 0 means "key absent": only then may we create it.
            // (Removed an unused `PutRequest` local that the original
            // allocated on every iteration and never used.)
            let not_held = KeyRange::key(self.lock_name());
            let txn = TxnRequest::new()
                .when_version(not_held, TxnCmp::Equal, 0)
                .and_then(PutRequest::new(self.lock_name(), "held"));
            let response: TxnResponse = client.kv().txn(txn).await?;
            if response.is_success() {
                return Ok(());
            }
        }
    }

    /// Releases the lock by deleting its key.
    ///
    /// # Errors
    /// Propagates any error from the etcd client.
    pub async fn release(&self, client: &Client) -> Result<()> {
        let release = DeleteRequest::new(KeyRange::key(self.lock_name()));
        client.kv().delete(release).await?;
        Ok(())
    }
}
| 25 | 75 | 0.534 |
3895229e4e5eb9e1cc8c5334a9f5deeebabf9854
| 1,920 |
#![allow(dead_code)]
extern crate wasm_bindgen;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast; // dyn_into::<>
use web_sys::{ HtmlCanvasElement, WebGl2RenderingContext as GL };
use std::cell::{ RefCell, RefMut };
use std::rc::Rc;
use crate::World;
//##################################################
#[macro_use]
pub mod macros;
pub mod buffer; pub use buffer::{ Buffer };
pub mod util; pub use util::Util;
pub mod vao; pub use vao::{ Vao, VaoCache };
pub mod shader; pub use shader::{ Shader, UniformType, AttribLoc, ShaderCache };
pub mod ubo; pub use ubo::{ Ubo, UboCache };
//##################################################
thread_local!{
pub static GLOBAL_GL: RefCell< Option< Rc<GL>> > = RefCell::new( None );
}
#[allow(non_snake_case)]
pub fn glctx()-> Rc<GL> {
GLOBAL_GL.with( |v|{
match *v.borrow(){ //Deference Ref<>
Some( ref d ) => d.clone(),
None => panic!("Meh"),
}
})
}
//##################################################
/// Looks up the canvas with id `c_name`, obtains a WebGL2 context from it,
/// sizes the viewport to the canvas' client dimensions, and stores the
/// context in the thread-local `GLOBAL_GL` for later access via `glctx`.
///
/// NOTE(review): despite the `Result` return type, every failure path
/// panics via `expect`/`unwrap`; the `Err` variant is never produced.
pub fn get_webgl_context( c_name: &str ) -> Result< (), &'static str >{
    let win = web_sys::window().expect( "Window Not Found" );
    let doc = win.document().expect( "Window.Document Not Found" );
    let elm = doc.get_element_by_id( c_name ).expect( &format!( "Canvas by the ID {}, Not Found", c_name ) );
    // Downcast the generic Element to an HtmlCanvasElement.
    let canvas: HtmlCanvasElement = elm.dyn_into::<HtmlCanvasElement>()
        .expect( "Unable to convert html element to canvas element" );
    let ctx = canvas.get_context( "webgl2" ) // Result< Option<>, Jsvalue >
        .expect( "Unable to get WebGL 2.0 context from canvas" )
        .unwrap() // For Option
        .dyn_into::< GL >()
        .expect( "Error converting WebGL context to rust." );
    // Match the GL viewport (and the app's notion of size) to the canvas.
    let w = canvas.client_width();
    let h = canvas.client_height();
    console_log!( "set viewport {}, {}", w, h );
    ctx.viewport( 0, 0, w, h );
    World::set_size( w, h );
    // Publish the context for glctx() callers.
    GLOBAL_GL.with(|v|{ v.replace( Some( Rc::new(ctx) ) ); });
    Ok(())
}
| 29.090909 | 107 | 0.588021 |
6715ea6390f22f6a644beab0303f47c5d8c294fd
| 287 |
//! Nakamoto's client library.
#![allow(clippy::inconsistent_struct_constructor)]
#![allow(clippy::type_complexity)]
#![deny(missing_docs, unsafe_code)]
pub mod client;
pub mod error;
pub mod event;
pub mod handle;
pub mod peer;
pub mod spv;
pub use client::*;
#[cfg(test)]
mod tests;
| 17.9375 | 50 | 0.728223 |
d6ef9475c2497f9393d9b14512e45937357c5cc4
| 825 |
// Copyright 2018-2020 Cargill Incorporated
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(feature = "circuit-read")]
pub(in super::super) mod circuits;
#[cfg(feature = "circuit-read")]
pub(in super::super) mod circuits_circuit_id;
#[cfg(feature = "proposal-read")]
pub(in super::super) mod proposals_read;
| 39.285714 | 75 | 0.740606 |
3a965a8a838995330858cc871e0d95d3a8a7c599
| 23,462 |
#[doc = "Reader of register RTSR"]
pub type R = crate::R<u32, super::RTSR>;
#[doc = "Writer for register RTSR"]
pub type W = crate::W<u32, super::RTSR>;
#[doc = "Register RTSR `reset()`'s with value 0"]
impl crate::ResetValue for super::RTSR {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `TR0`"]
pub type TR0_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR0`"]
pub struct TR0_W<'a> {
w: &'a mut W,
}
impl<'a> TR0_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
#[doc = "Reader of field `TR1`"]
pub type TR1_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR1`"]
pub struct TR1_W<'a> {
w: &'a mut W,
}
impl<'a> TR1_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
self.w
}
}
#[doc = "Reader of field `TR2`"]
pub type TR2_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR2`"]
pub struct TR2_W<'a> {
w: &'a mut W,
}
impl<'a> TR2_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
#[doc = "Reader of field `TR3`"]
pub type TR3_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR3`"]
pub struct TR3_W<'a> {
w: &'a mut W,
}
impl<'a> TR3_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
self.w
}
}
#[doc = "Reader of field `TR4`"]
pub type TR4_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR4`"]
pub struct TR4_W<'a> {
w: &'a mut W,
}
impl<'a> TR4_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
self.w
}
}
#[doc = "Reader of field `TR5`"]
pub type TR5_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR5`"]
pub struct TR5_W<'a> {
w: &'a mut W,
}
impl<'a> TR5_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
self.w
}
}
#[doc = "Reader of field `TR6`"]
pub type TR6_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR6`"]
pub struct TR6_W<'a> {
w: &'a mut W,
}
impl<'a> TR6_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6);
self.w
}
}
#[doc = "Reader of field `TR7`"]
pub type TR7_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR7`"]
pub struct TR7_W<'a> {
w: &'a mut W,
}
impl<'a> TR7_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
self.w
}
}
#[doc = "Reader of field `TR8`"]
pub type TR8_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR8`"]
pub struct TR8_W<'a> {
w: &'a mut W,
}
impl<'a> TR8_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
self.w
}
}
#[doc = "Reader of field `TR9`"]
pub type TR9_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR9`"]
pub struct TR9_W<'a> {
w: &'a mut W,
}
impl<'a> TR9_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
self.w
}
}
#[doc = "Reader of field `TR10`"]
pub type TR10_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR10`"]
pub struct TR10_W<'a> {
w: &'a mut W,
}
impl<'a> TR10_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
self.w
}
}
#[doc = "Reader of field `TR11`"]
pub type TR11_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR11`"]
pub struct TR11_W<'a> {
w: &'a mut W,
}
impl<'a> TR11_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11);
self.w
}
}
#[doc = "Reader of field `TR12`"]
pub type TR12_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR12`"]
pub struct TR12_W<'a> {
w: &'a mut W,
}
impl<'a> TR12_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12);
self.w
}
}
#[doc = "Reader of field `TR13`"]
pub type TR13_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR13`"]
pub struct TR13_W<'a> {
w: &'a mut W,
}
impl<'a> TR13_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13);
self.w
}
}
#[doc = "Reader of field `TR14`"]
pub type TR14_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR14`"]
pub struct TR14_W<'a> {
w: &'a mut W,
}
impl<'a> TR14_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14);
self.w
}
}
#[doc = "Reader of field `TR15`"]
pub type TR15_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR15`"]
pub struct TR15_W<'a> {
w: &'a mut W,
}
impl<'a> TR15_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15);
self.w
}
}
#[doc = "Reader of field `TR16`"]
pub type TR16_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR16`"]
pub struct TR16_W<'a> {
w: &'a mut W,
}
impl<'a> TR16_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
self.w
}
}
#[doc = "Reader of field `TR17`"]
pub type TR17_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR17`"]
pub struct TR17_W<'a> {
w: &'a mut W,
}
impl<'a> TR17_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17);
self.w
}
}
#[doc = "Reader of field `TR18`"]
pub type TR18_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR18`"]
pub struct TR18_W<'a> {
w: &'a mut W,
}
impl<'a> TR18_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18);
self.w
}
}
#[doc = "Reader of field `TR19`"]
pub type TR19_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR19`"]
pub struct TR19_W<'a> {
w: &'a mut W,
}
impl<'a> TR19_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 19)) | (((value as u32) & 0x01) << 19);
self.w
}
}
#[doc = "Reader of field `TR20`"]
pub type TR20_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR20`"]
pub struct TR20_W<'a> {
w: &'a mut W,
}
impl<'a> TR20_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 20)) | (((value as u32) & 0x01) << 20);
self.w
}
}
#[doc = "Reader of field `TR21`"]
pub type TR21_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR21`"]
pub struct TR21_W<'a> {
w: &'a mut W,
}
impl<'a> TR21_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 21)) | (((value as u32) & 0x01) << 21);
self.w
}
}
#[doc = "Reader of field `TR22`"]
pub type TR22_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TR22`"]
pub struct TR22_W<'a> {
w: &'a mut W,
}
impl<'a> TR22_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 22)) | (((value as u32) & 0x01) << 22);
self.w
}
}
// svd2rust-generated read accessors: one getter per trigger line, each
// extracting a single bit from the cached register value (`self.bits`).
// Keep byte-identical to the generator's output; regenerate from the SVD.
impl R {
    #[doc = "Bit 0 - Rising trigger event configuration of line 0"]
    #[inline(always)]
    pub fn tr0(&self) -> TR0_R {
        TR0_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - Rising trigger event configuration of line 1"]
    #[inline(always)]
    pub fn tr1(&self) -> TR1_R {
        TR1_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 2 - Rising trigger event configuration of line 2"]
    #[inline(always)]
    pub fn tr2(&self) -> TR2_R {
        TR2_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 3 - Rising trigger event configuration of line 3"]
    #[inline(always)]
    pub fn tr3(&self) -> TR3_R {
        TR3_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 4 - Rising trigger event configuration of line 4"]
    #[inline(always)]
    pub fn tr4(&self) -> TR4_R {
        TR4_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 5 - Rising trigger event configuration of line 5"]
    #[inline(always)]
    pub fn tr5(&self) -> TR5_R {
        TR5_R::new(((self.bits >> 5) & 0x01) != 0)
    }
    #[doc = "Bit 6 - Rising trigger event configuration of line 6"]
    #[inline(always)]
    pub fn tr6(&self) -> TR6_R {
        TR6_R::new(((self.bits >> 6) & 0x01) != 0)
    }
    #[doc = "Bit 7 - Rising trigger event configuration of line 7"]
    #[inline(always)]
    pub fn tr7(&self) -> TR7_R {
        TR7_R::new(((self.bits >> 7) & 0x01) != 0)
    }
    #[doc = "Bit 8 - Rising trigger event configuration of line 8"]
    #[inline(always)]
    pub fn tr8(&self) -> TR8_R {
        TR8_R::new(((self.bits >> 8) & 0x01) != 0)
    }
    #[doc = "Bit 9 - Rising trigger event configuration of line 9"]
    #[inline(always)]
    pub fn tr9(&self) -> TR9_R {
        TR9_R::new(((self.bits >> 9) & 0x01) != 0)
    }
    #[doc = "Bit 10 - Rising trigger event configuration of line 10"]
    #[inline(always)]
    pub fn tr10(&self) -> TR10_R {
        TR10_R::new(((self.bits >> 10) & 0x01) != 0)
    }
    #[doc = "Bit 11 - Rising trigger event configuration of line 11"]
    #[inline(always)]
    pub fn tr11(&self) -> TR11_R {
        TR11_R::new(((self.bits >> 11) & 0x01) != 0)
    }
    #[doc = "Bit 12 - Rising trigger event configuration of line 12"]
    #[inline(always)]
    pub fn tr12(&self) -> TR12_R {
        TR12_R::new(((self.bits >> 12) & 0x01) != 0)
    }
    #[doc = "Bit 13 - Rising trigger event configuration of line 13"]
    #[inline(always)]
    pub fn tr13(&self) -> TR13_R {
        TR13_R::new(((self.bits >> 13) & 0x01) != 0)
    }
    #[doc = "Bit 14 - Rising trigger event configuration of line 14"]
    #[inline(always)]
    pub fn tr14(&self) -> TR14_R {
        TR14_R::new(((self.bits >> 14) & 0x01) != 0)
    }
    #[doc = "Bit 15 - Rising trigger event configuration of line 15"]
    #[inline(always)]
    pub fn tr15(&self) -> TR15_R {
        TR15_R::new(((self.bits >> 15) & 0x01) != 0)
    }
    #[doc = "Bit 16 - Rising trigger event configuration of line 16"]
    #[inline(always)]
    pub fn tr16(&self) -> TR16_R {
        TR16_R::new(((self.bits >> 16) & 0x01) != 0)
    }
    #[doc = "Bit 17 - Rising trigger event configuration of line 17"]
    #[inline(always)]
    pub fn tr17(&self) -> TR17_R {
        TR17_R::new(((self.bits >> 17) & 0x01) != 0)
    }
    #[doc = "Bit 18 - Rising trigger event configuration of line 18"]
    #[inline(always)]
    pub fn tr18(&self) -> TR18_R {
        TR18_R::new(((self.bits >> 18) & 0x01) != 0)
    }
    #[doc = "Bit 19 - Rising trigger event configuration of line 19"]
    #[inline(always)]
    pub fn tr19(&self) -> TR19_R {
        TR19_R::new(((self.bits >> 19) & 0x01) != 0)
    }
    #[doc = "Bit 20 - Rising trigger event configuration of line 20"]
    #[inline(always)]
    pub fn tr20(&self) -> TR20_R {
        TR20_R::new(((self.bits >> 20) & 0x01) != 0)
    }
    #[doc = "Bit 21 - Rising trigger event configuration of line 21"]
    #[inline(always)]
    pub fn tr21(&self) -> TR21_R {
        TR21_R::new(((self.bits >> 21) & 0x01) != 0)
    }
    #[doc = "Bit 22 - Rising trigger event configuration of line 22"]
    #[inline(always)]
    pub fn tr22(&self) -> TR22_R {
        TR22_R::new(((self.bits >> 22) & 0x01) != 0)
    }
}
// svd2rust-generated write accessors: each method hands out a single-bit
// write proxy borrowing this writer. Keep byte-identical to the generator's
// output; regenerate from the SVD.
impl W {
    #[doc = "Bit 0 - Rising trigger event configuration of line 0"]
    #[inline(always)]
    pub fn tr0(&mut self) -> TR0_W {
        TR0_W { w: self }
    }
    #[doc = "Bit 1 - Rising trigger event configuration of line 1"]
    #[inline(always)]
    pub fn tr1(&mut self) -> TR1_W {
        TR1_W { w: self }
    }
    #[doc = "Bit 2 - Rising trigger event configuration of line 2"]
    #[inline(always)]
    pub fn tr2(&mut self) -> TR2_W {
        TR2_W { w: self }
    }
    #[doc = "Bit 3 - Rising trigger event configuration of line 3"]
    #[inline(always)]
    pub fn tr3(&mut self) -> TR3_W {
        TR3_W { w: self }
    }
    #[doc = "Bit 4 - Rising trigger event configuration of line 4"]
    #[inline(always)]
    pub fn tr4(&mut self) -> TR4_W {
        TR4_W { w: self }
    }
    #[doc = "Bit 5 - Rising trigger event configuration of line 5"]
    #[inline(always)]
    pub fn tr5(&mut self) -> TR5_W {
        TR5_W { w: self }
    }
    #[doc = "Bit 6 - Rising trigger event configuration of line 6"]
    #[inline(always)]
    pub fn tr6(&mut self) -> TR6_W {
        TR6_W { w: self }
    }
    #[doc = "Bit 7 - Rising trigger event configuration of line 7"]
    #[inline(always)]
    pub fn tr7(&mut self) -> TR7_W {
        TR7_W { w: self }
    }
    #[doc = "Bit 8 - Rising trigger event configuration of line 8"]
    #[inline(always)]
    pub fn tr8(&mut self) -> TR8_W {
        TR8_W { w: self }
    }
    #[doc = "Bit 9 - Rising trigger event configuration of line 9"]
    #[inline(always)]
    pub fn tr9(&mut self) -> TR9_W {
        TR9_W { w: self }
    }
    #[doc = "Bit 10 - Rising trigger event configuration of line 10"]
    #[inline(always)]
    pub fn tr10(&mut self) -> TR10_W {
        TR10_W { w: self }
    }
    #[doc = "Bit 11 - Rising trigger event configuration of line 11"]
    #[inline(always)]
    pub fn tr11(&mut self) -> TR11_W {
        TR11_W { w: self }
    }
    #[doc = "Bit 12 - Rising trigger event configuration of line 12"]
    #[inline(always)]
    pub fn tr12(&mut self) -> TR12_W {
        TR12_W { w: self }
    }
    #[doc = "Bit 13 - Rising trigger event configuration of line 13"]
    #[inline(always)]
    pub fn tr13(&mut self) -> TR13_W {
        TR13_W { w: self }
    }
    #[doc = "Bit 14 - Rising trigger event configuration of line 14"]
    #[inline(always)]
    pub fn tr14(&mut self) -> TR14_W {
        TR14_W { w: self }
    }
    #[doc = "Bit 15 - Rising trigger event configuration of line 15"]
    #[inline(always)]
    pub fn tr15(&mut self) -> TR15_W {
        TR15_W { w: self }
    }
    #[doc = "Bit 16 - Rising trigger event configuration of line 16"]
    #[inline(always)]
    pub fn tr16(&mut self) -> TR16_W {
        TR16_W { w: self }
    }
    #[doc = "Bit 17 - Rising trigger event configuration of line 17"]
    #[inline(always)]
    pub fn tr17(&mut self) -> TR17_W {
        TR17_W { w: self }
    }
    #[doc = "Bit 18 - Rising trigger event configuration of line 18"]
    #[inline(always)]
    pub fn tr18(&mut self) -> TR18_W {
        TR18_W { w: self }
    }
    #[doc = "Bit 19 - Rising trigger event configuration of line 19"]
    #[inline(always)]
    pub fn tr19(&mut self) -> TR19_W {
        TR19_W { w: self }
    }
    #[doc = "Bit 20 - Rising trigger event configuration of line 20"]
    #[inline(always)]
    pub fn tr20(&mut self) -> TR20_W {
        TR20_W { w: self }
    }
    #[doc = "Bit 21 - Rising trigger event configuration of line 21"]
    #[inline(always)]
    pub fn tr21(&mut self) -> TR21_W {
        TR21_W { w: self }
    }
    #[doc = "Bit 22 - Rising trigger event configuration of line 22"]
    #[inline(always)]
    pub fn tr22(&mut self) -> TR22_W {
        TR22_W { w: self }
    }
}
| 29.364205 | 86 | 0.531753 |
08f77db96cfff934ad5e7465957b59e49fc5e5da
| 1,663 |
#[macro_use]
extern crate derive_more;
mod states;
mod transitions;
use states::*;
use teloxide::{
dispatching::dialogue::{serializer::Bincode, RedisStorage, Storage},
prelude::*,
};
use thiserror::Error;
type StorageError = <RedisStorage<Bincode> as Storage<Dialogue>>::Error;
/// Top-level bot error: either the Telegram API call failed or the
/// Redis-backed dialogue storage failed.
#[derive(Debug, Error)]
enum Error {
    #[error("error from Telegram: {0}")]
    TelegramError(#[from] RequestError),
    #[error("error from storage: {0}")]
    StorageError(#[from] StorageError),
}
// An incoming message bundled with the dialogue state loaded from storage.
type In = DialogueWithCx<Message, Dialogue, StorageError>;
/// Binary entry point; all of the wiring lives in [`run`].
#[tokio::main]
async fn main() {
    run().await
}
/// Builds the bot from the environment (`TELOXIDE_TOKEN`) and dispatches
/// incoming messages through a Redis-backed dialogue storage.
async fn run() {
    let bot = Bot::from_env();
    Dispatcher::new(bot)
        .messages_handler(DialogueDispatcher::with_storage(
            |DialogueWithCx { cx, dialogue }: In| async move {
                // The storage error, if any, was already surfaced by the
                // dispatcher; the lookup itself cannot fail here.
                let dialogue = dialogue.expect("std::convert::Infallible");
                handle_message(cx, dialogue).await.expect("Something wrong with the bot!")
            },
            // You can also choose serializer::JSON or serializer::CBOR
            // All serializers but JSON require enabling feature
            // "serializer-<name>", e. g. "serializer-cbor"
            // or "serializer-bincode"
            RedisStorage::open("redis://127.0.0.1:6379", Bincode).await.unwrap(),
        ))
        .dispatch()
        .await;
}
/// Reacts to one incoming update: text messages are fed into the dialogue
/// state machine; anything else gets a prompt and leaves the state as-is.
async fn handle_message(cx: UpdateWithCx<Message>, dialogue: Dialogue) -> TransitionOut<Dialogue> {
    match cx.update.text_owned() {
        Some(ans) => dialogue.react(cx, ans).await,
        None => {
            cx.answer_str("Send me a text message.").await?;
            next(dialogue)
        }
    }
}
| 28.186441 | 99 | 0.615153 |
ac20bf785396299c10160c414a1acfee1ef1149a
| 35,256 |
/* automatically generated by rust-bindgen */
// collectd constants mirrored from the C headers by rust-bindgen:
// oconfig value-type discriminants, name-length limit, data-source kinds,
// and syslog-style log severities.
pub const OCONFIG_TYPE_STRING: u32 = 0;
pub const OCONFIG_TYPE_NUMBER: u32 = 1;
pub const OCONFIG_TYPE_BOOLEAN: u32 = 2;
pub const DATA_MAX_NAME_LEN: u32 = 64;
pub const DS_TYPE_COUNTER: u32 = 0;
pub const DS_TYPE_GAUGE: u32 = 1;
pub const DS_TYPE_DERIVE: u32 = 2;
pub const DS_TYPE_ABSOLUTE: u32 = 3;
pub const LOG_ERR: u32 = 3;
pub const LOG_WARNING: u32 = 4;
pub const LOG_NOTICE: u32 = 5;
pub const LOG_INFO: u32 = 6;
pub const LOG_DEBUG: u32 = 7;
pub type __time_t = ::std::os::raw::c_long;
pub type __syscall_slong_t = ::std::os::raw::c_long;
/// Mirror of the C `struct timespec` (seconds + nanoseconds).
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct timespec {
    pub tv_sec: __time_t,
    pub tv_nsec: __syscall_slong_t,
}
/// ABI layout check for `timespec`.
///
/// Rewritten to compute field offsets from a `MaybeUninit` pointer via
/// `ptr::addr_of!` instead of the original `&(*ptr::null()).field` idiom,
/// which dereferences a null pointer and is undefined behavior. This is the
/// pattern modern rust-bindgen emits for exactly this reason.
#[test]
fn bindgen_test_layout_timespec() {
    const UNINIT: ::std::mem::MaybeUninit<timespec> = ::std::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::std::mem::size_of::<timespec>(),
        16usize,
        concat!("Size of: ", stringify!(timespec))
    );
    assert_eq!(
        ::std::mem::align_of::<timespec>(),
        8usize,
        concat!("Alignment of ", stringify!(timespec))
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).tv_sec) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(timespec),
            "::",
            stringify!(tv_sec)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).tv_nsec) as usize - ptr as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(timespec),
            "::",
            stringify!(tv_nsec)
        )
    );
}
// Opaque pthread handle and attribute blob, mirrored from glibc headers.
pub type pthread_t = ::std::os::raw::c_ulong;
#[repr(C)]
#[derive(Copy, Clone)]
pub union pthread_attr_t {
    pub __size: [::std::os::raw::c_char; 56usize],
    pub __align: ::std::os::raw::c_long,
    _bindgen_union_align: [u64; 7usize],
}
// NOTE(review): this and the following generated layout tests take field
// offsets through `&(*::std::ptr::null::<T>()).field`, which dereferences a
// null pointer (undefined behavior under current Rust rules). Newer
// rust-bindgen emits a MaybeUninit/`addr_of!`-based equivalent; consider
// regenerating these bindings rather than patching by hand.
#[test]
fn bindgen_test_layout_pthread_attr_t() {
    assert_eq!(
        ::std::mem::size_of::<pthread_attr_t>(),
        56usize,
        concat!("Size of: ", stringify!(pthread_attr_t))
    );
    assert_eq!(
        ::std::mem::align_of::<pthread_attr_t>(),
        8usize,
        concat!("Alignment of ", stringify!(pthread_attr_t))
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<pthread_attr_t>())).__size as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(pthread_attr_t),
            "::",
            stringify!(__size)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<pthread_attr_t>())).__align as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(pthread_attr_t),
            "::",
            stringify!(__align)
        )
    );
}
// collectd timestamp type; a plain u64 on the Rust side.
pub type cdtime_t = u64;
// Tagged union value from collectd's config parser: `type_` presumably
// selects the active member per the OCONFIG_TYPE_* constants — confirm
// against oconfig.h.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct oconfig_value_s {
    pub value: oconfig_value_s__bindgen_ty_1,
    pub type_: ::std::os::raw::c_int,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union oconfig_value_s__bindgen_ty_1 {
    pub string: *mut ::std::os::raw::c_char,
    pub number: f64,
    pub boolean: ::std::os::raw::c_int,
    _bindgen_union_align: u64,
}
// Generated layout test (see the UB note at bindgen_test_layout_pthread_attr_t).
#[test]
fn bindgen_test_layout_oconfig_value_s__bindgen_ty_1() {
    assert_eq!(
        ::std::mem::size_of::<oconfig_value_s__bindgen_ty_1>(),
        8usize,
        concat!("Size of: ", stringify!(oconfig_value_s__bindgen_ty_1))
    );
    assert_eq!(
        ::std::mem::align_of::<oconfig_value_s__bindgen_ty_1>(),
        8usize,
        concat!("Alignment of ", stringify!(oconfig_value_s__bindgen_ty_1))
    );
    assert_eq!(
        unsafe {
            &(*(::std::ptr::null::<oconfig_value_s__bindgen_ty_1>())).string as *const _ as usize
        },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(oconfig_value_s__bindgen_ty_1),
            "::",
            stringify!(string)
        )
    );
    assert_eq!(
        unsafe {
            &(*(::std::ptr::null::<oconfig_value_s__bindgen_ty_1>())).number as *const _ as usize
        },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(oconfig_value_s__bindgen_ty_1),
            "::",
            stringify!(number)
        )
    );
    assert_eq!(
        unsafe {
            &(*(::std::ptr::null::<oconfig_value_s__bindgen_ty_1>())).boolean as *const _ as usize
        },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(oconfig_value_s__bindgen_ty_1),
            "::",
            stringify!(boolean)
        )
    );
}
#[test]
fn bindgen_test_layout_oconfig_value_s() {
    assert_eq!(
        ::std::mem::size_of::<oconfig_value_s>(),
        16usize,
        concat!("Size of: ", stringify!(oconfig_value_s))
    );
    assert_eq!(
        ::std::mem::align_of::<oconfig_value_s>(),
        8usize,
        concat!("Alignment of ", stringify!(oconfig_value_s))
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<oconfig_value_s>())).value as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(oconfig_value_s),
            "::",
            stringify!(value)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<oconfig_value_s>())).type_ as *const _ as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(oconfig_value_s),
            "::",
            stringify!(type_)
        )
    );
}
pub type oconfig_value_t = oconfig_value_s;
pub type oconfig_item_t = oconfig_item_s;
// A node in collectd's parsed configuration tree: a key, its values, and
// pointers to parent/children nodes.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct oconfig_item_s {
    pub key: *mut ::std::os::raw::c_char,
    pub values: *mut oconfig_value_t,
    pub values_num: ::std::os::raw::c_int,
    pub parent: *mut oconfig_item_t,
    pub children: *mut oconfig_item_t,
    pub children_num: ::std::os::raw::c_int,
}
// Generated layout test (see the UB note at bindgen_test_layout_pthread_attr_t).
#[test]
fn bindgen_test_layout_oconfig_item_s() {
    assert_eq!(
        ::std::mem::size_of::<oconfig_item_s>(),
        48usize,
        concat!("Size of: ", stringify!(oconfig_item_s))
    );
    assert_eq!(
        ::std::mem::align_of::<oconfig_item_s>(),
        8usize,
        concat!("Alignment of ", stringify!(oconfig_item_s))
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<oconfig_item_s>())).key as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(oconfig_item_s),
            "::",
            stringify!(key)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<oconfig_item_s>())).values as *const _ as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(oconfig_item_s),
            "::",
            stringify!(values)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<oconfig_item_s>())).values_num as *const _ as usize },
        16usize,
        concat!(
            "Offset of field: ",
            stringify!(oconfig_item_s),
            "::",
            stringify!(values_num)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<oconfig_item_s>())).parent as *const _ as usize },
        24usize,
        concat!(
            "Offset of field: ",
            stringify!(oconfig_item_s),
            "::",
            stringify!(parent)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<oconfig_item_s>())).children as *const _ as usize },
        32usize,
        concat!(
            "Offset of field: ",
            stringify!(oconfig_item_s),
            "::",
            stringify!(children)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<oconfig_item_s>())).children_num as *const _ as usize },
        40usize,
        concat!(
            "Offset of field: ",
            stringify!(oconfig_item_s),
            "::",
            stringify!(children_num)
        )
    );
}
// Opaque handle (zero-sized on the Rust side; only used behind pointers).
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct meta_data_s {
    _unused: [u8; 0],
}
pub type meta_data_t = meta_data_s;
// collectd's four data-source value representations (cf. DS_TYPE_*).
pub type counter_t = ::std::os::raw::c_ulonglong;
pub type gauge_t = f64;
pub type derive_t = i64;
pub type absolute_t = u64;
// Untagged value union; which member is active is determined externally
// (presumably by the data set's DS_TYPE_* — confirm against plugin.h).
#[repr(C)]
#[derive(Copy, Clone)]
pub union value_u {
    pub counter: counter_t,
    pub gauge: gauge_t,
    pub derive: derive_t,
    pub absolute: absolute_t,
    _bindgen_union_align: u64,
}
// Generated layout test (see the UB note at bindgen_test_layout_pthread_attr_t).
#[test]
fn bindgen_test_layout_value_u() {
    assert_eq!(
        ::std::mem::size_of::<value_u>(),
        8usize,
        concat!("Size of: ", stringify!(value_u))
    );
    assert_eq!(
        ::std::mem::align_of::<value_u>(),
        8usize,
        concat!("Alignment of ", stringify!(value_u))
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<value_u>())).counter as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(value_u),
            "::",
            stringify!(counter)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<value_u>())).gauge as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(value_u),
            "::",
            stringify!(gauge)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<value_u>())).derive as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(value_u),
            "::",
            stringify!(derive)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<value_u>())).absolute as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(value_u),
            "::",
            stringify!(absolute)
        )
    );
}
pub type value_t = value_u;
// A batch of values plus identification (host/plugin/type fields are
// fixed-size, NUL-terminated C char arrays of DATA_MAX_NAME_LEN).
#[repr(C)]
#[derive(Copy, Clone)]
pub struct value_list_s {
    pub values: *mut value_t,
    pub values_len: ::std::os::raw::c_int,
    pub time: cdtime_t,
    pub interval: cdtime_t,
    pub host: [::std::os::raw::c_char; 64usize],
    pub plugin: [::std::os::raw::c_char; 64usize],
    pub plugin_instance: [::std::os::raw::c_char; 64usize],
    pub type_: [::std::os::raw::c_char; 64usize],
    pub type_instance: [::std::os::raw::c_char; 64usize],
    pub meta: *mut meta_data_t,
}
// Generated layout test (see the UB note at bindgen_test_layout_pthread_attr_t).
#[test]
fn bindgen_test_layout_value_list_s() {
    assert_eq!(
        ::std::mem::size_of::<value_list_s>(),
        360usize,
        concat!("Size of: ", stringify!(value_list_s))
    );
    assert_eq!(
        ::std::mem::align_of::<value_list_s>(),
        8usize,
        concat!("Alignment of ", stringify!(value_list_s))
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<value_list_s>())).values as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(value_list_s),
            "::",
            stringify!(values)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<value_list_s>())).values_len as *const _ as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(value_list_s),
            "::",
            stringify!(values_len)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<value_list_s>())).time as *const _ as usize },
        16usize,
        concat!(
            "Offset of field: ",
            stringify!(value_list_s),
            "::",
            stringify!(time)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<value_list_s>())).interval as *const _ as usize },
        24usize,
        concat!(
            "Offset of field: ",
            stringify!(value_list_s),
            "::",
            stringify!(interval)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<value_list_s>())).host as *const _ as usize },
        32usize,
        concat!(
            "Offset of field: ",
            stringify!(value_list_s),
            "::",
            stringify!(host)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<value_list_s>())).plugin as *const _ as usize },
        96usize,
        concat!(
            "Offset of field: ",
            stringify!(value_list_s),
            "::",
            stringify!(plugin)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<value_list_s>())).plugin_instance as *const _ as usize },
        160usize,
        concat!(
            "Offset of field: ",
            stringify!(value_list_s),
            "::",
            stringify!(plugin_instance)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<value_list_s>())).type_ as *const _ as usize },
        224usize,
        concat!(
            "Offset of field: ",
            stringify!(value_list_s),
            "::",
            stringify!(type_)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<value_list_s>())).type_instance as *const _ as usize },
        288usize,
        concat!(
            "Offset of field: ",
            stringify!(value_list_s),
            "::",
            stringify!(type_instance)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<value_list_s>())).meta as *const _ as usize },
        352usize,
        concat!(
            "Offset of field: ",
            stringify!(value_list_s),
            "::",
            stringify!(meta)
        )
    );
}
pub type value_list_t = value_list_s;
// One data source: name, kind (DS_TYPE_*), and its valid value range.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct data_source_s {
    pub name: [::std::os::raw::c_char; 64usize],
    pub type_: ::std::os::raw::c_int,
    pub min: f64,
    pub max: f64,
}
// Generated layout test (see the UB note at bindgen_test_layout_pthread_attr_t).
#[test]
fn bindgen_test_layout_data_source_s() {
    assert_eq!(
        ::std::mem::size_of::<data_source_s>(),
        88usize,
        concat!("Size of: ", stringify!(data_source_s))
    );
    assert_eq!(
        ::std::mem::align_of::<data_source_s>(),
        8usize,
        concat!("Alignment of ", stringify!(data_source_s))
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<data_source_s>())).name as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(data_source_s),
            "::",
            stringify!(name)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<data_source_s>())).type_ as *const _ as usize },
        64usize,
        concat!(
            "Offset of field: ",
            stringify!(data_source_s),
            "::",
            stringify!(type_)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<data_source_s>())).min as *const _ as usize },
        72usize,
        concat!(
            "Offset of field: ",
            stringify!(data_source_s),
            "::",
            stringify!(min)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<data_source_s>())).max as *const _ as usize },
        80usize,
        concat!(
            "Offset of field: ",
            stringify!(data_source_s),
            "::",
            stringify!(max)
        )
    );
}
pub type data_source_t = data_source_s;
// A named set of data sources (array of `ds_num` entries at `ds`).
#[repr(C)]
#[derive(Copy, Clone)]
pub struct data_set_s {
    pub type_: [::std::os::raw::c_char; 64usize],
    pub ds_num: ::std::os::raw::c_int,
    pub ds: *mut data_source_t,
}
// Generated layout test (see the UB note at bindgen_test_layout_pthread_attr_t).
#[test]
fn bindgen_test_layout_data_set_s() {
    assert_eq!(
        ::std::mem::size_of::<data_set_s>(),
        80usize,
        concat!("Size of: ", stringify!(data_set_s))
    );
    assert_eq!(
        ::std::mem::align_of::<data_set_s>(),
        8usize,
        concat!("Alignment of ", stringify!(data_set_s))
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<data_set_s>())).type_ as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(data_set_s),
            "::",
            stringify!(type_)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<data_set_s>())).ds_num as *const _ as usize },
        64usize,
        concat!(
            "Offset of field: ",
            stringify!(data_set_s),
            "::",
            stringify!(ds_num)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<data_set_s>())).ds as *const _ as usize },
        72usize,
        concat!(
            "Offset of field: ",
            stringify!(data_set_s),
            "::",
            stringify!(ds)
        )
    );
}
pub type data_set_t = data_set_s;
// Discriminants for the notification metadata value union below.
pub const notification_meta_type_e_NM_TYPE_STRING: notification_meta_type_e = 0;
pub const notification_meta_type_e_NM_TYPE_SIGNED_INT: notification_meta_type_e = 1;
pub const notification_meta_type_e_NM_TYPE_UNSIGNED_INT: notification_meta_type_e = 2;
pub const notification_meta_type_e_NM_TYPE_DOUBLE: notification_meta_type_e = 3;
pub const notification_meta_type_e_NM_TYPE_BOOLEAN: notification_meta_type_e = 4;
pub type notification_meta_type_e = u32;
// Singly-linked list node of notification metadata; `type_` selects the
// active `nm_value` member (NM_TYPE_*).
#[repr(C)]
#[derive(Copy, Clone)]
pub struct notification_meta_s {
    pub name: [::std::os::raw::c_char; 64usize],
    pub type_: notification_meta_type_e,
    pub nm_value: notification_meta_s__bindgen_ty_1,
    pub next: *mut notification_meta_s,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union notification_meta_s__bindgen_ty_1 {
    pub nm_string: *const ::std::os::raw::c_char,
    pub nm_signed_int: i64,
    pub nm_unsigned_int: u64,
    pub nm_double: f64,
    pub nm_boolean: bool,
    _bindgen_union_align: u64,
}
// Generated layout test (see the UB note at bindgen_test_layout_pthread_attr_t).
#[test]
fn bindgen_test_layout_notification_meta_s__bindgen_ty_1() {
    assert_eq!(
        ::std::mem::size_of::<notification_meta_s__bindgen_ty_1>(),
        8usize,
        concat!("Size of: ", stringify!(notification_meta_s__bindgen_ty_1))
    );
    assert_eq!(
        ::std::mem::align_of::<notification_meta_s__bindgen_ty_1>(),
        8usize,
        concat!(
            "Alignment of ",
            stringify!(notification_meta_s__bindgen_ty_1)
        )
    );
    assert_eq!(
        unsafe {
            &(*(::std::ptr::null::<notification_meta_s__bindgen_ty_1>())).nm_string as *const _
                as usize
        },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_meta_s__bindgen_ty_1),
            "::",
            stringify!(nm_string)
        )
    );
    assert_eq!(
        unsafe {
            &(*(::std::ptr::null::<notification_meta_s__bindgen_ty_1>())).nm_signed_int as *const _
                as usize
        },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_meta_s__bindgen_ty_1),
            "::",
            stringify!(nm_signed_int)
        )
    );
    assert_eq!(
        unsafe {
            &(*(::std::ptr::null::<notification_meta_s__bindgen_ty_1>())).nm_unsigned_int
                as *const _ as usize
        },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_meta_s__bindgen_ty_1),
            "::",
            stringify!(nm_unsigned_int)
        )
    );
    assert_eq!(
        unsafe {
            &(*(::std::ptr::null::<notification_meta_s__bindgen_ty_1>())).nm_double as *const _
                as usize
        },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_meta_s__bindgen_ty_1),
            "::",
            stringify!(nm_double)
        )
    );
    assert_eq!(
        unsafe {
            &(*(::std::ptr::null::<notification_meta_s__bindgen_ty_1>())).nm_boolean as *const _
                as usize
        },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_meta_s__bindgen_ty_1),
            "::",
            stringify!(nm_boolean)
        )
    );
}
// Generated layout test (see the UB note at bindgen_test_layout_pthread_attr_t).
#[test]
fn bindgen_test_layout_notification_meta_s() {
    assert_eq!(
        ::std::mem::size_of::<notification_meta_s>(),
        88usize,
        concat!("Size of: ", stringify!(notification_meta_s))
    );
    assert_eq!(
        ::std::mem::align_of::<notification_meta_s>(),
        8usize,
        concat!("Alignment of ", stringify!(notification_meta_s))
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<notification_meta_s>())).name as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_meta_s),
            "::",
            stringify!(name)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<notification_meta_s>())).type_ as *const _ as usize },
        64usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_meta_s),
            "::",
            stringify!(type_)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<notification_meta_s>())).nm_value as *const _ as usize },
        72usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_meta_s),
            "::",
            stringify!(nm_value)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<notification_meta_s>())).next as *const _ as usize },
        80usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_meta_s),
            "::",
            stringify!(next)
        )
    );
}
pub type notification_meta_t = notification_meta_s;
// A collectd notification: severity (LOG_*-style int), timestamp, message,
// identification fields, and an optional metadata list.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct notification_s {
    pub severity: ::std::os::raw::c_int,
    pub time: cdtime_t,
    pub message: [::std::os::raw::c_char; 256usize],
    pub host: [::std::os::raw::c_char; 64usize],
    pub plugin: [::std::os::raw::c_char; 64usize],
    pub plugin_instance: [::std::os::raw::c_char; 64usize],
    pub type_: [::std::os::raw::c_char; 64usize],
    pub type_instance: [::std::os::raw::c_char; 64usize],
    pub meta: *mut notification_meta_t,
}
// Generated layout test (see the UB note at bindgen_test_layout_pthread_attr_t).
#[test]
fn bindgen_test_layout_notification_s() {
    assert_eq!(
        ::std::mem::size_of::<notification_s>(),
        600usize,
        concat!("Size of: ", stringify!(notification_s))
    );
    assert_eq!(
        ::std::mem::align_of::<notification_s>(),
        8usize,
        concat!("Alignment of ", stringify!(notification_s))
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<notification_s>())).severity as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_s),
            "::",
            stringify!(severity)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<notification_s>())).time as *const _ as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_s),
            "::",
            stringify!(time)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<notification_s>())).message as *const _ as usize },
        16usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_s),
            "::",
            stringify!(message)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<notification_s>())).host as *const _ as usize },
        272usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_s),
            "::",
            stringify!(host)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<notification_s>())).plugin as *const _ as usize },
        336usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_s),
            "::",
            stringify!(plugin)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<notification_s>())).plugin_instance as *const _ as usize },
        400usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_s),
            "::",
            stringify!(plugin_instance)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<notification_s>())).type_ as *const _ as usize },
        464usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_s),
            "::",
            stringify!(type_)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<notification_s>())).type_instance as *const _ as usize },
        528usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_s),
            "::",
            stringify!(type_instance)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<notification_s>())).meta as *const _ as usize },
        592usize,
        concat!(
            "Offset of field: ",
            stringify!(notification_s),
            "::",
            stringify!(meta)
        )
    );
}
pub type notification_t = notification_s;
// Opaque callback payload plus its optional destructor.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct user_data_s {
    pub data: *mut ::std::os::raw::c_void,
    pub free_func: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>,
}
// Generated layout test (see the UB note at bindgen_test_layout_pthread_attr_t).
#[test]
fn bindgen_test_layout_user_data_s() {
    assert_eq!(
        ::std::mem::size_of::<user_data_s>(),
        16usize,
        concat!("Size of: ", stringify!(user_data_s))
    );
    assert_eq!(
        ::std::mem::align_of::<user_data_s>(),
        8usize,
        concat!("Alignment of ", stringify!(user_data_s))
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<user_data_s>())).data as *const _ as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(user_data_s),
            "::",
            stringify!(data)
        )
    );
    assert_eq!(
        unsafe { &(*(::std::ptr::null::<user_data_s>())).free_func as *const _ as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(user_data_s),
            "::",
            stringify!(free_func)
        )
    );
}
pub type user_data_t = user_data_s;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct plugin_ctx_s {
pub interval: cdtime_t,
}
/// Layout check generated to keep `plugin_ctx_s` ABI-compatible with the C struct.
#[test]
fn bindgen_test_layout_plugin_ctx_s() {
    // Offsets are measured via `addr_of!` on an uninitialized value; the
    // previous `&(*::std::ptr::null::<T>()).field` pattern dereferences a
    // null pointer, which is undefined behavior in Rust.
    const UNINIT: ::std::mem::MaybeUninit<plugin_ctx_s> = ::std::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::std::mem::size_of::<plugin_ctx_s>(),
        8usize,
        concat!("Size of: ", stringify!(plugin_ctx_s))
    );
    assert_eq!(
        ::std::mem::align_of::<plugin_ctx_s>(),
        8usize,
        concat!("Alignment of ", stringify!(plugin_ctx_s))
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).interval) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(plugin_ctx_s),
            "::",
            stringify!(interval)
        )
    );
}
/// Convenience alias matching collectd's C `typedef struct plugin_ctx_s plugin_ctx_t`.
pub type plugin_ctx_t = plugin_ctx_s;
/// Init callback signature: `int (*)(void)`.
pub type plugin_init_cb = ::std::option::Option<unsafe extern "C" fn() -> ::std::os::raw::c_int>;
/// Complex-read callback signature: `int (*)(user_data_t *)`.
pub type plugin_read_cb =
    ::std::option::Option<unsafe extern "C" fn(arg1: *mut user_data_t) -> ::std::os::raw::c_int>;
/// Write callback signature: `int (*)(const data_set_t *, const value_list_t *, user_data_t *)`.
pub type plugin_write_cb = ::std::option::Option<
    unsafe extern "C" fn(
        arg1: *const data_set_t,
        arg2: *const value_list_t,
        arg3: *mut user_data_t,
    ) -> ::std::os::raw::c_int,
>;
/// Flush callback signature: receives a timeout and an identifier string.
pub type plugin_flush_cb = ::std::option::Option<
    unsafe extern "C" fn(
        timeout: cdtime_t,
        identifier: *const ::std::os::raw::c_char,
        arg1: *mut user_data_t,
    ) -> ::std::os::raw::c_int,
>;
/// Missing-value callback signature: `int (*)(const value_list_t *, user_data_t *)`.
pub type plugin_missing_cb = ::std::option::Option<
    unsafe extern "C" fn(
        arg1: *const value_list_t,
        arg2: *mut user_data_t,
    ) -> ::std::os::raw::c_int,
>;
/// Log callback signature: severity, message, user data; returns nothing.
pub type plugin_log_cb = ::std::option::Option<
    unsafe extern "C" fn(
        severity: ::std::os::raw::c_int,
        message: *const ::std::os::raw::c_char,
        arg1: *mut user_data_t,
    ),
>;
/// Shutdown callback signature: `int (*)(void)`.
pub type plugin_shutdown_cb =
    ::std::option::Option<unsafe extern "C" fn() -> ::std::os::raw::c_int>;
/// Notification callback signature: `int (*)(const notification_t *, user_data_t *)`.
pub type plugin_notification_cb = ::std::option::Option<
    unsafe extern "C" fn(
        arg1: *const notification_t,
        arg2: *mut user_data_t,
    ) -> ::std::os::raw::c_int,
>;
extern "C" {
    /// FFI declaration for collectd's `plugin_set_dir`.
    pub fn plugin_set_dir(dir: *const ::std::os::raw::c_char);
}
extern "C" {
    /// FFI declaration for collectd's `plugin_load`; returns a C status code.
    pub fn plugin_load(name: *const ::std::os::raw::c_char, flags: u32) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_init_all`.
    pub fn plugin_init_all();
}
extern "C" {
    /// FFI declaration for collectd's `plugin_read_all`.
    pub fn plugin_read_all();
}
extern "C" {
    /// FFI declaration for collectd's `plugin_read_all_once`; returns a C status code.
    pub fn plugin_read_all_once() -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_shutdown_all`.
    pub fn plugin_shutdown_all();
}
extern "C" {
    /// FFI declaration for collectd's `plugin_write`; takes a plugin name,
    /// a data set and a value list, returns a C status code.
    pub fn plugin_write(
        plugin: *const ::std::os::raw::c_char,
        ds: *const data_set_t,
        vl: *const value_list_t,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_flush`.
    pub fn plugin_flush(
        plugin: *const ::std::os::raw::c_char,
        timeout: cdtime_t,
        identifier: *const ::std::os::raw::c_char,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_register_config`:
    /// a key/value callback plus the list of accepted keys.
    pub fn plugin_register_config(
        name: *const ::std::os::raw::c_char,
        callback: ::std::option::Option<
            unsafe extern "C" fn(
                key: *const ::std::os::raw::c_char,
                val: *const ::std::os::raw::c_char,
            ) -> ::std::os::raw::c_int,
        >,
        keys: *mut *const ::std::os::raw::c_char,
        keys_num: ::std::os::raw::c_int,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_register_complex_config`:
    /// callback receives a parsed `oconfig_item_t` tree.
    pub fn plugin_register_complex_config(
        type_: *const ::std::os::raw::c_char,
        callback: ::std::option::Option<
            unsafe extern "C" fn(arg1: *mut oconfig_item_t) -> ::std::os::raw::c_int,
        >,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_register_init`.
    pub fn plugin_register_init(
        name: *const ::std::os::raw::c_char,
        callback: plugin_init_cb,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_register_read`
    /// (parameterless callback variant).
    pub fn plugin_register_read(
        name: *const ::std::os::raw::c_char,
        callback: ::std::option::Option<unsafe extern "C" fn() -> ::std::os::raw::c_int>,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_register_complex_read`:
    /// read callback with user data and an optional interval.
    pub fn plugin_register_complex_read(
        group: *const ::std::os::raw::c_char,
        name: *const ::std::os::raw::c_char,
        callback: plugin_read_cb,
        interval: *const timespec,
        user_data: *mut user_data_t,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_register_write`.
    pub fn plugin_register_write(
        name: *const ::std::os::raw::c_char,
        callback: plugin_write_cb,
        user_data: *mut user_data_t,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_register_flush`.
    pub fn plugin_register_flush(
        name: *const ::std::os::raw::c_char,
        callback: plugin_flush_cb,
        user_data: *mut user_data_t,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_register_missing`.
    pub fn plugin_register_missing(
        name: *const ::std::os::raw::c_char,
        callback: plugin_missing_cb,
        user_data: *mut user_data_t,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_register_shutdown`.
    pub fn plugin_register_shutdown(
        name: *const ::std::os::raw::c_char,
        callback: plugin_shutdown_cb,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_register_data_set`.
    pub fn plugin_register_data_set(ds: *const data_set_t) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_register_log`.
    pub fn plugin_register_log(
        name: *const ::std::os::raw::c_char,
        callback: plugin_log_cb,
        user_data: *mut user_data_t,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_register_notification`.
    pub fn plugin_register_notification(
        name: *const ::std::os::raw::c_char,
        callback: plugin_notification_cb,
        user_data: *mut user_data_t,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_unregister_config`.
    pub fn plugin_unregister_config(name: *const ::std::os::raw::c_char) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_unregister_complex_config`.
    pub fn plugin_unregister_complex_config(
        name: *const ::std::os::raw::c_char,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_unregister_init`.
    pub fn plugin_unregister_init(name: *const ::std::os::raw::c_char) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_unregister_read`.
    pub fn plugin_unregister_read(name: *const ::std::os::raw::c_char) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_unregister_read_group`.
    pub fn plugin_unregister_read_group(
        group: *const ::std::os::raw::c_char,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_unregister_write`.
    pub fn plugin_unregister_write(name: *const ::std::os::raw::c_char) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_unregister_flush`.
    pub fn plugin_unregister_flush(name: *const ::std::os::raw::c_char) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_unregister_missing`.
    pub fn plugin_unregister_missing(name: *const ::std::os::raw::c_char) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_unregister_shutdown`.
    pub fn plugin_unregister_shutdown(name: *const ::std::os::raw::c_char)
        -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_unregister_data_set`.
    pub fn plugin_unregister_data_set(name: *const ::std::os::raw::c_char)
        -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_unregister_log`.
    pub fn plugin_unregister_log(name: *const ::std::os::raw::c_char) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_unregister_notification`.
    pub fn plugin_unregister_notification(
        name: *const ::std::os::raw::c_char,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_dispatch_values`.
    pub fn plugin_dispatch_values(vl: *const value_list_t) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_dispatch_missing`.
    pub fn plugin_dispatch_missing(vl: *const value_list_t) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_dispatch_notification`.
    pub fn plugin_dispatch_notification(notif: *const notification_t) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's variadic `plugin_log`
    /// (printf-style format plus arguments).
    pub fn plugin_log(level: ::std::os::raw::c_int, format: *const ::std::os::raw::c_char, ...);
}
extern "C" {
    /// FFI declaration for collectd's `plugin_get_ds`; returns a data-set pointer.
    pub fn plugin_get_ds(name: *const ::std::os::raw::c_char) -> *const data_set_t;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_notification_meta_add_string`.
    pub fn plugin_notification_meta_add_string(
        n: *mut notification_t,
        name: *const ::std::os::raw::c_char,
        value: *const ::std::os::raw::c_char,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_notification_meta_add_signed_int`.
    pub fn plugin_notification_meta_add_signed_int(
        n: *mut notification_t,
        name: *const ::std::os::raw::c_char,
        value: i64,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_notification_meta_add_unsigned_int`.
    pub fn plugin_notification_meta_add_unsigned_int(
        n: *mut notification_t,
        name: *const ::std::os::raw::c_char,
        value: u64,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_notification_meta_add_double`.
    pub fn plugin_notification_meta_add_double(
        n: *mut notification_t,
        name: *const ::std::os::raw::c_char,
        value: f64,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_notification_meta_add_boolean`.
    pub fn plugin_notification_meta_add_boolean(
        n: *mut notification_t,
        name: *const ::std::os::raw::c_char,
        value: bool,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_notification_meta_copy`.
    pub fn plugin_notification_meta_copy(
        dst: *mut notification_t,
        src: *const notification_t,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_notification_meta_free`.
    pub fn plugin_notification_meta_free(n: *mut notification_meta_t) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_init_ctx`.
    pub fn plugin_init_ctx();
}
extern "C" {
    /// FFI declaration for collectd's `plugin_get_ctx`; returns the context by value.
    pub fn plugin_get_ctx() -> plugin_ctx_t;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_set_ctx`; returns the previous context.
    pub fn plugin_set_ctx(ctx: plugin_ctx_t) -> plugin_ctx_t;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_get_interval`.
    pub fn plugin_get_interval() -> cdtime_t;
}
extern "C" {
    /// FFI declaration for collectd's `plugin_thread_create`
    /// (pthread_create-shaped signature).
    pub fn plugin_thread_create(
        thread: *mut pthread_t,
        attr: *const pthread_attr_t,
        start_routine: ::std::option::Option<
            unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void) -> *mut ::std::os::raw::c_void,
        >,
        arg: *mut ::std::os::raw::c_void,
    ) -> ::std::os::raw::c_int;
}
extern "C" {
    /// FFI declaration for collectd's `uc_get_rate`; returns a gauge pointer.
    pub fn uc_get_rate(ds: *const data_set_t, vl: *const value_list_t) -> *mut gauge_t;
}
| 28.827473 | 99 | 0.53554 |
bf6ffd22c08e2eed1b625b2f26e17ce8d4d26b99
| 87,313 |
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
pub mod operations {
    use crate::models::*;
    /// Enumerates the operations published by the `Microsoft.BatchAI`
    /// resource provider via `GET {base}/providers/Microsoft.BatchAI/operations`.
    ///
    /// A `200 OK` body is deserialized as [`OperationListResult`]; any other
    /// status is returned as [`list::Error::UnexpectedResponse`].
    pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationListResult, list::Error> {
        let client = operation_config.http_client();
        let raw_url = format!("{}/providers/Microsoft.BatchAI/operations", operation_config.base_path());
        let mut url = url::Url::parse(&raw_url).map_err(|source| list::Error::ParseUrlError { source })?;
        url.query_pairs_mut()
            .append_pair("api-version", operation_config.api_version());
        let mut builder = http::request::Builder::new().method(http::Method::GET);
        // Bearer authentication is attached only when the configuration
        // provides a token credential.
        if let Some(credential) = operation_config.token_credential() {
            let token = credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(|source| list::Error::GetTokenError { source })?;
            builder = builder.header(
                http::header::AUTHORIZATION,
                format!("Bearer {}", token.token.secret()),
            );
        }
        let request = builder
            .uri(url.as_str())
            .body(bytes::Bytes::from_static(azure_core::EMPTY_BODY))
            .map_err(|source| list::Error::BuildRequestError { source })?;
        let response = client
            .execute_request(request)
            .await
            .map_err(|source| list::Error::ExecuteRequestError { source })?;
        match response.status() {
            http::StatusCode::OK => {
                let payload = response.body();
                serde_json::from_slice::<OperationListResult>(payload).map_err(|source| {
                    list::Error::DeserializeError {
                        source,
                        body: payload.clone(),
                    }
                })
            }
            status_code => Err(list::Error::UnexpectedResponse {
                status_code,
                body: response.body().clone(),
            }),
        }
    }
    /// Error type for the sibling `list` function.
    pub mod list {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {}", source)]
            ParseUrlError { source: url::ParseError },
            #[error("Failed to build request: {}", source)]
            BuildRequestError { source: http::Error },
            #[error("Failed to execute request: {}", source)]
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to serialize request body: {}", source)]
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to deserialize response body: {}", source)]
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            #[error("Failed to get access token: {}", source)]
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod usage {
    use crate::models::*;
    /// Retrieves Batch AI usage information for a subscription and location
    /// via `GET .../locations/{location}/usages`.
    ///
    /// A `200 OK` body is deserialized as [`ListUsagesResult`]; any other
    /// status is returned as [`list::Error::UnexpectedResponse`].
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        location: &str,
    ) -> std::result::Result<ListUsagesResult, list::Error> {
        let client = operation_config.http_client();
        let raw_url = format!(
            "{}/subscriptions/{}/providers/Microsoft.BatchAI/locations/{}/usages",
            operation_config.base_path(),
            subscription_id,
            location
        );
        let mut url = url::Url::parse(&raw_url).map_err(|source| list::Error::ParseUrlError { source })?;
        url.query_pairs_mut()
            .append_pair("api-version", operation_config.api_version());
        let mut builder = http::request::Builder::new().method(http::Method::GET);
        // Bearer authentication is attached only when the configuration
        // provides a token credential.
        if let Some(credential) = operation_config.token_credential() {
            let token = credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(|source| list::Error::GetTokenError { source })?;
            builder = builder.header(
                http::header::AUTHORIZATION,
                format!("Bearer {}", token.token.secret()),
            );
        }
        let request = builder
            .uri(url.as_str())
            .body(bytes::Bytes::from_static(azure_core::EMPTY_BODY))
            .map_err(|source| list::Error::BuildRequestError { source })?;
        let response = client
            .execute_request(request)
            .await
            .map_err(|source| list::Error::ExecuteRequestError { source })?;
        match response.status() {
            http::StatusCode::OK => {
                let payload = response.body();
                serde_json::from_slice::<ListUsagesResult>(payload).map_err(|source| {
                    list::Error::DeserializeError {
                        source,
                        body: payload.clone(),
                    }
                })
            }
            status_code => Err(list::Error::UnexpectedResponse {
                status_code,
                body: response.body().clone(),
            }),
        }
    }
    /// Error type for the sibling `list` function.
    pub mod list {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {}", source)]
            ParseUrlError { source: url::ParseError },
            #[error("Failed to build request: {}", source)]
            BuildRequestError { source: http::Error },
            #[error("Failed to execute request: {}", source)]
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to serialize request body: {}", source)]
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to deserialize response body: {}", source)]
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            #[error("Failed to get access token: {}", source)]
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod clusters {
use crate::models::*;
    /// Retrieves a single Batch AI cluster.
    ///
    /// Sends `GET .../resourceGroups/{rg}/providers/Microsoft.BatchAI/clusters/{name}`.
    /// A `200 OK` body is deserialized as `Cluster`; every other status is
    /// decoded as a `CloudError` and surfaced via `get::Error::DefaultResponse`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        cluster_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<Cluster, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/clusters/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            cluster_name
        );
        let mut url = url::Url::parse(url_str).map_err(|source| get::Error::ParseUrlError { source })?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is optional: attached only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(|source| get::Error::GetTokenError { source })?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(|source| get::Error::BuildRequestError { source })?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(|source| get::Error::ExecuteRequestError { source })?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Cluster = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError {
                    source,
                    body: rsp_body.clone(),
                })?;
                Ok(rsp_value)
            }
            // Non-200 responses carry a CloudError payload describing the failure.
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: CloudError = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError {
                    source,
                    body: rsp_body.clone(),
                })?;
                Err(get::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the sibling `get` function.
    pub mod get {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {}", source)]
            ParseUrlError { source: url::ParseError },
            #[error("Failed to build request: {}", source)]
            BuildRequestError { source: http::Error },
            #[error("Failed to execute request: {}", source)]
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to serialize request body: {}", source)]
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to deserialize response body: {}", source)]
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            #[error("Failed to get access token: {}", source)]
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Creates a Batch AI cluster.
    ///
    /// Sends `PUT` with `parameters` serialized as the JSON body. A `200 OK`
    /// body is deserialized as `Cluster` (`Response::Ok200`); a `202 Accepted`
    /// yields `Response::Accepted202` with no body — presumably a long-running
    /// operation to be polled separately (not handled here; confirm against
    /// the service's LRO behavior). Other statuses decode as `CloudError`.
    pub async fn create(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        cluster_name: &str,
        parameters: &ClusterCreateParameters,
        subscription_id: &str,
    ) -> std::result::Result<create::Response, create::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/clusters/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            cluster_name
        );
        let mut url = url::Url::parse(url_str).map_err(|source| create::Error::ParseUrlError { source })?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        // Bearer auth is optional: attached only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(|source| create::Error::GetTokenError { source })?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = azure_core::to_json(parameters).map_err(|source| create::Error::SerializeError { source })?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(|source| create::Error::BuildRequestError { source })?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(|source| create::Error::ExecuteRequestError { source })?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Cluster = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError {
                    source,
                    body: rsp_body.clone(),
                })?;
                Ok(create::Response::Ok200(rsp_value))
            }
            http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202),
            // Non-200/202 responses carry a CloudError payload.
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: CloudError = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError {
                    source,
                    body: rsp_body.clone(),
                })?;
                Err(create::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Response and error types for the sibling `create` function.
    pub mod create {
        use crate::{models, models::*};
        // Success cases: 200 with a Cluster body, or 202 with no body.
        #[derive(Debug)]
        pub enum Response {
            Ok200(Cluster),
            Accepted202,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {}", source)]
            ParseUrlError { source: url::ParseError },
            #[error("Failed to build request: {}", source)]
            BuildRequestError { source: http::Error },
            #[error("Failed to execute request: {}", source)]
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to serialize request body: {}", source)]
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to deserialize response body: {}", source)]
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            #[error("Failed to get access token: {}", source)]
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Updates properties of an existing Batch AI cluster.
    ///
    /// Sends `PATCH` with `parameters` serialized as the JSON body. A `200 OK`
    /// body is deserialized as `Cluster`; every other status is decoded as a
    /// `CloudError` and surfaced via `update::Error::DefaultResponse`.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        cluster_name: &str,
        parameters: &ClusterUpdateParameters,
        subscription_id: &str,
    ) -> std::result::Result<Cluster, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/clusters/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            cluster_name
        );
        let mut url = url::Url::parse(url_str).map_err(|source| update::Error::ParseUrlError { source })?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        // Bearer auth is optional: attached only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(|source| update::Error::GetTokenError { source })?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = azure_core::to_json(parameters).map_err(|source| update::Error::SerializeError { source })?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(|source| update::Error::BuildRequestError { source })?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(|source| update::Error::ExecuteRequestError { source })?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Cluster = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError {
                    source,
                    body: rsp_body.clone(),
                })?;
                Ok(rsp_value)
            }
            // Non-200 responses carry a CloudError payload.
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: CloudError = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError {
                    source,
                    body: rsp_body.clone(),
                })?;
                Err(update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the sibling `update` function.
    pub mod update {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {}", source)]
            ParseUrlError { source: url::ParseError },
            #[error("Failed to build request: {}", source)]
            BuildRequestError { source: http::Error },
            #[error("Failed to execute request: {}", source)]
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to serialize request body: {}", source)]
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to deserialize response body: {}", source)]
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            #[error("Failed to get access token: {}", source)]
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Deletes a Batch AI cluster.
    ///
    /// Sends `DELETE` on the cluster resource URL. Success maps 200/202/204 to
    /// the corresponding `delete::Response` variant (no body is read on
    /// success); any other status is decoded as a `CloudError`.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        cluster_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/clusters/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            cluster_name
        );
        let mut url = url::Url::parse(url_str).map_err(|source| delete::Error::ParseUrlError { source })?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        // Bearer auth is optional: attached only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(|source| delete::Error::GetTokenError { source })?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(|source| delete::Error::BuildRequestError { source })?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(|source| delete::Error::ExecuteRequestError { source })?;
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            // Any other status carries a CloudError payload.
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: CloudError = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError {
                    source,
                    body: rsp_body.clone(),
                })?;
                Err(delete::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Response and error types for the sibling `delete` function.
    pub mod delete {
        use crate::{models, models::*};
        // Success cases: 200, 202 (accepted), 204 (no content) — all bodiless.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {}", source)]
            ParseUrlError { source: url::ParseError },
            #[error("Failed to build request: {}", source)]
            BuildRequestError { source: http::Error },
            #[error("Failed to execute request: {}", source)]
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to serialize request body: {}", source)]
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to deserialize response body: {}", source)]
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            #[error("Failed to get access token: {}", source)]
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Retrieves remote-login information for the nodes of a cluster.
    ///
    /// Sends `POST .../clusters/{name}/listRemoteLoginInformation` with an
    /// empty body and an explicit `Content-Length: 0` header. A `200 OK` body
    /// deserializes as `RemoteLoginInformationListResult`; other statuses
    /// decode as `CloudError`.
    pub async fn list_remote_login_information(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        cluster_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<RemoteLoginInformationListResult, list_remote_login_information::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/clusters/{}/listRemoteLoginInformation",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            cluster_name
        );
        let mut url = url::Url::parse(url_str).map_err(|source| list_remote_login_information::Error::ParseUrlError { source })?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Bearer auth is optional: attached only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(|source| list_remote_login_information::Error::GetTokenError { source })?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        // Empty POST: the request advertises a zero-length body explicitly.
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(|source| list_remote_login_information::Error::BuildRequestError { source })?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(|source| list_remote_login_information::Error::ExecuteRequestError { source })?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: RemoteLoginInformationListResult =
                    serde_json::from_slice(rsp_body).map_err(|source| list_remote_login_information::Error::DeserializeError {
                        source,
                        body: rsp_body.clone(),
                    })?;
                Ok(rsp_value)
            }
            // Non-200 responses carry a CloudError payload.
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: CloudError =
                    serde_json::from_slice(rsp_body).map_err(|source| list_remote_login_information::Error::DeserializeError {
                        source,
                        body: rsp_body.clone(),
                    })?;
                Err(list_remote_login_information::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the sibling `list_remote_login_information` function.
    pub mod list_remote_login_information {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {}", source)]
            ParseUrlError { source: url::ParseError },
            #[error("Failed to build request: {}", source)]
            BuildRequestError { source: http::Error },
            #[error("Failed to execute request: {}", source)]
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to serialize request body: {}", source)]
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to deserialize response body: {}", source)]
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            #[error("Failed to get access token: {}", source)]
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Lists all Batch AI clusters in a subscription.
    ///
    /// Sends `GET .../providers/Microsoft.BatchAI/clusters`, forwarding the
    /// optional OData `$filter`/`$select` and `maxresults` query parameters
    /// only when provided. A `200 OK` body deserializes as
    /// `ClusterListResult`; other statuses decode as `CloudError`.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        filter: Option<&str>,
        select: Option<&str>,
        maxresults: Option<i32>,
        subscription_id: &str,
    ) -> std::result::Result<ClusterListResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.BatchAI/clusters",
            operation_config.base_path(),
            subscription_id
        );
        let mut url = url::Url::parse(url_str).map_err(|source| list::Error::ParseUrlError { source })?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is optional: attached only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(|source| list::Error::GetTokenError { source })?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Optional query parameters are appended only when the caller set them.
        if let Some(filter) = filter {
            url.query_pairs_mut().append_pair("$filter", filter);
        }
        if let Some(select) = select {
            url.query_pairs_mut().append_pair("$select", select);
        }
        if let Some(maxresults) = maxresults {
            url.query_pairs_mut().append_pair("maxresults", maxresults.to_string().as_str());
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(|source| list::Error::BuildRequestError { source })?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(|source| list::Error::ExecuteRequestError { source })?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: ClusterListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError {
                    source,
                    body: rsp_body.clone(),
                })?;
                Ok(rsp_value)
            }
            // Non-200 responses carry a CloudError payload.
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: CloudError = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError {
                    source,
                    body: rsp_body.clone(),
                })?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the sibling `list` function.
    pub mod list {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {}", source)]
            ParseUrlError { source: url::ParseError },
            #[error("Failed to build request: {}", source)]
            BuildRequestError { source: http::Error },
            #[error("Failed to execute request: {}", source)]
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to serialize request body: {}", source)]
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            #[error("Failed to deserialize response body: {}", source)]
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            #[error("Failed to get access token: {}", source)]
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Lists Batch AI clusters within one resource group.
    ///
    /// Same shape as `list`, scoped by `resource_group_name`, with optional
    /// `$filter`/`$select`/`maxresults` query parameters. A `200 OK` body
    /// deserializes as `ClusterListResult`; other statuses decode as
    /// `CloudError`.
    pub async fn list_by_resource_group(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        filter: Option<&str>,
        select: Option<&str>,
        maxresults: Option<i32>,
        subscription_id: &str,
    ) -> std::result::Result<ClusterListResult, list_by_resource_group::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/clusters",
            operation_config.base_path(),
            subscription_id,
            resource_group_name
        );
        let mut url = url::Url::parse(url_str).map_err(|source| list_by_resource_group::Error::ParseUrlError { source })?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is optional: attached only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(|source| list_by_resource_group::Error::GetTokenError { source })?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Optional query parameters are appended only when the caller set them.
        if let Some(filter) = filter {
            url.query_pairs_mut().append_pair("$filter", filter);
        }
        if let Some(select) = select {
            url.query_pairs_mut().append_pair("$select", select);
        }
        if let Some(maxresults) = maxresults {
            url.query_pairs_mut().append_pair("maxresults", maxresults.to_string().as_str());
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(|source| list_by_resource_group::Error::BuildRequestError { source })?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(|source| list_by_resource_group::Error::ExecuteRequestError { source })?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: ClusterListResult =
                    serde_json::from_slice(rsp_body).map_err(|source| list_by_resource_group::Error::DeserializeError {
                        source,
                        body: rsp_body.clone(),
                    })?;
                Ok(rsp_value)
            }
            // Non-200 responses carry a CloudError payload.
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: CloudError =
                    serde_json::from_slice(rsp_body).map_err(|source| list_by_resource_group::Error::DeserializeError {
                        source,
                        body: rsp_body.clone(),
                    })?;
                Err(list_by_resource_group::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
pub mod list_by_resource_group {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {}", source)]
ParseUrlError { source: url::ParseError },
#[error("Failed to build request: {}", source)]
BuildRequestError { source: http::Error },
#[error("Failed to execute request: {}", source)]
ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to serialize request body: {}", source)]
SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to deserialize response body: {}", source)]
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
#[error("Failed to get access token: {}", source)]
GetTokenError { source: azure_core::errors::AzureError },
}
}
}
pub mod jobs {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
job_name: &str,
subscription_id: &str,
) -> std::result::Result<Job, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/jobs/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
job_name
);
let mut url = url::Url::parse(url_str).map_err(|source| get::Error::ParseUrlError { source })?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(|source| get::Error::GetTokenError { source })?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(|source| get::Error::BuildRequestError { source })?;
let rsp = http_client
.execute_request(req)
.await
.map_err(|source| get::Error::ExecuteRequestError { source })?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Job = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {}", source)]
ParseUrlError { source: url::ParseError },
#[error("Failed to build request: {}", source)]
BuildRequestError { source: http::Error },
#[error("Failed to execute request: {}", source)]
ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to serialize request body: {}", source)]
SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to deserialize response body: {}", source)]
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
#[error("Failed to get access token: {}", source)]
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn create(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
job_name: &str,
parameters: &JobCreateParameters,
subscription_id: &str,
) -> std::result::Result<create::Response, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/jobs/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
job_name
);
let mut url = url::Url::parse(url_str).map_err(|source| create::Error::ParseUrlError { source })?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(|source| create::Error::GetTokenError { source })?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).map_err(|source| create::Error::SerializeError { source })?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(|source| create::Error::BuildRequestError { source })?;
let rsp = http_client
.execute_request(req)
.await
.map_err(|source| create::Error::ExecuteRequestError { source })?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Job = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Ok(create::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Err(create::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(Job),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {}", source)]
ParseUrlError { source: url::ParseError },
#[error("Failed to build request: {}", source)]
BuildRequestError { source: http::Error },
#[error("Failed to execute request: {}", source)]
ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to serialize request body: {}", source)]
SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to deserialize response body: {}", source)]
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
#[error("Failed to get access token: {}", source)]
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
job_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/jobs/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
job_name
);
let mut url = url::Url::parse(url_str).map_err(|source| delete::Error::ParseUrlError { source })?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(|source| delete::Error::GetTokenError { source })?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(|source| delete::Error::BuildRequestError { source })?;
let rsp = http_client
.execute_request(req)
.await
.map_err(|source| delete::Error::ExecuteRequestError { source })?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {}", source)]
ParseUrlError { source: url::ParseError },
#[error("Failed to build request: {}", source)]
BuildRequestError { source: http::Error },
#[error("Failed to execute request: {}", source)]
ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to serialize request body: {}", source)]
SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to deserialize response body: {}", source)]
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
#[error("Failed to get access token: {}", source)]
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn list_remote_login_information(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
job_name: &str,
subscription_id: &str,
) -> std::result::Result<RemoteLoginInformationListResult, list_remote_login_information::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/jobs/{}/listRemoteLoginInformation",
operation_config.base_path(),
subscription_id,
resource_group_name,
job_name
);
let mut url = url::Url::parse(url_str).map_err(|source| list_remote_login_information::Error::ParseUrlError { source })?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(|source| list_remote_login_information::Error::GetTokenError { source })?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(|source| list_remote_login_information::Error::BuildRequestError { source })?;
let rsp = http_client
.execute_request(req)
.await
.map_err(|source| list_remote_login_information::Error::ExecuteRequestError { source })?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RemoteLoginInformationListResult =
serde_json::from_slice(rsp_body).map_err(|source| list_remote_login_information::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).map_err(|source| list_remote_login_information::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Err(list_remote_login_information::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_remote_login_information {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {}", source)]
ParseUrlError { source: url::ParseError },
#[error("Failed to build request: {}", source)]
BuildRequestError { source: http::Error },
#[error("Failed to execute request: {}", source)]
ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to serialize request body: {}", source)]
SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to deserialize response body: {}", source)]
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
#[error("Failed to get access token: {}", source)]
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn terminate(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
job_name: &str,
subscription_id: &str,
) -> std::result::Result<terminate::Response, terminate::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/jobs/{}/terminate",
operation_config.base_path(),
subscription_id,
resource_group_name,
job_name
);
let mut url = url::Url::parse(url_str).map_err(|source| terminate::Error::ParseUrlError { source })?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(|source| terminate::Error::GetTokenError { source })?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(|source| terminate::Error::BuildRequestError { source })?;
let rsp = http_client
.execute_request(req)
.await
.map_err(|source| terminate::Error::ExecuteRequestError { source })?;
match rsp.status() {
http::StatusCode::OK => Ok(terminate::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(terminate::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).map_err(|source| terminate::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Err(terminate::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod terminate {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {}", source)]
ParseUrlError { source: url::ParseError },
#[error("Failed to build request: {}", source)]
BuildRequestError { source: http::Error },
#[error("Failed to execute request: {}", source)]
ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to serialize request body: {}", source)]
SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to deserialize response body: {}", source)]
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
#[error("Failed to get access token: {}", source)]
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn list(
operation_config: &crate::OperationConfig,
filter: Option<&str>,
select: Option<&str>,
maxresults: Option<i32>,
subscription_id: &str,
) -> std::result::Result<JobListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.BatchAI/jobs",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(|source| list::Error::ParseUrlError { source })?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(|source| list::Error::GetTokenError { source })?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(select) = select {
url.query_pairs_mut().append_pair("$select", select);
}
if let Some(maxresults) = maxresults {
url.query_pairs_mut().append_pair("maxresults", maxresults.to_string().as_str());
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(|source| list::Error::BuildRequestError { source })?;
let rsp = http_client
.execute_request(req)
.await
.map_err(|source| list::Error::ExecuteRequestError { source })?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: JobListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {}", source)]
ParseUrlError { source: url::ParseError },
#[error("Failed to build request: {}", source)]
BuildRequestError { source: http::Error },
#[error("Failed to execute request: {}", source)]
ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to serialize request body: {}", source)]
SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to deserialize response body: {}", source)]
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
#[error("Failed to get access token: {}", source)]
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
filter: Option<&str>,
select: Option<&str>,
maxresults: Option<i32>,
subscription_id: &str,
) -> std::result::Result<JobListResult, list_by_resource_group::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/jobs",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(|source| list_by_resource_group::Error::ParseUrlError { source })?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(|source| list_by_resource_group::Error::GetTokenError { source })?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(select) = select {
url.query_pairs_mut().append_pair("$select", select);
}
if let Some(maxresults) = maxresults {
url.query_pairs_mut().append_pair("maxresults", maxresults.to_string().as_str());
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(|source| list_by_resource_group::Error::BuildRequestError { source })?;
let rsp = http_client
.execute_request(req)
.await
.map_err(|source| list_by_resource_group::Error::ExecuteRequestError { source })?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: JobListResult =
serde_json::from_slice(rsp_body).map_err(|source| list_by_resource_group::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).map_err(|source| list_by_resource_group::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Err(list_by_resource_group::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_by_resource_group {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {}", source)]
ParseUrlError { source: url::ParseError },
#[error("Failed to build request: {}", source)]
BuildRequestError { source: http::Error },
#[error("Failed to execute request: {}", source)]
ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to serialize request body: {}", source)]
SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to deserialize response body: {}", source)]
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
#[error("Failed to get access token: {}", source)]
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn list_output_files(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
job_name: &str,
outputdirectoryid: &str,
directory: Option<&str>,
linkexpiryinminutes: Option<i32>,
maxresults: Option<i32>,
subscription_id: &str,
) -> std::result::Result<FileListResult, list_output_files::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/jobs/{}/listOutputFiles",
operation_config.base_path(),
subscription_id,
resource_group_name,
job_name
);
let mut url = url::Url::parse(url_str).map_err(|source| list_output_files::Error::ParseUrlError { source })?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(|source| list_output_files::Error::GetTokenError { source })?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
url.query_pairs_mut().append_pair("outputdirectoryid", outputdirectoryid);
if let Some(directory) = directory {
url.query_pairs_mut().append_pair("directory", directory);
}
if let Some(linkexpiryinminutes) = linkexpiryinminutes {
url.query_pairs_mut()
.append_pair("linkexpiryinminutes", linkexpiryinminutes.to_string().as_str());
}
if let Some(maxresults) = maxresults {
url.query_pairs_mut().append_pair("maxresults", maxresults.to_string().as_str());
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(|source| list_output_files::Error::BuildRequestError { source })?;
let rsp = http_client
.execute_request(req)
.await
.map_err(|source| list_output_files::Error::ExecuteRequestError { source })?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: FileListResult =
serde_json::from_slice(rsp_body).map_err(|source| list_output_files::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).map_err(|source| list_output_files::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Err(list_output_files::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_output_files {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {}", source)]
ParseUrlError { source: url::ParseError },
#[error("Failed to build request: {}", source)]
BuildRequestError { source: http::Error },
#[error("Failed to execute request: {}", source)]
ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to serialize request body: {}", source)]
SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to deserialize response body: {}", source)]
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
#[error("Failed to get access token: {}", source)]
GetTokenError { source: azure_core::errors::AzureError },
}
}
}
pub mod file_servers {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
file_server_name: &str,
subscription_id: &str,
) -> std::result::Result<FileServer, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/fileServers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
file_server_name
);
let mut url = url::Url::parse(url_str).map_err(|source| get::Error::ParseUrlError { source })?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(|source| get::Error::GetTokenError { source })?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(|source| get::Error::BuildRequestError { source })?;
let rsp = http_client
.execute_request(req)
.await
.map_err(|source| get::Error::ExecuteRequestError { source })?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: FileServer = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {}", source)]
ParseUrlError { source: url::ParseError },
#[error("Failed to build request: {}", source)]
BuildRequestError { source: http::Error },
#[error("Failed to execute request: {}", source)]
ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to serialize request body: {}", source)]
SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
#[error("Failed to deserialize response body: {}", source)]
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
#[error("Failed to get access token: {}", source)]
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn create(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
file_server_name: &str,
parameters: &FileServerCreateParameters,
subscription_id: &str,
) -> std::result::Result<create::Response, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/fileServers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
file_server_name
);
let mut url = url::Url::parse(url_str).map_err(|source| create::Error::ParseUrlError { source })?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(|source| create::Error::GetTokenError { source })?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).map_err(|source| create::Error::SerializeError { source })?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(|source| create::Error::BuildRequestError { source })?;
let rsp = http_client
.execute_request(req)
.await
.map_err(|source| create::Error::ExecuteRequestError { source })?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: FileServer = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Ok(create::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Err(create::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Response and error types for the `create` operation.
pub mod create {
    use crate::{models, models::*};
    /// Successful outcomes of the create operation.
    #[derive(Debug)]
    pub enum Response {
        /// 200 OK: the request completed synchronously; the body holds the resulting file server.
        Ok200(FileServer),
        /// 202 Accepted: creation started asynchronously; no body is parsed.
        Accepted202,
    }
    /// Failure modes of the create operation.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// The service returned a non-success status with a `CloudError` payload.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        /// The request URL could not be parsed.
        #[error("Failed to parse request URL: {}", source)]
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be assembled.
        #[error("Failed to build request: {}", source)]
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        #[error("Failed to execute request: {}", source)]
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        #[error("Failed to serialize request body: {}", source)]
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be parsed; `body` keeps the raw bytes.
        #[error("Failed to deserialize response body: {}", source)]
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// Acquiring an access token from the credential failed.
        #[error("Failed to get access token: {}", source)]
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
file_server_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/fileServers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
file_server_name
);
let mut url = url::Url::parse(url_str).map_err(|source| delete::Error::ParseUrlError { source })?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(|source| delete::Error::GetTokenError { source })?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(|source| delete::Error::BuildRequestError { source })?;
let rsp = http_client
.execute_request(req)
.await
.map_err(|source| delete::Error::ExecuteRequestError { source })?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Response and error types for the `delete` operation.
pub mod delete {
    use crate::{models, models::*};
    /// Successful outcomes of the delete operation.
    #[derive(Debug)]
    pub enum Response {
        /// 200 OK: deletion completed synchronously.
        Ok200,
        /// 202 Accepted: deletion started asynchronously.
        Accepted202,
        /// 204 No Content — presumably returned when there was nothing to
        /// delete; confirm against the service specification.
        NoContent204,
    }
    /// Failure modes of the delete operation.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// The service returned a non-success status with a `CloudError` payload.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        /// The request URL could not be parsed.
        #[error("Failed to parse request URL: {}", source)]
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be assembled.
        #[error("Failed to build request: {}", source)]
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        #[error("Failed to execute request: {}", source)]
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        #[error("Failed to serialize request body: {}", source)]
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be parsed; `body` keeps the raw bytes.
        #[error("Failed to deserialize response body: {}", source)]
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// Acquiring an access token from the credential failed.
        #[error("Failed to get access token: {}", source)]
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
pub async fn list(
operation_config: &crate::OperationConfig,
filter: Option<&str>,
select: Option<&str>,
maxresults: Option<i32>,
subscription_id: &str,
) -> std::result::Result<FileServerListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.BatchAI/fileServers",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(|source| list::Error::ParseUrlError { source })?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(|source| list::Error::GetTokenError { source })?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(select) = select {
url.query_pairs_mut().append_pair("$select", select);
}
if let Some(maxresults) = maxresults {
url.query_pairs_mut().append_pair("maxresults", maxresults.to_string().as_str());
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(|source| list::Error::BuildRequestError { source })?;
let rsp = http_client
.execute_request(req)
.await
.map_err(|source| list::Error::ExecuteRequestError { source })?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: FileServerListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Error types for the `list` operation (its success type,
/// `FileServerListResult`, is returned directly by the function).
pub mod list {
    use crate::{models, models::*};
    /// Failure modes of the list operation.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// The service returned a non-success status with a `CloudError` payload.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        /// The request URL could not be parsed.
        #[error("Failed to parse request URL: {}", source)]
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be assembled.
        #[error("Failed to build request: {}", source)]
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        #[error("Failed to execute request: {}", source)]
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        #[error("Failed to serialize request body: {}", source)]
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be parsed; `body` keeps the raw bytes.
        #[error("Failed to deserialize response body: {}", source)]
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// Acquiring an access token from the credential failed.
        #[error("Failed to get access token: {}", source)]
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
filter: Option<&str>,
select: Option<&str>,
maxresults: Option<i32>,
subscription_id: &str,
) -> std::result::Result<FileServerListResult, list_by_resource_group::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BatchAI/fileServers",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(|source| list_by_resource_group::Error::ParseUrlError { source })?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(|source| list_by_resource_group::Error::GetTokenError { source })?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(select) = select {
url.query_pairs_mut().append_pair("$select", select);
}
if let Some(maxresults) = maxresults {
url.query_pairs_mut().append_pair("maxresults", maxresults.to_string().as_str());
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(|source| list_by_resource_group::Error::BuildRequestError { source })?;
let rsp = http_client
.execute_request(req)
.await
.map_err(|source| list_by_resource_group::Error::ExecuteRequestError { source })?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: FileServerListResult =
serde_json::from_slice(rsp_body).map_err(|source| list_by_resource_group::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).map_err(|source| list_by_resource_group::Error::DeserializeError {
source,
body: rsp_body.clone(),
})?;
Err(list_by_resource_group::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Error types for the `list_by_resource_group` operation (its success type,
/// `FileServerListResult`, is returned directly by the function).
pub mod list_by_resource_group {
    use crate::{models, models::*};
    /// Failure modes of the list-by-resource-group operation.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// The service returned a non-success status with a `CloudError` payload.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        /// The request URL could not be parsed.
        #[error("Failed to parse request URL: {}", source)]
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be assembled.
        #[error("Failed to build request: {}", source)]
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        #[error("Failed to execute request: {}", source)]
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        #[error("Failed to serialize request body: {}", source)]
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be parsed; `body` keeps the raw bytes.
        #[error("Failed to deserialize response body: {}", source)]
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// Acquiring an access token from the credential failed.
        #[error("Failed to get access token: {}", source)]
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
}
| 47.426942 | 135 | 0.563249 |
33ea058c2c4b10dff4e03420efd61eac96babc84
| 13,660 |
//! Definitions of network messages.
use std::{error::Error, fmt, net, sync::Arc};
use chrono::{DateTime, Utc};
use zebra_chain::{
block::{self, Block},
transaction::UnminedTx,
};
use crate::meta_addr::MetaAddr;
use super::{inv::InventoryHash, types::*};
#[cfg(any(test, feature = "proptest-impl"))]
use proptest_derive::Arbitrary;
#[cfg(any(test, feature = "proptest-impl"))]
use zebra_chain::serialization::arbitrary::datetime_full;
/// A Bitcoin-like network message for the Zcash protocol.
///
/// The Zcash network protocol is mostly inherited from Bitcoin, and a list of
/// Bitcoin network messages can be found [on the Bitcoin
/// wiki][btc_wiki_protocol].
///
/// That page describes the wire format of the messages, while this enum stores
/// an internal representation. The internal representation is unlinked from the
/// wire format, and the translation between the two happens only during
/// serialization and deserialization. For instance, Bitcoin identifies messages
/// by a 12-byte ascii command string; we consider this a serialization detail
/// and use the enum discriminant instead. (As a side benefit, this also means
/// that we have a clearly-defined validation boundary for network messages
/// during serialization).
///
/// [btc_wiki_protocol]: https://en.bitcoin.it/wiki/Protocol_documentation
#[derive(Clone, Eq, PartialEq, Debug)]
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))]
pub enum Message {
    /// A `version` message.
    ///
    /// Note that although this is called `version` in Bitcoin, its role is really
    /// analogous to a `ClientHello` message in TLS, used to begin a handshake, and
    /// is distinct from a simple version number.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#version)
    Version {
        /// The network version number supported by the sender.
        version: Version,
        /// The network services advertised by the sender.
        services: PeerServices,
        /// The time when the version message was sent.
        ///
        /// This is a 64-bit field. Zebra rejects out-of-range times as invalid.
        #[cfg_attr(
            any(test, feature = "proptest-impl"),
            proptest(strategy = "datetime_full()")
        )]
        timestamp: DateTime<Utc>,
        /// The network address of the node receiving this message, and its
        /// advertised network services.
        ///
        /// Q: how does the handshake know the remote peer's services already?
        address_recv: (PeerServices, net::SocketAddr),
        /// The network address of the node sending this message, and its
        /// advertised network services.
        address_from: (PeerServices, net::SocketAddr),
        /// Node random nonce, randomly generated every time a version
        /// packet is sent. This nonce is used to detect connections
        /// to self.
        nonce: Nonce,
        /// The Zcash user agent advertised by the sender.
        user_agent: String,
        /// The last block received by the emitting node.
        start_height: block::Height,
        /// Whether the remote peer should announce relayed
        /// transactions or not, see [BIP 0037](https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki)
        relay: bool,
    },
    /// A `verack` message.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#verack)
    Verack,
    /// A `ping` message.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#ping)
    Ping(
        /// A nonce unique to this [`Message::Ping`] message.
        Nonce,
    ),
    /// A `pong` message.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#pong)
    Pong(
        /// The nonce from the [`Message::Ping`] message this was in response to.
        Nonce,
    ),
    /// A `reject` message.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#reject)
    Reject {
        /// Type of message rejected.
        // It's unclear if this is strictly limited to message command
        // codes, so leaving it a String.
        message: String,
        /// RejectReason code relating to rejected message.
        ccode: RejectReason,
        /// Human-readable version of rejection reason.
        reason: String,
        /// Optional extra data provided for some errors.
        // Currently, all errors which provide this field fill it with
        // the TXID or block header hash of the object being rejected,
        // so the field is 32 bytes.
        //
        // Q: can we tell Rust that this field is optional? Or just
        // default its value to an empty array, I guess.
        data: Option<[u8; 32]>,
    },
    /// A `getaddr` message.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#getaddr)
    GetAddr,
    /// An `addr` message.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#addr)
    Addr(Vec<MetaAddr>),
    /// A `getblocks` message.
    ///
    /// `known_blocks` is a series of known block hashes spaced out along the
    /// peer's best chain. The remote peer uses them to compute the intersection
    /// of its best chain and determine the blocks following the intersection
    /// point.
    ///
    /// The peer responds with an `inv` packet with the hashes of subsequent blocks.
    /// If supplied, the `stop` parameter specifies the last header to request.
    /// Otherwise, an inv packet with the maximum number (500) are sent.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#getheaders)
    GetBlocks {
        /// Hashes of known blocks, ordered from highest height to lowest height.
        known_blocks: Vec<block::Hash>,
        /// Optionally, the last header to request.
        stop: Option<block::Hash>,
    },
    /// An `inv` message.
    ///
    /// Allows a node to advertise its knowledge of one or more
    /// objects. It can be received unsolicited, or in reply to
    /// `getblocks`.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#inv)
    /// [ZIP-239](https://zips.z.cash/zip-0239)
    Inv(Vec<InventoryHash>),
    /// A `getheaders` message.
    ///
    /// `known_blocks` is a series of known block hashes spaced out along the
    /// peer's best chain. The remote peer uses them to compute the intersection
    /// of its best chain and determine the blocks following the intersection
    /// point.
    ///
    /// The peer responds with an `headers` packet with the headers of subsequent blocks.
    /// If supplied, the `stop` parameter specifies the last header to request.
    /// Otherwise, the maximum number of block headers (160) are sent.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#getheaders)
    GetHeaders {
        /// Hashes of known blocks, ordered from highest height to lowest height.
        known_blocks: Vec<block::Hash>,
        /// Optionally, the last header to request.
        stop: Option<block::Hash>,
    },
    /// A `headers` message.
    ///
    /// Returns block headers in response to a getheaders packet.
    ///
    /// Each block header is accompanied by a transaction count.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#headers)
    Headers(Vec<block::CountedHeader>),
    /// A `getdata` message.
    ///
    /// `getdata` is used in response to `inv`, to retrieve the
    /// content of a specific object, and is usually sent after
    /// receiving an `inv` packet, after filtering known elements.
    ///
    /// `zcashd` returns requested items in a single batch of messages.
    /// Missing blocks are silently skipped. Missing transaction hashes are
    /// included in a single `NotFound` message following the transactions.
    /// Other item or non-item messages can come before or after the batch.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#getdata)
    /// [ZIP-239](https://zips.z.cash/zip-0239)
    /// [zcashd code](https://github.com/zcash/zcash/blob/e7b425298f6d9a54810cb7183f00be547e4d9415/src/main.cpp#L5523)
    GetData(Vec<InventoryHash>),
    /// A `block` message.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#block)
    Block(Arc<Block>),
    /// A `tx` message.
    ///
    /// This message is used to advertise unmined transactions for the mempool.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#tx)
    Tx(UnminedTx),
    /// A `notfound` message.
    ///
    /// When a peer requests a list of transaction hashes, `zcashd` returns:
    /// - a batch of messages containing found transactions, then
    /// - a `NotFound` message containing a list of transaction hashes that
    ///   aren't available in its mempool or state.
    ///
    /// But when a peer requests blocks or headers, any missing items are
    /// silently skipped, without any `NotFound` messages.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#notfound)
    /// [ZIP-239](https://zips.z.cash/zip-0239)
    /// [zcashd code](https://github.com/zcash/zcash/blob/e7b425298f6d9a54810cb7183f00be547e4d9415/src/main.cpp#L5632)
    // See note above on `Inventory`.
    NotFound(Vec<InventoryHash>),
    /// A `mempool` message.
    ///
    /// This was defined in [BIP35], which is included in Zcash.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#mempool)
    /// [BIP35]: https://github.com/bitcoin/bips/blob/master/bip-0035.mediawiki
    Mempool,
    /// A `filterload` message.
    ///
    /// This was defined in [BIP37], which is included in Zcash.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#filterload.2C_filteradd.2C_filterclear.2C_merkleblock)
    /// [BIP37]: https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki
    FilterLoad {
        /// The filter itself is simply a bit field of arbitrary
        /// byte-aligned size. The maximum size is 36,000 bytes.
        filter: Filter,
        /// The number of hash functions to use in this filter. The
        /// maximum value allowed in this field is 50.
        hash_functions_count: u32,
        /// A random value to add to the seed value in the hash
        /// function used by the bloom filter.
        tweak: Tweak,
        /// A set of flags that control how matched items are added to the filter.
        flags: u8,
    },
    /// A `filteradd` message.
    ///
    /// This was defined in [BIP37], which is included in Zcash.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#filterload.2C_filteradd.2C_filterclear.2C_merkleblock)
    /// [BIP37]: https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki
    FilterAdd {
        /// The data element to add to the current filter.
        // The data field must be smaller than or equal to 520 bytes
        // in size (the maximum size of any potentially matched
        // object).
        //
        // A Vec instead of [u8; 520] because of needed traits.
        data: Vec<u8>,
    },
    /// A `filterclear` message.
    ///
    /// This was defined in [BIP37], which is included in Zcash.
    ///
    /// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#filterload.2C_filteradd.2C_filterclear.2C_merkleblock)
    /// [BIP37]: https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki
    FilterClear,
}
impl<E> From<E> for Message
where
    E: Error,
{
    /// Converts any error into a generic `reject` message.
    ///
    /// Uses the catch-all [`RejectReason::Other`] code; impls for specific
    /// error types should use specific varieties of `RejectReason` and can
    /// populate `data` where appropriate.
    fn from(e: E) -> Self {
        Message::Reject {
            message: e.to_string(),
            // The generic case, impls for specific error types should
            // use specific varieties of `RejectReason`.
            ccode: RejectReason::Other,
            // Bug fix: this previously called `e.source().unwrap()`, which
            // panics for any error without an underlying source (the common
            // case). Fall back to an empty reason instead.
            reason: e.source().map(|s| s.to_string()).unwrap_or_default(),
            // Allow this to be overridden but not populated by default, methinks.
            data: None,
        }
    }
}
/// Reject Reason CCodes
///
/// These are the numeric `ccode` values carried by a `reject` message; the
/// variant meanings below follow the linked Bitcoin protocol documentation.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#reject)
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))]
#[repr(u8)]
#[allow(missing_docs)]
pub enum RejectReason {
    /// The rejected message could not be decoded.
    Malformed = 0x01,
    /// The rejected message was invalid.
    Invalid = 0x10,
    /// The rejected message uses an obsolete protocol version.
    Obsolete = 0x11,
    /// The rejected message duplicates a previously-received one.
    Duplicate = 0x12,
    /// The transaction is non-standard.
    Nonstandard = 0x40,
    /// The transaction contains dust outputs.
    Dust = 0x41,
    /// The transaction fee is insufficient.
    InsufficientFee = 0x42,
    /// The block conflicts with a checkpoint.
    Checkpoint = 0x43,
    /// Any other rejection reason.
    Other = 0x50,
}
impl fmt::Display for Message {
    // Formats the message as its wire protocol command name (the same name
    // that identifies the message type during serialization).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match self {
            Message::Version { .. } => "version",
            Message::Verack => "verack",
            Message::Ping(_) => "ping",
            Message::Pong(_) => "pong",
            Message::Reject { .. } => "reject",
            Message::GetAddr => "getaddr",
            Message::Addr(_) => "addr",
            Message::GetBlocks { .. } => "getblocks",
            Message::Inv(_) => "inv",
            Message::GetHeaders { .. } => "getheaders",
            Message::Headers(_) => "headers",
            Message::GetData(_) => "getdata",
            Message::Block(_) => "block",
            Message::Tx(_) => "tx",
            Message::NotFound(_) => "notfound",
            Message::Mempool => "mempool",
            Message::FilterLoad { .. } => "filterload",
            Message::FilterAdd { .. } => "filteradd",
            Message::FilterClear => "filterclear",
        })
    }
}
| 37.220708 | 132 | 0.633895 |
edb6be4b19aaf0713c4ec65c9f1bfec81b8118d5
| 1,470 |
use core::marker::PhantomData;
use core::pin::Pin;
use core::task::{Context, Poll};
use crate::stream::{DoubleEndedStream, ExactSizeStream, FusedStream, Stream};
/// A stream that never returns any items.
///
/// This stream is created by the [`pending`] function. See its
/// documentation for more.
///
/// [`pending`]: fn.pending.html
#[derive(Debug)]
pub struct Pending<T> {
    // Zero-sized marker that ties the item type `T` to this stream.
    _marker: PhantomData<T>,
}
/// Creates a stream that never returns any items.
///
/// The returned stream will always return `Pending` when polled.
///
/// # Examples
///
/// ```
/// # async_std::task::block_on(async {
/// #
/// use std::time::Duration;
///
/// use async_std::prelude::*;
/// use async_std::stream;
///
/// let dur = Duration::from_millis(100);
/// let mut s = stream::pending::<()>().timeout(dur);
///
/// let item = s.next().await;
///
/// assert!(item.is_some());
/// assert!(item.unwrap().is_err());
///
/// #
/// # })
/// ```
pub fn pending<T>() -> Pending<T> {
    Pending {
        _marker: PhantomData,
    }
}
impl<T> Stream for Pending<T> {
    type Item = T;
    // Always pending: never yields an item and never terminates.
    fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<T>> {
        Poll::Pending
    }
}
impl<T> DoubleEndedStream for Pending<T> {
    // Polling from the back behaves the same as the front: always pending.
    fn poll_next_back(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<T>> {
        Poll::Pending
    }
}
// A pending stream never completes, so fusing it changes nothing.
impl<T> FusedStream for Pending<T> {}
impl<T> ExactSizeStream for Pending<T> {
    // Reports zero items, since no item is ever produced.
    fn len(&self) -> usize {
        0
    }
}
| 21.304348 | 85 | 0.6 |
61138a3ed2c224dd756ed5ac694311019e920e57
| 5,309 |
use crate::api::*;
use std::fmt;
use std::path;
use serde::de::{self, Deserialize, Deserializer, MapAccess, Visitor};
use shell_words;
impl<'de> Deserialize<'de> for Entry {
    // Custom deserializer for a compilation-database entry: accepts either a
    // shell-quoted `command` string or an `arguments` array, and normalizes
    // both forms into `Entry::arguments`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Field tags recognized in an entry object.
        enum Field {
            Directory,
            File,
            Command,
            Arguments,
            Output,
        };
        const FIELDS: &[&str] = &["directory", "file", "command", "arguments", "output"];
        impl<'de> Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Field, D::Error>
            where
                D: Deserializer<'de>,
            {
                struct FieldVisitor;
                impl<'de> Visitor<'de> for FieldVisitor {
                    type Value = Field;
                    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                        formatter
                            .write_str("`directory`, `file`, `command`, `arguments`, or `output`")
                    }
                    // Maps a key name to its Field tag; unknown keys are errors.
                    fn visit_str<E>(self, value: &str) -> Result<Field, E>
                    where
                        E: de::Error,
                    {
                        match value {
                            "directory" => Ok(Field::Directory),
                            "file" => Ok(Field::File),
                            "command" => Ok(Field::Command),
                            "arguments" => Ok(Field::Arguments),
                            "output" => Ok(Field::Output),
                            _ => Err(de::Error::unknown_field(value, FIELDS)),
                        }
                    }
                }
                deserializer.deserialize_identifier(FieldVisitor)
            }
        }
        struct EntryVisitor;
        impl<'de> Visitor<'de> for EntryVisitor {
            type Value = Entry;
            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("struct Entry")
            }
            fn visit_map<V>(self, mut map: V) -> Result<Entry, V::Error>
            where
                V: MapAccess<'de>,
            {
                let mut directory: Option<path::PathBuf> = None;
                let mut file: Option<path::PathBuf> = None;
                let mut command: Option<String> = None;
                let mut arguments: Option<Vec<String>> = None;
                let mut output: Option<path::PathBuf> = None;
                // Collect each field at most once; a repeated key is an error.
                while let Some(key) = map.next_key()? {
                    match key {
                        Field::Directory => {
                            if directory.is_some() {
                                return Err(de::Error::duplicate_field("directory"));
                            }
                            directory = Some(map.next_value()?);
                        }
                        Field::File => {
                            if file.is_some() {
                                return Err(de::Error::duplicate_field("file"));
                            }
                            file = Some(map.next_value()?);
                        }
                        Field::Command => {
                            if command.is_some() {
                                return Err(de::Error::duplicate_field("command"));
                            }
                            command = Some(map.next_value()?);
                        }
                        Field::Arguments => {
                            if arguments.is_some() {
                                return Err(de::Error::duplicate_field("arguments"));
                            }
                            arguments = Some(map.next_value()?);
                        }
                        Field::Output => {
                            if output.is_some() {
                                return Err(de::Error::duplicate_field("output"));
                            }
                            output = Some(map.next_value()?);
                        }
                    }
                }
                // `directory` and `file` are mandatory.
                let directory = directory.ok_or_else(|| de::Error::missing_field("directory"))?;
                let file = file.ok_or_else(|| de::Error::missing_field("file"))?;
                // `arguments` wins when present; otherwise fall back to
                // splitting `command` with shell quoting rules. Unbalanced
                // quotes in `command` are reported as an invalid value.
                let arguments = arguments.map_or_else(
                    || {
                        command
                            .ok_or_else(|| de::Error::missing_field("`command` or `arguments`"))
                            .and_then(|cmd| {
                                shell_words::split(cmd.as_str()).map_err(|_| {
                                    de::Error::invalid_value(
                                        de::Unexpected::Str(cmd.as_str()),
                                        &"quotes needs to be matched",
                                    )
                                })
                            })
                    },
                    Ok,
                )?;
                Ok(Entry {
                    directory,
                    file,
                    arguments,
                    output,
                })
            }
        }
        deserializer.deserialize_struct("Entry", FIELDS, EntryVisitor)
    }
}
| 38.194245 | 98 | 0.379733 |
d76c4a015a2c2f5236ee0b5598f54d28d4110dba
| 2,821 |
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Compile-fail case: casting between pointers to two arbitrary `?Sized`
// pointee types is rejected (the `//~ ERROR` annotation is checked by the
// compiletest harness — do not edit it).
fn illegal_cast<U:?Sized,V:?Sized>(u: *const U) -> *const V
{
    u as *const V //~ ERROR is invalid
}
// Compile-fail case: even with a concrete unsized target (`str`), casting from
// an arbitrary `?Sized` pointee is rejected.
fn illegal_cast_2<U:?Sized>(u: *const U) -> *const str
{
    u as *const str //~ ERROR is invalid
}
// Two object-safe traits with blanket impls, used below as trait-object cast
// targets (`*const Foo`, `*const Bar`).
trait Foo { fn foo(&self) {} }
impl<T> Foo for T {}
trait Bar { fn foo(&self) {} }
impl<T> Bar for T {}
// A fieldless enum used to exercise enum-related cast errors below.
enum E {
    A, B
}
// Exercises the full matrix of rejected casts: non-primitive casts, casts
// between incompatible primitive/pointer types, and casts involving unsized
// pointees. Each `//~ ERROR` annotation is matched against compiler output by
// the compiletest harness — do not edit those comments.
fn main()
{
    let f: f32 = 1.2;
    let v = 0 as *const u8;
    let fat_v : *const [u8] = unsafe { &*(0 as *const [u8; 1])};
    let fat_sv : *const [i8] = unsafe { &*(0 as *const [i8; 1])};
    let foo: &Foo = &f;
    let _ = v as &u8; //~ ERROR non-primitive cast
    let _ = v as E; //~ ERROR non-primitive cast
    let _ = v as fn(); //~ ERROR non-primitive cast
    let _ = v as (u32,); //~ ERROR non-primitive cast
    let _ = Some(&v) as *const u8; //~ ERROR non-primitive cast
    let _ = v as f32; //~ ERROR is invalid
    let _ = main as f64; //~ ERROR is invalid
    let _ = &v as usize; //~ ERROR is invalid
    let _ = f as *const u8; //~ ERROR is invalid
    let _ = 3_i32 as bool; //~ ERROR cannot cast
    let _ = E::A as bool; //~ ERROR cannot cast
    let _ = 0x61u32 as char; //~ ERROR can be cast as
    let _ = false as f32; //~ ERROR is invalid
    let _ = E::A as f32; //~ ERROR is invalid
    let _ = 'a' as f32; //~ ERROR is invalid
    let _ = false as *const u8; //~ ERROR is invalid
    let _ = E::A as *const u8; //~ ERROR is invalid
    let _ = 'a' as *const u8; //~ ERROR is invalid
    let _ = 42usize as *const [u8]; //~ ERROR is invalid
    let _ = v as *const [u8]; //~ ERROR cannot cast
    let _ = fat_v as *const Foo; //~ ERROR the size for value values of type
    let _ = foo as *const str; //~ ERROR is invalid
    let _ = foo as *mut str; //~ ERROR is invalid
    let _ = main as *mut str; //~ ERROR is invalid
    let _ = &f as *mut f32; //~ ERROR is invalid
    let _ = &f as *const f64; //~ ERROR is invalid
    let _ = fat_sv as usize; //~ ERROR is invalid
    let a : *const str = "hello";
    let _ = a as *const Foo; //~ ERROR the size for value values of type
    // check no error cascade
    let _ = main.f as *const u32; //~ ERROR no field
    let cf: *const Foo = &0;
    let _ = cf as *const [u16]; //~ ERROR is invalid
    let _ = cf as *const Bar; //~ ERROR is invalid
    vec![0.0].iter().map(|s| s as f32).collect::<Vec<f32>>(); //~ ERROR is invalid
}
| 33.987952 | 82 | 0.59766 |
eb5ec4034904bff72117186a11228acdbe73fe0b
| 1,754 |
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{ArgType, FnType, Reg};
use rustc::ty::layout;
// Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
/// Classifies the return slot and every non-ignored argument of `fty`
/// according to the Win64 calling convention.
pub fn compute_abi_info(fty: &mut FnType) {
    // Adjust a single argument or return slot.
    fn fixup(a: &mut ArgType) {
        match a.layout.abi {
            layout::Abi::Uninhabited => {}
            layout::Abi::ScalarPair(..) |
            layout::Abi::Aggregate { .. } => {
                // Aggregates are passed in a single integer register when they
                // are exactly 8, 16, 32, or 64 bits wide; anything else goes
                // by pointer.
                match a.layout.size.bits() {
                    8 => a.cast_to(Reg::i8()),
                    16 => a.cast_to(Reg::i16()),
                    32 => a.cast_to(Reg::i32()),
                    64 => a.cast_to(Reg::i64()),
                    _ => a.make_indirect(),
                }
            }
            layout::Abi::Vector { .. } => {
                // FIXME(eddyb) there should be a size cap here
                // (probably what clang calls "illegal vectors").
            }
            layout::Abi::Scalar(_) => {
                // Scalars wider than 8 bytes go by pointer; smaller integers
                // are widened to at least 32 bits.
                if a.layout.size.bytes() > 8 {
                    a.make_indirect();
                } else {
                    a.extend_integer_width_to(32);
                }
            }
        }
    }
    if !fty.ret.is_ignore() {
        fixup(&mut fty.ret);
    }
    for arg in fty.args.iter_mut().filter(|a| !a.is_ignore()) {
        fixup(arg);
    }
}
| 33.09434 | 68 | 0.514253 |
dd8ebd7ad0c64b15f5ee3a204cc01133f2d1eaea
| 11,843 |
//! Module containing all deprecated API that will be removed in the next major version.
use crate::{
Dynamic, Engine, EvalAltResult, Expression, FnPtr, ImmutableString, NativeCallContext,
RhaiResult, RhaiResultOf, Scope, AST,
};
#[cfg(feature = "no_std")]
use std::prelude::v1::*;
impl Engine {
/// Evaluate a file, but throw away the result and only return error (if any).
/// Useful for when you don't need the result, but still need to keep track of possible errors.
///
/// Not available under `no_std` or `WASM`.
///
/// # Deprecated
///
/// This method is deprecated. Use [`run_file`][Engine::run_file] instead.
///
/// This method will be removed in the next major version.
#[deprecated(since = "1.1.0", note = "use `run_file` instead")]
#[cfg(not(feature = "no_std"))]
#[cfg(not(target_family = "wasm"))]
#[inline(always)]
pub fn consume_file(&self, path: std::path::PathBuf) -> RhaiResultOf<()> {
    // Backward-compatibility shim: delegates directly to the replacement API.
    self.run_file(path)
}
/// Evaluate a file with own scope, but throw away the result and only return error (if any).
/// Useful for when you don't need the result, but still need to keep track of possible errors.
///
/// Not available under `no_std` or `WASM`.
///
/// # Deprecated
///
/// This method is deprecated. Use [`run_file_with_scope`][Engine::run_file_with_scope] instead.
///
/// This method will be removed in the next major version.
#[deprecated(since = "1.1.0", note = "use `run_file_with_scope` instead")]
#[cfg(not(feature = "no_std"))]
#[cfg(not(target_family = "wasm"))]
#[inline(always)]
pub fn consume_file_with_scope(
&self,
scope: &mut Scope,
path: std::path::PathBuf,
) -> RhaiResultOf<()> {
self.run_file_with_scope(scope, path)
}
/// Evaluate a string, but throw away the result and only return error (if any).
/// Useful for when you don't need the result, but still need to keep track of possible errors.
///
/// # Deprecated
///
/// This method is deprecated. Use [`run`][Engine::run] instead.
///
/// This method will be removed in the next major version.
#[deprecated(since = "1.1.0", note = "use `run` instead")]
#[inline(always)]
pub fn consume(&self, script: &str) -> RhaiResultOf<()> {
self.run(script)
}
/// Evaluate a string with own scope, but throw away the result and only return error (if any).
/// Useful for when you don't need the result, but still need to keep track of possible errors.
///
/// # Deprecated
///
/// This method is deprecated. Use [`run_with_scope`][Engine::run_with_scope] instead.
///
/// This method will be removed in the next major version.
#[deprecated(since = "1.1.0", note = "use `run_with_scope` instead")]
#[inline(always)]
pub fn consume_with_scope(&self, scope: &mut Scope, script: &str) -> RhaiResultOf<()> {
self.run_with_scope(scope, script)
}
/// Evaluate an [`AST`], but throw away the result and only return error (if any).
/// Useful for when you don't need the result, but still need to keep track of possible errors.
///
/// # Deprecated
///
/// This method is deprecated. Use [`run_ast`][Engine::run_ast] instead.
///
/// This method will be removed in the next major version.
#[deprecated(since = "1.1.0", note = "use `run_ast` instead")]
#[inline(always)]
pub fn consume_ast(&self, ast: &AST) -> RhaiResultOf<()> {
self.run_ast(ast)
}
/// Evaluate an [`AST`] with own scope, but throw away the result and only return error (if any).
/// Useful for when you don't need the result, but still need to keep track of possible errors.
///
/// # Deprecated
///
/// This method is deprecated. Use [`run_ast_with_scope`][Engine::run_ast_with_scope] instead.
///
/// This method will be removed in the next major version.
#[deprecated(since = "1.1.0", note = "use `run_ast_with_scope` instead")]
#[inline(always)]
pub fn consume_ast_with_scope(&self, scope: &mut Scope, ast: &AST) -> RhaiResultOf<()> {
self.run_ast_with_scope(scope, ast)
}
/// Call a script function defined in an [`AST`] with multiple [`Dynamic`] arguments
/// and optionally a value for binding to the `this` pointer.
///
/// Not available under `no_function`.
///
/// There is an option to evaluate the [`AST`] to load necessary modules before calling the function.
///
/// # Deprecated
///
/// This method is deprecated. Use [`run_ast_with_scope`][Engine::run_ast_with_scope] instead.
///
/// This method will be removed in the next major version.
///
/// # WARNING - Low Level API
///
/// This function is very low level.
///
/// # Arguments
///
/// All the arguments are _consumed_, meaning that they're replaced by `()`.
/// This is to avoid unnecessarily cloning the arguments.
///
/// Do not use the arguments after this call. If they are needed afterwards,
/// clone them _before_ calling this function.
///
/// # Example
///
/// ```
/// # fn main() -> Result<(), Box<rhai::EvalAltResult>> {
/// # #[cfg(not(feature = "no_function"))]
/// # {
/// use rhai::{Engine, Scope, Dynamic};
///
/// let engine = Engine::new();
///
/// let ast = engine.compile("
/// fn add(x, y) { len(x) + y + foo }
/// fn add1(x) { len(x) + 1 + foo }
/// fn bar() { foo/2 }
/// fn action(x) { this += x; } // function using 'this' pointer
/// ")?;
///
/// let mut scope = Scope::new();
/// scope.push("foo", 42_i64);
///
/// // Call the script-defined function
/// let result = engine.call_fn_dynamic(&mut scope, &ast, true, "add", None, [ "abc".into(), 123_i64.into() ])?;
/// // ^^^^ no 'this' pointer
/// assert_eq!(result.cast::<i64>(), 168);
///
/// let result = engine.call_fn_dynamic(&mut scope, &ast, true, "add1", None, [ "abc".into() ])?;
/// assert_eq!(result.cast::<i64>(), 46);
///
/// let result = engine.call_fn_dynamic(&mut scope, &ast, true, "bar", None, [])?;
/// assert_eq!(result.cast::<i64>(), 21);
///
/// let mut value: Dynamic = 1_i64.into();
/// let result = engine.call_fn_dynamic(&mut scope, &ast, true, "action", Some(&mut value), [ 41_i64.into() ])?;
/// // ^^^^^^^^^^^^^^^^ binding the 'this' pointer
/// assert_eq!(value.as_int().expect("value should be INT"), 42);
/// # }
/// # Ok(())
/// # }
/// ```
#[deprecated(since = "1.1.0", note = "use `call_fn_raw` instead")]
#[cfg(not(feature = "no_function"))]
#[inline(always)]
pub fn call_fn_dynamic(
&self,
scope: &mut Scope,
ast: &AST,
eval_ast: bool,
name: impl AsRef<str>,
this_ptr: Option<&mut Dynamic>,
arg_values: impl AsMut<[Dynamic]>,
) -> RhaiResult {
self.call_fn_raw(scope, ast, eval_ast, true, name, this_ptr, arg_values)
}
}
impl Dynamic {
    // Deprecated conversions kept for backward compatibility; both simply
    // forward to their `into_*` replacements.
    /// Convert the [`Dynamic`] into a [`String`] and return it.
    /// If there are other references to the same string, a cloned copy is returned.
    /// Returns the name of the actual type if the cast fails.
    ///
    /// # Deprecated
    ///
    /// This method is deprecated. Use [`into_string`][Dynamic::into_string] instead.
    ///
    /// This method will be removed in the next major version.
    #[deprecated(since = "1.1.0", note = "use `into_string` instead")]
    #[inline(always)]
    pub fn as_string(self) -> Result<String, &'static str> {
        self.into_string()
    }
    /// Convert the [`Dynamic`] into an [`ImmutableString`] and return it.
    /// Returns the name of the actual type if the cast fails.
    ///
    /// # Deprecated
    ///
    /// This method is deprecated. Use [`into_immutable_string`][Dynamic::into_immutable_string] instead.
    ///
    /// This method will be removed in the next major version.
    #[deprecated(since = "1.1.0", note = "use `into_immutable_string` instead")]
    #[inline(always)]
    pub fn as_immutable_string(self) -> Result<ImmutableString, &'static str> {
        self.into_immutable_string()
    }
}
impl NativeCallContext<'_> {
    /// Call a function inside the call context.
    ///
    /// # WARNING - Low Level API
    ///
    /// This function is very low level.
    ///
    /// # Arguments
    ///
    /// All arguments may be _consumed_, meaning that they may be replaced by `()`. This is to avoid
    /// unnecessarily cloning the arguments.
    ///
    /// Do not use the arguments after this call. If they are needed afterwards, clone them _before_
    /// calling this function.
    ///
    /// If `is_method` is [`true`], the first argument is assumed to be passed by reference and is
    /// not consumed.
    ///
    /// # Deprecated
    ///
    /// This method is deprecated. Use [`call_fn_raw`][NativeCallContext::call_fn_raw] instead.
    ///
    /// This method will be removed in the next major version.
    #[deprecated(since = "1.2.0", note = "use `call_fn_raw` instead")]
    #[inline(always)]
    pub fn call_fn_dynamic_raw(
        &self,
        fn_name: impl AsRef<str>,
        is_method_call: bool,
        args: &mut [&mut Dynamic],
    ) -> RhaiResult {
        // `is_method_call` is passed twice because the replacement API splits
        // the old flag into separate "native-only" and "pass first arg by
        // reference" parameters, which this shim keeps in lock-step.
        self.call_fn_raw(fn_name.as_ref(), is_method_call, is_method_call, args)
    }
}
// Blanket conversion kept only for backward compatibility: callers should
// wrap an `EvalAltResult` in `Err` explicitly. The `#[deprecated]` attribute
// has no effect on trait impls (hence the `useless_deprecated` allow) but
// documents the intent to remove this impl.
#[allow(useless_deprecated)]
#[deprecated(since = "1.2.0", note = "explicitly wrap `EvalAltResult` in `Err`")]
impl<T> From<EvalAltResult> for RhaiResultOf<T> {
    #[inline(always)]
    fn from(err: EvalAltResult) -> Self {
        Err(err.into())
    }
}
impl FnPtr {
    /// Call the function pointer with curried arguments (if any).
    /// The function may be script-defined (not available under `no_function`) or native Rust.
    ///
    /// This method is intended for calling a function pointer that is passed into a native Rust
    /// function as an argument.  Therefore, the [`AST`] is _NOT_ evaluated before calling the
    /// function.
    ///
    /// # Deprecated
    ///
    /// This method is deprecated. Use [`call_within_context`][FnPtr::call_within_context] or
    /// [`call_raw`][FnPtr::call_raw] instead.
    ///
    /// This method will be removed in the next major version.
    ///
    /// # WARNING - Low Level API
    ///
    /// This function is very low level.
    ///
    /// # Arguments
    ///
    /// All the arguments are _consumed_, meaning that they're replaced by `()`.
    /// This is to avoid unnecessarily cloning the arguments.
    ///
    /// Do not use the arguments after this call. If they are needed afterwards,
    /// clone them _before_ calling this function.
    #[deprecated(
        since = "1.3.0",
        note = "use `call_within_context` or `call_raw` instead"
    )]
    #[inline(always)]
    pub fn call_dynamic(
        &self,
        context: &NativeCallContext,
        this_ptr: Option<&mut Dynamic>,
        arg_values: impl AsMut<[Dynamic]>,
    ) -> RhaiResult {
        // Pure delegation to the replacement API.
        self.call_raw(context, this_ptr, arg_values)
    }
}
impl Expression<'_> {
    /// If this expression is a variable name, return it.  Otherwise [`None`].
    ///
    /// # Deprecated
    ///
    /// This method is deprecated. Use [`get_string_value`][Expression::get_string_value] instead.
    ///
    /// This method will be removed in the next major version.
    #[deprecated(since = "1.4.0", note = "use `get_string_value` instead")]
    #[inline(always)]
    #[must_use]
    pub fn get_variable_name(&self) -> Option<&str> {
        // Renamed: the replacement also covers non-variable string values.
        self.get_string_value()
    }
}
| 37.359621 | 121 | 0.595288 |
7a5d9b2680c2795fbe074140dd82ab990bbcb794
| 271 |
extern crate lolbench_support;
extern crate rayon_1_0_0;

use lolbench_support::{criterion_from_env, init_logging};

// Benchmark entry point: wires the `fibonacci_split_recursive` benchmark from
// the pinned `rayon_1_0_0` crate into a Criterion instance that is configured
// from environment variables by the lolbench harness.
fn main() {
    init_logging();
    let mut crit = criterion_from_env();
    rayon_1_0_0::fibonacci::fibonacci_split_recursive(&mut crit);
}
| 67.75 | 75 | 0.723247 |
1d6096a44da26dd2b40e598406766499a06df16a
| 3,652 |
extern crate zip;
extern crate minidom;
extern crate wpm;
use minidom::Element;
use wpm::{Document, RootEntry, Run, RunElement};
/// Parse a WordprocessingML document body out of raw XML.
///
/// Produces a [`Document`] whose entries mirror the `<w:p>` paragraphs found
/// under `<w:body>`; elements that are not recognized are mapped to the
/// `Unknown` variants instead of being dropped.
///
/// # Errors
///
/// Returns `Err` with a human-readable message when the XML cannot be parsed
/// or when no `<w:body>` element is present (previously these cases panicked
/// via `unwrap`/`expect` even though the signature promises a `Result`).
pub fn import(xml_data: &str) -> Result<Document, String> {
    // Parse the XML; report malformed input instead of panicking.
    let root: Element = xml_data
        .parse()
        .map_err(|e| format!("invalid XML: {}", e))?;
    // Fall back to the standard WordprocessingML namespace when the root
    // element does not declare `xmlns:w` explicitly.
    let w_ns = root
        .attr("xmlns:w")
        .unwrap_or("http://schemas.openxmlformats.org/wordprocessingml/2006/main");
    let body = root
        .get_child("body", w_ns)
        .ok_or_else(|| String::from("no body found"))?;
    let doc = Document {
        entries: body
            .children()
            .map(|be| match be.name() {
                "p" => {
                    // A paragraph may carry at most one run (`<w:r>`) here;
                    // paragraphs without one are preserved with `run: None`.
                    let run = be.get_child("r", w_ns).map(|r| Run {
                        elements: r
                            .children()
                            .map(|re| match re.name() {
                                "t" => RunElement::Text {
                                    value: String::from(re.text()),
                                },
                                _ => RunElement::Unknown,
                            })
                            .collect(),
                    });
                    RootEntry::Paragraph { run }
                }
                _ => RootEntry::Unknown,
            })
            .collect(),
    };
    Ok(doc)
}
#[cfg(test)]
mod tests {
    use super::import;
    use super::wpm::{Document, RootEntry, Run, RunElement};

    /// End-to-end check: a minimal WordprocessingML document with a single
    /// paragraph containing one text run round-trips into the expected
    /// `Document` structure. The original version buried its assertions in
    /// nested `if let` pyramids ending in `assert!(false)`; this flattens
    /// them with `match` + descriptive `panic!` messages instead.
    #[test]
    fn it_works() {
        let xml_data = r#"
        <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
        <w:document xmlns:ve="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:o="urn:schemas-microsoft-com:office:office" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:m="http://schemas.openxmlformats.org/officeDocument/2006/math" xmlns:v="urn:schemas-microsoft-com:vml" xmlns:wp="http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing" xmlns:w10="urn:schemas-microsoft-com:office:word" xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main" xmlns:wne="http://schemas.microsoft.com/office/word/2006/wordml">
        <w:body>
        <w:p>
        <w:pPr>
        <w:pStyle w:val="Heading1"/>
        </w:pPr>
        <w:r><w:t>Introduction</w:t></w:r>
        </w:p>
        </w:body>
        </w:document>
        "#;
        // The importer must accept the fixture without error.
        let document: Document = import(xml_data).expect("import should succeed");
        assert_eq!(document.entries.len(), 1);

        // Drill into the single expected paragraph -> run -> text element,
        // failing with a descriptive message at the first mismatch.
        let run: &Run = match document.entries.last() {
            Some(RootEntry::Paragraph { run: Some(run) }) => run,
            _ => panic!("expected a paragraph containing a run"),
        };
        assert_eq!(run.elements.len(), 1);
        match run.elements.last() {
            Some(RunElement::Text { value }) => {
                assert_eq!(*value, String::from("Introduction"));
            }
            _ => panic!("expected a text run element"),
        }
    }
}
| 38.442105 | 588 | 0.420044 |
f98eb4e6eadf6ed33f722a0fe6e28ce08471f776
| 7,207 |
use merkle::{MerkleValue, MerkleNode};
use merkle::nibble::{NibbleVec, Nibble};
use {Change, DatabaseHandle};
use rlp::Rlp;
/// Resolve a child `MerkleValue` into the full node it denotes, recording in
/// the returned `Change` that the underlying database entry (if any) is no
/// longer referenced.
///
/// Panics when called on `MerkleValue::Empty`; callers must rule that out.
fn find_and_remove_child<'a, D: DatabaseHandle>(
    merkle: MerkleValue<'a>, database: &'a D
) -> (MerkleNode<'a>, Change) {
    match merkle {
        // An empty child has no node to resolve; reaching this is a caller bug.
        MerkleValue::Empty => panic!(),
        // The node is stored inline; nothing in the database to touch.
        MerkleValue::Full(ref sub_node) => {
            (sub_node.as_ref().clone(), Change::default())
        }
        // The node lives in the database: decode it and schedule the raw
        // entry for removal.
        MerkleValue::Hash(h) => {
            let mut change = Change::default();
            let decoded = MerkleNode::decode(&Rlp::new(database.get(h)));
            change.remove_raw(h);
            (decoded, change)
        }
    }
}
/// Merge an extension node's nibble prefix with the node that follows it,
/// producing the smallest equivalent node:
///
/// * a following leaf or extension absorbs the prefix directly;
/// * a branch cannot absorb a prefix, so it is stored as a value and wrapped
///   in a new extension node.
///
/// The redundant `node_nibble.clone()` calls of the original are removed:
/// the match arms are mutually exclusive, so the owned `node_nibble` can be
/// moved into whichever arm runs.
fn collapse_extension<'a>(
    node_nibble: NibbleVec, subnode: MerkleNode<'a>
) -> (MerkleNode<'a>, Change) {
    let mut change = Change::default();
    let node = match subnode {
        MerkleNode::Leaf(mut sub_nibble, sub_value) => {
            // Prepend our prefix to the leaf's remaining nibbles.
            let mut new_sub_nibble = node_nibble;
            new_sub_nibble.append(&mut sub_nibble);
            MerkleNode::Leaf(new_sub_nibble, sub_value)
        },
        MerkleNode::Extension(mut sub_nibble, sub_value) => {
            debug_assert!(sub_value != MerkleValue::Empty);
            // Two consecutive extensions collapse into one.
            let mut new_sub_nibble = node_nibble;
            new_sub_nibble.append(&mut sub_nibble);
            MerkleNode::Extension(new_sub_nibble, sub_value)
        },
        branch => {
            // A branch keeps its own identity; reference it as a value.
            let subvalue = change.add_value(&branch);
            MerkleNode::Extension(node_nibble, subvalue)
        },
    };
    (node, change)
}
/// Count how many of a branch's 17 slots (16 children plus the optional
/// attached value) are occupied.
fn nonempty_node_count<'a, 'b>(
    nodes: &'b [MerkleValue<'a>; 16], additional: &'b Option<&'a [u8]>
) -> usize {
    let child_count = nodes
        .iter()
        .filter(|value| value != &&MerkleValue::Empty)
        .count();
    let value_count = if additional.is_some() { 1 } else { 0 };
    child_count + value_count
}
/// Collapse a branch node after a deletion may have emptied some of its
/// slots, producing the smallest equivalent node:
///
/// * panics when every slot is empty (callers must not collapse those);
/// * a lone attached value becomes a leaf with an empty nibble path;
/// * a lone child is pulled up one level, its nibble path prefixed with the
///   branch index it occupied;
/// * two or more occupied slots leave the branch unchanged.
fn collapse_branch<'a, D: DatabaseHandle>(
    node_nodes: [MerkleValue<'a>; 16], node_additional: Option<&'a [u8]>,
    database: &'a D
) -> (MerkleNode<'a>, Change) {
    let mut change = Change::default();
    let value_count = nonempty_node_count(&node_nodes, &node_additional);
    let node = match value_count {
        0 => panic!(),
        1 if node_additional.is_some() =>
            MerkleNode::Leaf(NibbleVec::new(), node_additional.unwrap()),
        1 /* value in node_nodes */ => {
            // Locate the single remaining child and its branch index.
            let (subindex, subvalue) = node_nodes.iter().enumerate()
                .filter(|&(_, v)| v != &MerkleValue::Empty).next()
                .map(|(i, v)| (i, v.clone())).unwrap();
            let subnibble: Nibble = subindex.into();
            // Resolve the child to a full node, dropping its old raw entry.
            let (subnode, subchange) = find_and_remove_child(subvalue, database);
            change.merge(&subchange);
            match subnode {
                MerkleNode::Leaf(mut leaf_nibble, leaf_value) => {
                    // Leaf absorbs the branch index into its nibble path.
                    leaf_nibble.insert(0, subnibble);
                    MerkleNode::Leaf(leaf_nibble, leaf_value)
                },
                MerkleNode::Extension(mut ext_nibble, ext_value) => {
                    debug_assert!(ext_value != MerkleValue::Empty);
                    // Extension likewise absorbs the branch index.
                    ext_nibble.insert(0, subnibble);
                    MerkleNode::Extension(ext_nibble, ext_value)
                },
                branch => {
                    // A branch child cannot absorb the nibble; wrap it in a
                    // one-nibble extension instead.
                    let subvalue = change.add_value(&branch);
                    MerkleNode::Extension(vec![subnibble], subvalue)
                },
            }
        },
        _ /* value_count > 1 */ =>
            MerkleNode::Branch(node_nodes, node_additional),
    };
    (node, change)
}
/// Delete `nibble` underneath the child `merkle`, returning the node that
/// should replace the child (or `None` when the child disappears entirely)
/// together with the accumulated database changes.
///
/// The original re-wrapped the recursive result with
/// `match x { Some(v) => Some(v), None => None }`; that no-op is removed.
pub fn delete_by_child<'a, D: DatabaseHandle>(
    merkle: MerkleValue<'a>, nibble: NibbleVec, database: &'a D
) -> (Option<MerkleNode<'a>>, Change) {
    let mut change = Change::default();
    let new = match merkle {
        // Nothing stored here, so there is nothing to delete.
        MerkleValue::Empty => None,
        MerkleValue::Full(ref sub_node) => {
            // Inline child: recurse directly into it.
            let (new_node, subchange) = delete_by_node(
                sub_node.as_ref().clone(), nibble, database);
            change.merge(&subchange);
            new_node
        },
        MerkleValue::Hash(h) => {
            // Database-backed child: decode it, drop the raw entry (the
            // surviving node, if any, will be stored under its new hash),
            // then recurse.
            let sub_node = MerkleNode::decode(&Rlp::new(database.get(h)));
            change.remove_raw(h);
            let (new_node, subchange) = delete_by_node(
                sub_node, nibble, database);
            change.merge(&subchange);
            new_node
        },
    };
    (new, change)
}
/// Delete `nibble` from the subtrie rooted at `node`, returning the node
/// that should replace it (or `None` when the subtrie becomes empty)
/// together with the accumulated database changes.
pub fn delete_by_node<'a, D: DatabaseHandle>(
    node: MerkleNode<'a>, nibble: NibbleVec, database: &'a D
) -> (Option<MerkleNode<'a>>, Change) {
    let mut change = Change::default();
    let new = match node {
        MerkleNode::Leaf(node_nibble, node_value) => {
            // A leaf is removed only on an exact path match; otherwise the
            // key was never present and the leaf is kept as-is.
            if node_nibble == nibble {
                None
            } else {
                Some(MerkleNode::Leaf(node_nibble, node_value))
            }
        },
        MerkleNode::Extension(node_nibble, node_value) => {
            // Descend only if the target path passes through this extension;
            // otherwise the key is absent and the node is unchanged.
            if nibble.starts_with(&node_nibble) {
                let (subnode, subchange) = delete_by_child(
                    node_value, nibble[node_nibble.len()..].into(),
                    database);
                change.merge(&subchange);
                match subnode {
                    Some(subnode) => {
                        // Re-fuse the surviving child with our prefix.
                        let (new, subchange) = collapse_extension(node_nibble, subnode);
                        change.merge(&subchange);
                        Some(new)
                    },
                    None => None,
                }
            } else {
                Some(MerkleNode::Extension(node_nibble, node_value))
            }
        },
        MerkleNode::Branch(mut node_nodes, mut node_additional) => {
            let needs_collapse;
            if nibble.len() == 0 {
                // Path ends here: clear the branch's attached value.
                node_additional = None;
                needs_collapse = true;
            } else {
                // Path continues: recurse into the indexed child.
                let ni: usize = nibble[0].into();
                let (new_subnode, subchange) = delete_by_child(
                    node_nodes[ni].clone(), nibble[1..].into(),
                    database);
                change.merge(&subchange);
                match new_subnode {
                    Some(new_subnode) => {
                        let new_subvalue = change.add_value(&new_subnode);
                        node_nodes[ni] = new_subvalue;
                        // Slot still occupied; branch shape is unchanged.
                        needs_collapse = false;
                    },
                    None => {
                        node_nodes[ni] = MerkleValue::Empty;
                        needs_collapse = true;
                    },
                }
            }
            if needs_collapse {
                // A slot was emptied: shrink the branch if it now holds a
                // single value, or drop it entirely when nothing remains.
                let value_count = nonempty_node_count(&node_nodes, &node_additional);
                if value_count > 0 {
                    let (new, subchange) = collapse_branch(node_nodes, node_additional, database);
                    change.merge(&subchange);
                    Some(new)
                } else {
                    None
                }
            } else {
                Some(MerkleNode::Branch(node_nodes, node_additional))
            }
        },
    };
    (new, change)
}
| 33.365741 | 98 | 0.51006 |
14f0099eb6070b9c26278845100d1bd917f69b4d
| 6,063 |
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod vec_backend;
use self::core::core::merkle_proof::MerkleProof;
use self::core::core::pmmr::PMMR;
use self::core::ser::{self, PMMRIndexHashable};
use crate::vec_backend::{TestElem, VecBackend};
use grin_core as core;
#[test]
fn empty_merkle_proof() {
	// A default (empty) proof carries no path and refers to an empty MMR.
	let proof = MerkleProof::empty();
	assert!(proof.path.is_empty());
	assert_eq!(proof.mmr_size, 0);
}
#[test]
fn merkle_proof_ser_deser() {
	// Build a small PMMR so we have a non-trivial proof to round-trip.
	let mut backend = VecBackend::new();
	let mut pmmr = PMMR::new(&mut backend);
	for x in 0..15 {
		pmmr.push(&TestElem([0, 0, 0, x])).unwrap();
	}
	let proof = pmmr.merkle_proof(9).unwrap();

	// Serialize then deserialize; the proof must survive unchanged.
	let mut encoded = Vec::new();
	ser::serialize_default(&mut encoded, &proof).expect("serialization failed");
	let decoded: MerkleProof = ser::deserialize_default(&mut &encoded[..]).unwrap();
	assert_eq!(decoded, proof);
}
#[test]
fn pmmr_merkle_proof_prune_and_rewind() {
	let mut backend = VecBackend::new();
	let mut pmmr = PMMR::new(&mut backend);
	pmmr.push(&TestElem([0, 0, 0, 1])).unwrap();
	pmmr.push(&TestElem([0, 0, 0, 2])).unwrap();
	let proof_before = pmmr.merkle_proof(2).unwrap();

	// Pruning a leaf must not invalidate the Merkle proof of its sibling:
	// the proof generated after pruning is identical to the one before.
	pmmr.prune(1).unwrap();
	let proof_after = pmmr.merkle_proof(2).unwrap();
	assert_eq!(proof_after, proof_before);
}
#[test]
// Exercises Merkle proof generation and verification while the PMMR grows
// from one to seven leaves, checking every intermediate hash position along
// the way. Positions are 1-based PMMR positions; `pos_N` names the hash at
// 0-based insertion index N.
fn pmmr_merkle_proof() {
	let elems = [
		TestElem([0, 0, 0, 1]),
		TestElem([0, 0, 0, 2]),
		TestElem([0, 0, 0, 3]),
		TestElem([0, 0, 0, 4]),
		TestElem([0, 0, 0, 5]),
		TestElem([0, 0, 0, 6]),
		TestElem([0, 0, 0, 7]),
		TestElem([0, 0, 0, 8]),
		TestElem([1, 0, 0, 0]),
	];
	let mut ba = VecBackend::new();
	let mut pmmr = PMMR::new(&mut ba);
	// Single leaf: the proof path is empty.
	pmmr.push(&elems[0]).unwrap();
	let pos_0 = elems[0].hash_with_index(0);
	assert_eq!(pmmr.get_hash(1).unwrap(), pos_0);
	let proof = pmmr.merkle_proof(1).unwrap();
	assert_eq!(proof.path, vec![]);
	assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok());
	pmmr.push(&elems[1]).unwrap();
	let pos_1 = elems[1].hash_with_index(1);
	assert_eq!(pmmr.get_hash(2).unwrap(), pos_1);
	let pos_2 = (pos_0, pos_1).hash_with_index(2);
	assert_eq!(pmmr.get_hash(3).unwrap(), pos_2);
	assert_eq!(pmmr.root().unwrap(), pos_2);
	assert_eq!(pmmr.peaks(), [pos_2]);
	// single peak, path with single sibling
	let proof = pmmr.merkle_proof(1).unwrap();
	assert_eq!(proof.path, vec![pos_1]);
	assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok());
	let proof = pmmr.merkle_proof(2).unwrap();
	assert_eq!(proof.path, vec![pos_0]);
	assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 2).is_ok());
	// three leaves, two peaks (one also the right-most leaf)
	pmmr.push(&elems[2]).unwrap();
	let pos_3 = elems[2].hash_with_index(3);
	assert_eq!(pmmr.get_hash(4).unwrap(), pos_3);
	assert_eq!(pmmr.root().unwrap(), (pos_2, pos_3).hash_with_index(4));
	assert_eq!(pmmr.peaks(), [pos_2, pos_3]);
	let proof = pmmr.merkle_proof(1).unwrap();
	assert_eq!(proof.path, vec![pos_1, pos_3]);
	assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok());
	let proof = pmmr.merkle_proof(2).unwrap();
	assert_eq!(proof.path, vec![pos_0, pos_3]);
	assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 2).is_ok());
	let proof = pmmr.merkle_proof(4).unwrap();
	assert_eq!(proof.path, vec![pos_2]);
	assert!(proof.verify(pmmr.root().unwrap(), &elems[2], 4).is_ok());
	// 7 leaves, 3 peaks, 11 pos in total
	pmmr.push(&elems[3]).unwrap();
	let pos_4 = elems[3].hash_with_index(4);
	assert_eq!(pmmr.get_hash(5).unwrap(), pos_4);
	let pos_5 = (pos_3, pos_4).hash_with_index(5);
	assert_eq!(pmmr.get_hash(6).unwrap(), pos_5);
	let pos_6 = (pos_2, pos_5).hash_with_index(6);
	assert_eq!(pmmr.get_hash(7).unwrap(), pos_6);
	pmmr.push(&elems[4]).unwrap();
	let pos_7 = elems[4].hash_with_index(7);
	assert_eq!(pmmr.get_hash(8).unwrap(), pos_7);
	pmmr.push(&elems[5]).unwrap();
	let pos_8 = elems[5].hash_with_index(8);
	assert_eq!(pmmr.get_hash(9).unwrap(), pos_8);
	let pos_9 = (pos_7, pos_8).hash_with_index(9);
	assert_eq!(pmmr.get_hash(10).unwrap(), pos_9);
	pmmr.push(&elems[6]).unwrap();
	let pos_10 = elems[6].hash_with_index(10);
	assert_eq!(pmmr.get_hash(11).unwrap(), pos_10);
	assert_eq!(pmmr.unpruned_size(), 11);
	// Proofs for leaves under the leftmost peak include the bagged
	// right-hand peaks as the final path element.
	let proof = pmmr.merkle_proof(1).unwrap();
	assert_eq!(
		proof.path,
		vec![pos_1, pos_5, (pos_9, pos_10).hash_with_index(11)]
	);
	assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok());
	let proof = pmmr.merkle_proof(2).unwrap();
	assert_eq!(
		proof.path,
		vec![pos_0, pos_5, (pos_9, pos_10).hash_with_index(11)]
	);
	assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 2).is_ok());
	let proof = pmmr.merkle_proof(4).unwrap();
	assert_eq!(
		proof.path,
		vec![pos_4, pos_2, (pos_9, pos_10).hash_with_index(11)]
	);
	assert!(proof.verify(pmmr.root().unwrap(), &elems[2], 4).is_ok());
	let proof = pmmr.merkle_proof(5).unwrap();
	assert_eq!(
		proof.path,
		vec![pos_3, pos_2, (pos_9, pos_10).hash_with_index(11)]
	);
	assert!(proof.verify(pmmr.root().unwrap(), &elems[3], 5).is_ok());
	let proof = pmmr.merkle_proof(8).unwrap();
	assert_eq!(proof.path, vec![pos_8, pos_10, pos_6]);
	assert!(proof.verify(pmmr.root().unwrap(), &elems[4], 8).is_ok());
	let proof = pmmr.merkle_proof(9).unwrap();
	assert_eq!(proof.path, vec![pos_7, pos_10, pos_6]);
	assert!(proof.verify(pmmr.root().unwrap(), &elems[5], 9).is_ok());
	let proof = pmmr.merkle_proof(11).unwrap();
	assert_eq!(proof.path, vec![pos_9, pos_6]);
	assert!(proof.verify(pmmr.root().unwrap(), &elems[6], 11).is_ok());
}
| 31.910526 | 77 | 0.672769 |
0eb09a44011d2ffea6e2e4958beab2db1b4afb87
| 8,809 |
//! Poem is a full-featured and easy-to-use web framework with the Rust
//! programming language.
//!
//! # Table of contents
//!
//! - [Quickstart](#quickstart)
//! - [Endpoint](#endpoint)
//! - [Extractors](#extractors)
//! - [Routing](#routing)
//! - [Responses](#responses)
//! - [Handling errors](#handling-errors)
//! - [Middleware](#middleware)
//! - [Crate features](#crate-features)
//!
//! # Quickstart
//!
//! ```no_run
//! use poem::{get, handler, listener::TcpListener, web::Path, IntoResponse, Route, Server};
//!
//! #[handler]
//! fn hello(Path(name): Path<String>) -> String {
//! format!("hello: {}", name)
//! }
//!
//! #[tokio::main]
//! async fn main() -> Result<(), std::io::Error> {
//! let app = Route::new().at("/hello/:name", get(hello));
//! Server::new(TcpListener::bind("127.0.0.1:3000"))
//! .run(app)
//! .await
//! }
//! ```
//!
//! # Endpoint
//!
//! The [`Endpoint`] trait represents a type that can handle HTTP requests, and
//! it returns the `Result<T: IntoResponse, Error>` type.
//!
//! The [`handler`] macro is used to convert a function into an endpoint.
//!
//! ```
//! use poem::{error::NotFoundError, handler, Endpoint, Request, Result};
//!
//! #[handler]
//! fn return_str() -> &'static str {
//! "hello"
//! }
//!
//! #[handler]
//! fn return_err() -> Result<&'static str, NotFoundError> {
//! Err(NotFoundError)
//! }
//!
//! # tokio::runtime::Runtime::new().unwrap().block_on(async {
//! let resp = return_str.call(Request::default()).await.unwrap();
//! assert_eq!(resp.into_body().into_string().await.unwrap(), "hello");
//!
//! let err = return_err.call(Request::default()).await.unwrap_err();
//! assert!(err.is::<NotFoundError>());
//! # });
//! ```
//!
//! # Extractors
//!
//! The extractor is used to extract something from the HTTP request.
//!
//! `Poem` provides some commonly used extractors for extracting something from
//! HTTP requests.
//!
//! In the following example, the `index` function uses 3 extractors to extract
//! the remote address, HTTP method and URI.
//!
//! ```rust
//! use poem::{
//! handler,
//! http::{Method, Uri},
//! web::RemoteAddr,
//! };
//!
//! #[handler]
//! fn index(remote_addr: &RemoteAddr, method: Method, uri: &Uri) {}
//! ```
//!
//! By default, the extractor will return a `400 Bad Request` when an error
//! occurs, but sometimes you may want to change this behavior, so you can
//! handle the error yourself.
//!
//! In the following example, when the [`Query`](web::Query) extractor fails, it
//! will return a `500 Internal Server Error` response and the reason for the error.
//!
//! ```
//! use poem::{
//! error::ParseQueryError, handler, http::StatusCode, web::Query, IntoResponse, Response,
//! Result,
//! };
//! use serde::Deserialize;
//!
//! #[derive(Debug, Deserialize)]
//! struct Params {
//! name: String,
//! }
//!
//! #[handler]
//! fn index(res: Result<Query<Params>>) -> Result<impl IntoResponse> {
//! match res {
//! Ok(Query(params)) => Ok(params.name.into_response()),
//! Err(err) if err.is::<ParseQueryError>() => Ok(Response::builder()
//! .status(StatusCode::INTERNAL_SERVER_ERROR)
//! .body(err.to_string())),
//! Err(err) => Err(err),
//! }
//! }
//! ```
//!
//! # Routing
//!
//! There are three available routes.
//!
//! - [`Route`] Routing for path
//! - [`RouteDomain`] Routing for domain
//! - [`RouteMethod`] Routing for HTTP method
//!
//! ```
//! use poem::{get, handler, post, web::Path, Route};
//!
//! #[handler]
//! async fn get_user(id: Path<String>) {}
//!
//! #[handler]
//! async fn delete_user(id: Path<String>) {}
//!
//! #[handler]
//! async fn create_user() {}
//!
//! let app = Route::new()
//! .at("/user/:id", get(get_user).delete(delete_user))
//! .at("/user", post(create_user));
//! ```
//!
//! You can create custom extractors, see also [`FromRequest`].
//!
//! # Responses
//!
//! All types that can be converted to HTTP response [`Response`] should
//! implement [`IntoResponse`].
//!
//! In the following example, the `string_response` and `status_response`
//! functions return the `String` and `StatusCode` types, because `Poem` has
//! implemented the [`IntoResponse`] trait for them.
//!
//! The `no_response` function does not return a value. We can think that
//! its return type is `()`, and `Poem` also implements [`IntoResponse`] for
//! `()`, which is always converted to `200 OK`.
//!
//! The `result_response` function returns a `Result` type, which means that an
//! error may occur.
//! ```
//! use poem::{handler, http::StatusCode, Result};
//!
//! #[handler]
//! fn string_response() -> String {
//! todo!()
//! }
//!
//! #[handler]
//! fn status_response() -> StatusCode {
//! todo!()
//! }
//!
//! #[handler]
//! fn no_response() {}
//!
//! #[handler]
//! fn result_response() -> Result<String> {
//! todo!()
//! }
//! ```
//!
//! # Handling errors
//!
//! The following example returns customized content when
//! [`NotFoundError`](error::NotFoundError) occurs.
//!
//! ```
//! use poem::{
//! error::NotFoundError, handler, http::StatusCode, EndpointExt, IntoResponse, Response, Route,
//! };
//!
//! #[handler]
//! fn foo() {}
//!
//! #[handler]
//! fn bar() {}
//!
//! let app =
//! Route::new()
//! .at("/foo", foo)
//! .at("/bar", bar)
//! .catch_error(|err: NotFoundError| async move {
//! Response::builder()
//! .status(StatusCode::NOT_FOUND)
//! .body("custom not found")
//! });
//! ```
//!
//! # Middleware
//!
//! You can call the [`with`](EndpointExt::with) method on the [`Endpoint`] to
//! apply a middleware to an endpoint. It actually converts the original
//! endpoint to a new endpoint.
//! ```
//! use poem::{handler, middleware::Tracing, EndpointExt, Route};
//!
//! #[handler]
//! fn index() {}
//!
//! let app = Route::new().at("/", index).with(Tracing);
//! ```
//!
//! You can create your own middleware, see also [`Middleware`].
//!
//! # Crate features
//!
//! To avoid compiling unused dependencies, `Poem` gates certain features, all
//! of which are disabled by default:
//!
//! |Feature |Description |
//! |------------------|--------------------------------|
//! |compression | Support decompress request body and compress response body |
//! |cookie | Support for Cookie |
//! |csrf | Support for Cross-Site Request Forgery (CSRF) protection |
//! |multipart | Support for Multipart |
//! |native-tls | Support for HTTP server over TLS with [`native-tls`](https://crates.io/crates/native-tls) |
//! |opentelemetry | Support for opentelemetry |
//! |prometheus | Support for Prometheus |
//! |redis-session | Support for RedisSession |
//! |rustls | Support for HTTP server over TLS with [`rustls`](https://crates.io/crates/rustls) |
//! |session | Support for session |
//! |sse | Support Server-Sent Events (SSE) |
//! |tempfile | Support for [`tempfile`](https://crates.io/crates/tempfile) |
//! |tower-compat | Adapters for `tower::Layer` and `tower::Service`. |
//! |websocket | Support for WebSocket |
//! |anyhow            | Integrate with the [`anyhow`](https://crates.io/crates/anyhow) crate. |
//! |i18n              | Support for internationalization |
#![doc(html_favicon_url = "https://raw.githubusercontent.com/poem-web/poem/master/favicon.ico")]
#![doc(html_logo_url = "https://raw.githubusercontent.com/poem-web/poem/master/logo.png")]
#![forbid(unsafe_code)]
#![deny(private_in_public, unreachable_pub)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(missing_docs)]
// Public modules; feature-gated ones are also marked for docs.rs rendering.
pub mod endpoint;
pub mod error;
#[cfg(feature = "i18n")]
#[cfg_attr(docsrs, doc(cfg(feature = "i18n")))]
pub mod i18n;
pub mod listener;
pub mod middleware;
#[cfg(feature = "session")]
#[cfg_attr(docsrs, doc(cfg(feature = "session")))]
pub mod session;
#[cfg(feature = "test")]
#[cfg_attr(docsrs, doc(cfg(feature = "test")))]
pub mod test;
pub mod web;
// Re-export `http` so downstream crates don't need their own dependency on it.
#[doc(inline)]
pub use http;
// Private implementation modules; their public items are re-exported below.
mod addr;
mod body;
mod request;
mod response;
mod route;
mod server;
pub use addr::Addr;
pub use async_trait::async_trait;
pub use body::Body;
pub use endpoint::{Endpoint, EndpointExt, IntoEndpoint};
pub use error::{Error, Result};
pub use middleware::Middleware;
pub use poem_derive::handler;
pub use request::{OnUpgrade, Request, RequestBuilder, RequestParts, Upgraded};
pub use response::{Response, ResponseBuilder, ResponseParts};
pub use route::{
    connect, delete, get, head, options, patch, post, put, trace, Route, RouteDomain, RouteMethod,
};
pub use server::Server;
pub use web::{FromRequest, IntoResponse, RequestBody};
e69dfaa16f3e1088bef46872c6fc21213c305ce2
| 501 |
use lazy_static::lazy_static;
pub use lighthouse_metrics::*;
lazy_static! {
    // Histogram timing how long the operation pool takes to pack
    // previous-epoch attestations into a block. `Result` because histogram
    // registration with the metrics registry can fail.
    pub static ref ATTESTATION_PREV_EPOCH_PACKING_TIME: Result<Histogram> = try_create_histogram(
        "op_pool_attestation_prev_epoch_packing_time",
        "Time to pack previous epoch attestations"
    );
    // Histogram timing the same operation for current-epoch attestations.
    pub static ref ATTESTATION_CURR_EPOCH_PACKING_TIME: Result<Histogram> = try_create_histogram(
        "op_pool_attestation_curr_epoch_packing_time",
        "Time to pack current epoch attestations"
    );
}
| 33.4 | 97 | 0.756487 |
29ffcdc7f5e3ce46ad829fbff551d59a6e8bc70d
| 13,251 |
// svd2rust-generated accessor types for the CH8_INTERACT register.
#[doc = "Reader of register CH8_INTERACT"]
pub type R = crate::R<u32, super::CH8_INTERACT>;
#[doc = "Writer for register CH8_INTERACT"]
pub type W = crate::W<u32, super::CH8_INTERACT>;
#[doc = "Register CH8_INTERACT `reset()`'s with value 0"]
impl crate::ResetValue for super::CH8_INTERACT {
    type Type = u32;
    // Hardware reset value of the whole register is all zeroes.
    #[inline(always)]
    fn reset_value() -> Self::Type { 0 }
}
#[doc = "Reader of field `THRES`"]
pub type THRES_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `THRES`"]
pub struct THRES_W<'a> {
    w: &'a mut W,
}
impl<'a> THRES_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    // `unsafe`: caller must ensure `value` is a meaningful THRES setting;
    // only the low 12 bits (mask 0x0fff, register bits 11:0) are kept.
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x0fff) | ((value as u32) & 0x0fff);
        self.w
    }
}
// SAMPLE field (register bits 13:12): selects which signal is evaluated.
#[doc = "Select Sample Mode\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum SAMPLE_A {
    #[doc = "0: Counter output will be used in evaluation"]
    ACMPCOUNT = 0,
    #[doc = "1: ACMP output will be used in evaluation"]
    ACMP = 1,
    #[doc = "2: ADC output will be used in evaluation"]
    ADC = 2,
    #[doc = "3: Differential ADC output will be used in evaluation"]
    ADCDIFF = 3,
}
impl From<SAMPLE_A> for u8 {
    #[inline(always)]
    fn from(variant: SAMPLE_A) -> Self { variant as _ }
}
#[doc = "Reader of field `SAMPLE`"]
pub type SAMPLE_R = crate::R<u8, SAMPLE_A>;
impl SAMPLE_R {
    #[doc = r"Get enumerated values variant"]
    // All four 2-bit patterns map to a variant, so decoding is infallible.
    #[inline(always)]
    pub fn variant(&self) -> SAMPLE_A {
        match self.bits {
            0 => SAMPLE_A::ACMPCOUNT,
            1 => SAMPLE_A::ACMP,
            2 => SAMPLE_A::ADC,
            3 => SAMPLE_A::ADCDIFF,
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `ACMPCOUNT`"]
    #[inline(always)]
    pub fn is_acmpcount(&self) -> bool { *self == SAMPLE_A::ACMPCOUNT }
    #[doc = "Checks if the value of the field is `ACMP`"]
    #[inline(always)]
    pub fn is_acmp(&self) -> bool { *self == SAMPLE_A::ACMP }
    #[doc = "Checks if the value of the field is `ADC`"]
    #[inline(always)]
    pub fn is_adc(&self) -> bool { *self == SAMPLE_A::ADC }
    #[doc = "Checks if the value of the field is `ADCDIFF`"]
    #[inline(always)]
    pub fn is_adcdiff(&self) -> bool { *self == SAMPLE_A::ADCDIFF }
}
#[doc = "Write proxy for field `SAMPLE`"]
pub struct SAMPLE_W<'a> {
    w: &'a mut W,
}
impl<'a> SAMPLE_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    // The redundant inner `{ ... }` block around this call has been removed;
    // `bits` is safe to call here because every `SAMPLE_A` value fits in the
    // 2-bit field, so no information can be truncated.
    #[inline(always)]
    pub fn variant(self, variant: SAMPLE_A) -> &'a mut W {
        self.bits(variant.into())
    }
    #[doc = "Counter output will be used in evaluation"]
    #[inline(always)]
    pub fn acmpcount(self) -> &'a mut W { self.variant(SAMPLE_A::ACMPCOUNT) }
    #[doc = "ACMP output will be used in evaluation"]
    #[inline(always)]
    pub fn acmp(self) -> &'a mut W { self.variant(SAMPLE_A::ACMP) }
    #[doc = "ADC output will be used in evaluation"]
    #[inline(always)]
    pub fn adc(self) -> &'a mut W { self.variant(SAMPLE_A::ADC) }
    #[doc = "Differential ADC output will be used in evaluation"]
    #[inline(always)]
    pub fn adcdiff(self) -> &'a mut W { self.variant(SAMPLE_A::ADCDIFF) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // Clear register bits 13:12, then OR in the (masked) new value.
        self.w.bits = (self.w.bits & !(0x03 << 12)) | (((value as u32) & 0x03) << 12);
        self.w
    }
}
// SETIF field (register bits 16:14): interrupt generation mode.
#[doc = "Enable Interrupt Generation\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum SETIF_A {
    #[doc = "0: No interrupt is generated"]
    NONE = 0,
    #[doc = "1: Set interrupt flag if the sensor triggers."]
    LEVEL = 1,
    #[doc = "2: Set interrupt flag on positive edge of the sensor state"]
    POSEDGE = 2,
    #[doc = "3: Set interrupt flag on negative edge of the sensor state"]
    NEGEDGE = 3,
    #[doc = "4: Set interrupt flag on both edges of the sensor state"]
    BOTHEDGES = 4,
}
impl From<SETIF_A> for u8 {
    #[inline(always)]
    fn from(variant: SETIF_A) -> Self { variant as _ }
}
#[doc = "Reader of field `SETIF`"]
pub type SETIF_R = crate::R<u8, SETIF_A>;
impl SETIF_R {
    #[doc = r"Get enumerated values variant"]
    // Only patterns 0..=4 are defined; 5..=7 are reserved, hence the
    // fallible `Variant::Res` arm instead of `unreachable!()`.
    #[inline(always)]
    pub fn variant(&self) -> crate::Variant<u8, SETIF_A> {
        use crate::Variant::*;
        match self.bits {
            0 => Val(SETIF_A::NONE),
            1 => Val(SETIF_A::LEVEL),
            2 => Val(SETIF_A::POSEDGE),
            3 => Val(SETIF_A::NEGEDGE),
            4 => Val(SETIF_A::BOTHEDGES),
            i => Res(i),
        }
    }
    #[doc = "Checks if the value of the field is `NONE`"]
    #[inline(always)]
    pub fn is_none(&self) -> bool { *self == SETIF_A::NONE }
    #[doc = "Checks if the value of the field is `LEVEL`"]
    #[inline(always)]
    pub fn is_level(&self) -> bool { *self == SETIF_A::LEVEL }
    #[doc = "Checks if the value of the field is `POSEDGE`"]
    #[inline(always)]
    pub fn is_posedge(&self) -> bool { *self == SETIF_A::POSEDGE }
    #[doc = "Checks if the value of the field is `NEGEDGE`"]
    #[inline(always)]
    pub fn is_negedge(&self) -> bool { *self == SETIF_A::NEGEDGE }
    #[doc = "Checks if the value of the field is `BOTHEDGES`"]
    #[inline(always)]
    pub fn is_bothedges(&self) -> bool { *self == SETIF_A::BOTHEDGES }
}
#[doc = "Write proxy for field `SETIF`"]
pub struct SETIF_W<'a> {
    w: &'a mut W,
}
impl<'a> SETIF_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    // Safe wrapper: every `SETIF_A` value is a valid field pattern, so the
    // unsafe raw write cannot produce a reserved value here.
    #[inline(always)]
    pub fn variant(self, variant: SETIF_A) -> &'a mut W { unsafe { self.bits(variant.into()) } }
    #[doc = "No interrupt is generated"]
    #[inline(always)]
    pub fn none(self) -> &'a mut W { self.variant(SETIF_A::NONE) }
    #[doc = "Set interrupt flag if the sensor triggers."]
    #[inline(always)]
    pub fn level(self) -> &'a mut W { self.variant(SETIF_A::LEVEL) }
    #[doc = "Set interrupt flag on positive edge of the sensor state"]
    #[inline(always)]
    pub fn posedge(self) -> &'a mut W { self.variant(SETIF_A::POSEDGE) }
    #[doc = "Set interrupt flag on negative edge of the sensor state"]
    #[inline(always)]
    pub fn negedge(self) -> &'a mut W { self.variant(SETIF_A::NEGEDGE) }
    #[doc = "Set interrupt flag on both edges of the sensor state"]
    #[inline(always)]
    pub fn bothedges(self) -> &'a mut W { self.variant(SETIF_A::BOTHEDGES) }
    #[doc = r"Writes raw bits to the field"]
    // `unsafe`: raw 3-bit write can set reserved patterns 5..=7.
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x07 << 14)) | (((value as u32) & 0x07) << 14);
        self.w
    }
}
// EXMODE field (register bits 18:17): excitation GPIO mode.
#[doc = "Set GPIO Mode\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum EXMODE_A {
    #[doc = "0: Disabled"]
    DISABLE = 0,
    #[doc = "1: Push Pull, GPIO is driven high"]
    HIGH = 1,
    #[doc = "2: Push Pull, GPIO is driven low"]
    LOW = 2,
    #[doc = "3: VDAC output"]
    DACOUT = 3,
}
impl From<EXMODE_A> for u8 {
    #[inline(always)]
    fn from(variant: EXMODE_A) -> Self { variant as _ }
}
#[doc = "Reader of field `EXMODE`"]
pub type EXMODE_R = crate::R<u8, EXMODE_A>;
impl EXMODE_R {
    #[doc = r"Get enumerated values variant"]
    // All four 2-bit patterns are defined, so decoding is infallible.
    #[inline(always)]
    pub fn variant(&self) -> EXMODE_A {
        match self.bits {
            0 => EXMODE_A::DISABLE,
            1 => EXMODE_A::HIGH,
            2 => EXMODE_A::LOW,
            3 => EXMODE_A::DACOUT,
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `DISABLE`"]
    #[inline(always)]
    pub fn is_disable(&self) -> bool { *self == EXMODE_A::DISABLE }
    #[doc = "Checks if the value of the field is `HIGH`"]
    #[inline(always)]
    pub fn is_high(&self) -> bool { *self == EXMODE_A::HIGH }
    #[doc = "Checks if the value of the field is `LOW`"]
    #[inline(always)]
    pub fn is_low(&self) -> bool { *self == EXMODE_A::LOW }
    #[doc = "Checks if the value of the field is `DACOUT`"]
    #[inline(always)]
    pub fn is_dacout(&self) -> bool { *self == EXMODE_A::DACOUT }
}
#[doc = "Write proxy for field `EXMODE`"]
pub struct EXMODE_W<'a> {
    w: &'a mut W,
}
impl<'a> EXMODE_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    // The redundant inner `{ ... }` block around this call has been removed;
    // every `EXMODE_A` value fits the 2-bit field, so nothing is truncated.
    #[inline(always)]
    pub fn variant(self, variant: EXMODE_A) -> &'a mut W {
        self.bits(variant.into())
    }
    #[doc = "Disabled"]
    #[inline(always)]
    pub fn disable(self) -> &'a mut W { self.variant(EXMODE_A::DISABLE) }
    #[doc = "Push Pull, GPIO is driven high"]
    #[inline(always)]
    pub fn high(self) -> &'a mut W { self.variant(EXMODE_A::HIGH) }
    #[doc = "Push Pull, GPIO is driven low"]
    #[inline(always)]
    pub fn low(self) -> &'a mut W { self.variant(EXMODE_A::LOW) }
    #[doc = "VDAC output"]
    #[inline(always)]
    pub fn dacout(self) -> &'a mut W { self.variant(EXMODE_A::DACOUT) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // Clear register bits 18:17, then OR in the (masked) new value.
        self.w.bits = (self.w.bits & !(0x03 << 17)) | (((value as u32) & 0x03) << 17);
        self.w
    }
}
// Single-bit flag fields EXCLK (bit 19), SAMPLECLK (bit 20) and ALTEX
// (bit 21): each gets a bool reader alias plus a set/clear/bit write proxy.
#[doc = "Reader of field `EXCLK`"]
pub type EXCLK_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EXCLK`"]
pub struct EXCLK_W<'a> {
    w: &'a mut W,
}
impl<'a> EXCLK_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 19)) | (((value as u32) & 0x01) << 19);
        self.w
    }
}
#[doc = "Reader of field `SAMPLECLK`"]
pub type SAMPLECLK_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SAMPLECLK`"]
pub struct SAMPLECLK_W<'a> {
    w: &'a mut W,
}
impl<'a> SAMPLECLK_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 20)) | (((value as u32) & 0x01) << 20);
        self.w
    }
}
#[doc = "Reader of field `ALTEX`"]
pub type ALTEX_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ALTEX`"]
pub struct ALTEX_W<'a> {
    w: &'a mut W,
}
impl<'a> ALTEX_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 21)) | (((value as u32) & 0x01) << 21);
        self.w
    }
}
// Read-side field accessors: each extracts the field's bits from the
// cached register value and wraps them in the typed reader.
impl R {
    #[doc = "Bits 0:11 - ACMP Threshold or VDAC Data"]
    #[inline(always)]
    pub fn thres(&self) -> THRES_R { THRES_R::new((self.bits & 0x0fff) as u16) }
    #[doc = "Bits 12:13 - Select Sample Mode"]
    #[inline(always)]
    pub fn sample(&self) -> SAMPLE_R { SAMPLE_R::new(((self.bits >> 12) & 0x03) as u8) }
    #[doc = "Bits 14:16 - Enable Interrupt Generation"]
    #[inline(always)]
    pub fn setif(&self) -> SETIF_R { SETIF_R::new(((self.bits >> 14) & 0x07) as u8) }
    #[doc = "Bits 17:18 - Set GPIO Mode"]
    #[inline(always)]
    pub fn exmode(&self) -> EXMODE_R { EXMODE_R::new(((self.bits >> 17) & 0x03) as u8) }
    #[doc = "Bit 19 - Select Clock Used for Excitation Timing"]
    #[inline(always)]
    pub fn exclk(&self) -> EXCLK_R { EXCLK_R::new(((self.bits >> 19) & 0x01) != 0) }
    #[doc = "Bit 20 - Select Clock Used for Timing of Sample Delay"]
    #[inline(always)]
    pub fn sampleclk(&self) -> SAMPLECLK_R { SAMPLECLK_R::new(((self.bits >> 20) & 0x01) != 0) }
    #[doc = "Bit 21 - Use Alternative Excite Pin"]
    #[inline(always)]
    pub fn altex(&self) -> ALTEX_R { ALTEX_R::new(((self.bits >> 21) & 0x01) != 0) }
}
// Write-side field accessors: each returns a write proxy borrowing the
// writer so calls can be chained before committing the register value.
impl W {
    #[doc = "Bits 0:11 - ACMP Threshold or VDAC Data"]
    #[inline(always)]
    pub fn thres(&mut self) -> THRES_W { THRES_W { w: self } }
    #[doc = "Bits 12:13 - Select Sample Mode"]
    #[inline(always)]
    pub fn sample(&mut self) -> SAMPLE_W { SAMPLE_W { w: self } }
    #[doc = "Bits 14:16 - Enable Interrupt Generation"]
    #[inline(always)]
    pub fn setif(&mut self) -> SETIF_W { SETIF_W { w: self } }
    #[doc = "Bits 17:18 - Set GPIO Mode"]
    #[inline(always)]
    pub fn exmode(&mut self) -> EXMODE_W { EXMODE_W { w: self } }
    #[doc = "Bit 19 - Select Clock Used for Excitation Timing"]
    #[inline(always)]
    pub fn exclk(&mut self) -> EXCLK_W { EXCLK_W { w: self } }
    #[doc = "Bit 20 - Select Clock Used for Timing of Sample Delay"]
    #[inline(always)]
    pub fn sampleclk(&mut self) -> SAMPLECLK_W { SAMPLECLK_W { w: self } }
    #[doc = "Bit 21 - Use Alternative Excite Pin"]
    #[inline(always)]
    pub fn altex(&mut self) -> ALTEX_W { ALTEX_W { w: self } }
}
| 36.604972 | 96 | 0.575957 |
1af92f6281383ccef9c26f6145be39bf27a4821d
| 793 |
use std::{
fmt::{
Display,
Formatter,
Result,
},
ops::{
Add,
AddAssign,
Sub,
SubAssign,
},
};
/// Newtype wrapper around a signed sugar amount.
///
/// The binary operators (`+`, `-`) consume both operands and yield the raw
/// `isize` result — the `Output = isize` choice is kept for backward
/// compatibility with existing callers — while the assigning operators
/// (`+=`, `-=`) mutate the wrapped value in place.
// `Default` is now derived (isize defaults to 0, matching the old manual
// impl) and `Debug` is added for diagnostics; both are backward-compatible.
#[derive(Debug, Default, PartialEq, PartialOrd)]
pub struct Sugar(pub isize);

impl Add for Sugar {
    type Output = isize;

    /// Returns the sum of the two wrapped values as a plain `isize`.
    fn add(self, rhs: Self) -> Self::Output {
        self.0 + rhs.0
    }
}

impl AddAssign for Sugar {
    /// Adds `rhs`'s wrapped value into `self` in place.
    fn add_assign(&mut self, rhs: Self) {
        self.0 += rhs.0;
    }
}

impl Display for Sugar {
    /// Formats as the bare inner number, e.g. `Sugar(-7)` → `"-7"`.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
        write!(f, "{}", self.0)
    }
}

impl Sub for Sugar {
    type Output = isize;

    /// Returns the difference of the two wrapped values as a plain `isize`.
    fn sub(self, rhs: Self) -> Self::Output {
        self.0 - rhs.0
    }
}

impl SubAssign for Sugar {
    /// Subtracts `rhs`'s wrapped value from `self` in place.
    fn sub_assign(&mut self, rhs: Self) {
        self.0 -= rhs.0;
    }
}
| 13.912281 | 50 | 0.558638 |
0329fb0db1422e61479174abaa09f7d211261155
| 5,389 |
use self::WhichLine::*;
use std::fmt;
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use std::path::Path;
use std::str::FromStr;
/// The kind of diagnostic an expected-error annotation can ask for.
#[derive(Clone, Debug, PartialEq)]
pub enum ErrorKind {
    Help,
    Error,
    Note,
    Suggestion,
    Warning,
}

impl FromStr for ErrorKind {
    type Err = ();

    /// Parses the leading `KIND` token of an annotation. Matching is
    /// case-insensitive and anything after the first `:` is ignored, so both
    /// `"error"` and `"ERROR: msg"` parse as [`ErrorKind::Error`].
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let upper = s.to_uppercase();
        let keyword = upper.split(':').next().unwrap();
        match keyword {
            "HELP" => Ok(ErrorKind::Help),
            "ERROR" => Ok(ErrorKind::Error),
            "NOTE" => Ok(ErrorKind::Note),
            "SUGGESTION" => Ok(ErrorKind::Suggestion),
            "WARN" | "WARNING" => Ok(ErrorKind::Warning),
            _ => Err(()),
        }
    }
}

impl fmt::Display for ErrorKind {
    /// Renders the kind as the lowercase phrase used in test output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let label = match self {
            ErrorKind::Help => "help message",
            ErrorKind::Error => "error",
            ErrorKind::Note => "note",
            ErrorKind::Suggestion => "suggestion",
            ErrorKind::Warning => "warning",
        };
        f.write_str(label)
    }
}
/// A single expected diagnostic parsed from a test-file annotation.
#[derive(Debug)]
pub struct Error {
    /// 1-based line number in the test file the message is expected on
    /// (`load_errors` adds 1 to the 0-based `enumerate` index).
    pub line_num: usize,
    /// What kind of message we expect (e.g., warning, error, suggestion).
    /// `None` if not specified or unknown message kind.
    pub kind: Option<ErrorKind>,
    /// Expected message text. NOTE(review): the matching semantics
    /// (substring vs. exact) live outside this file — confirm at the caller.
    pub msg: String,
}
/// Which source line an annotation targets, relative to the line the
/// comment itself appears on.
#[derive(PartialEq, Debug)]
enum WhichLine {
    /// Plain `//~`: the annotation's own line.
    ThisLine,
    /// `//~|`: reuse the target of the previous non-follow annotation;
    /// the payload is that resolved line number.
    FollowPrevious(usize),
    /// `//~^^…`: point this many lines above the comment.
    AdjustBackward(usize),
}
/// Looks for either "//~| KIND MESSAGE" or "//~^^... KIND MESSAGE"
/// The former is a "follow" that inherits its target from the preceding line;
/// the latter is an "adjusts" that goes that many lines up.
///
/// Goal is to enable tests both like: //~^^^ ERROR go up three
/// and also //~^ ERROR message one for the preceding line, and
/// //~| ERROR message two for that same line.
///
/// If cfg is not None (i.e., in an incremental test), then we look
/// for `//[X]~` instead, where `X` is the current `cfg`.
pub fn load_errors(testfile: &Path, cfg: Option<&str>) -> Vec<Error> {
    let rdr = BufReader::new(File::open(testfile).unwrap());
    // `last_nonfollow_error` tracks the most recently seen
    // line with an error template that did not use the
    // follow-syntax, "//~| ...".
    //
    // (pnkfelix could not find an easy way to compose Iterator::scan
    // and Iterator::filter_map to pass along this information into
    // `parse_expected`. So instead I am storing that state here and
    // updating it in the map callback below.)
    let mut last_nonfollow_error = None;
    // Incremental tests use revision-qualified tags like `//[rev]~`.
    let tag = match cfg {
        Some(rev) => format!("//[{}]~", rev),
        None => "//~".to_string(),
    };
    rdr.lines()
        .enumerate()
        .filter_map(|(line_num, line)| {
            // `enumerate` is 0-based; error line numbers are 1-based.
            parse_expected(last_nonfollow_error, line_num + 1, &line.unwrap(), &tag).map(
                |(which, error)| {
                    match which {
                        FollowPrevious(_) => {}
                        _ => last_nonfollow_error = Some(error.line_num),
                    }
                    error
                },
            )
        })
        .collect()
}
// Parses a single line for an expected-error annotation; returns `None`
// when the line carries no `tag`. Panics (deliberately, for test-suite
// authoring errors) on malformed annotations such as `//~|` with no
// preceding non-follow annotation, or `//~` with nothing after it.
fn parse_expected(
    last_nonfollow_error: Option<usize>,
    line_num: usize,
    line: &str,
    tag: &str,
) -> Option<(WhichLine, Error)> {
    let start = line.find(tag)?;
    // `|` right after the tag means "follow"; otherwise count `^` carets
    // as an upward line offset.
    let (follow, adjusts) = if line[start + tag.len()..].chars().next().unwrap() == '|' {
        (true, 0)
    } else {
        (
            false,
            line[start + tag.len()..]
                .chars()
                .take_while(|c| *c == '^')
                .count(),
        )
    };
    // The KIND token (if any) starts after the tag and the `|`/`^` markers.
    let kind_start = start + tag.len() + adjusts + (follow as usize);
    let (kind, msg);
    match line[kind_start..]
        .split_whitespace()
        .next()
        .expect("Encountered unexpected empty comment")
        .parse::<ErrorKind>()
    {
        Ok(k) => {
            // If we find `//~ ERROR foo` or something like that:
            // skip the whitespace, then the KIND word, to leave the message.
            kind = Some(k);
            let letters = line[kind_start..].chars();
            msg = letters
                .skip_while(|c| c.is_whitespace())
                .skip_while(|c| !c.is_whitespace())
                .collect::<String>();
        }
        Err(_) => {
            // Otherwise we found `//~ foo`:
            // no KIND word; the whole remainder is the message.
            kind = None;
            let letters = line[kind_start..].chars();
            msg = letters
                .skip_while(|c| c.is_whitespace())
                .collect::<String>();
        }
    }
    let msg = msg.trim().to_owned();
    let (which, line_num) = if follow {
        assert_eq!(adjusts, 0, "use either //~| or //~^, not both.");
        let line_num = last_nonfollow_error.expect(
            "encountered //~| without \
             preceding //~^ line.",
        );
        (FollowPrevious(line_num), line_num)
    } else {
        let which = if adjusts > 0 {
            AdjustBackward(adjusts)
        } else {
            ThisLine
        };
        // Resolve the caret offset to an absolute (1-based) line number.
        let line_num = line_num - adjusts;
        (which, line_num)
    };
    debug!(
        "line={} tag={:?} which={:?} kind={:?} msg={:?}",
        line_num, tag, which, kind, msg
    );
    Some((
        which,
        Error {
            line_num,
            kind,
            msg,
        },
    ))
}
| 29.448087 | 89 | 0.515123 |
2fc09a29f9b63d9d9bce3232894e54dd4eb74e6c
| 4,197 |
use super::errs;
use std::ops::RangeInclusive;
use std::time::Duration;
/// Rules placed on the expected heartbeat interval via out-of-band rules of
/// engagement.
///
/// Please note that [`HeartbeatRule`] is marked with
/// `#[non_exhaustive]`, which future-proofs the enumeration type in case more
/// variants are added.
///
/// Please refer to specs. §4.3.5 for more information.
#[derive(Debug, Clone, Hash)]
#[non_exhaustive]
pub enum HeartbeatRule {
    /// The acceptor requires a specific heartbeat interval, expressed as a
    /// [`Duration`]. Please refer to specs. §4.3.5.1 for
    /// more information.
    Exact(Duration),
    /// The acceptor requires the initiator to specify a heartbeat value within a
    /// [`RangeInclusive`] of
    /// [`Duration`s](Duration). Please refer to specs. §4.3.5.3 for
    /// more information.
    Range(RangeInclusive<Duration>),
    /// The acceptor poses no restrictions on the heartbeat interval and the
    /// initiator can choose any value. Please refer to specs. §4.3.5.3 for more
    /// information.
    Any,
}
impl HeartbeatRule {
    /// Checks an initiator-proposed heartbeat interval against this rule,
    /// returning `Ok(())` on acceptance or a human-readable rejection
    /// message on failure.
    ///
    /// # Examples
    ///
    /// Require exact matching with [`HeartbeatRule::Exact`](HeartbeatRule::Exact):
    ///
    /// ```
    /// use fefix::session::HeartbeatRule;
    /// use std::time::Duration;
    ///
    /// let rule = HeartbeatRule::Exact(Duration::from_secs(30));
    /// assert!(rule.validate(&Duration::from_secs(60)).is_err());
    /// assert!(rule.validate(&Duration::from_secs(20)).is_err());
    /// assert!(rule.validate(&Duration::from_secs(30)).is_ok());
    /// ```
    ///
    /// Accepting any proposed heartbeat value with
    /// [`HeartbeatRule::Any`](HeartbeatRule::Any):
    ///
    /// ```
    /// use fefix::session::HeartbeatRule;
    /// use std::time::Duration;
    ///
    /// let rule = HeartbeatRule::Any;
    /// assert!(rule.validate(&Duration::from_secs(1000)).is_ok());
    /// assert!(rule.validate(&Duration::from_secs(1)).is_ok());
    /// ```
    pub fn validate(&self, proposal: &Duration) -> std::result::Result<(), String> {
        // Guard-style match: for each rule the first arm is the acceptance
        // case and the second produces the matching rejection message.
        match self {
            HeartbeatRule::Exact(expected) if proposal == expected => Ok(()),
            HeartbeatRule::Exact(expected) => Err(errs::heartbeat_exact(expected.as_secs())),
            HeartbeatRule::Range(bounds) if bounds.contains(proposal) => Ok(()),
            HeartbeatRule::Range(bounds) => Err(errs::heartbeat_range(
                bounds.start().as_secs(),
                bounds.end().as_secs(),
            )),
            // `Any` still rejects a zero interval: heartbeats must be > 0.
            HeartbeatRule::Any if *proposal == Duration::from_secs(0) => {
                Err(errs::heartbeat_gt_0())
            }
            HeartbeatRule::Any => Ok(()),
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // Exercises all three rule variants against in-range, out-of-range and
    // zero proposals. (Fixed the typo'd name and replaced every
    // `!….is_ok()` with the clearer, Clippy-preferred `….is_err()`.)
    #[test]
    fn heartbeat_validation() {
        let rule_exact_1 = HeartbeatRule::Exact(Duration::from_secs(1));
        let rule_range_5_30 =
            HeartbeatRule::Range(Duration::from_secs(5)..=Duration::from_secs(30));
        let rule_any = HeartbeatRule::Any;
        assert!(rule_exact_1.validate(&Duration::from_secs(1)).is_ok());
        assert!(rule_exact_1.validate(&Duration::from_secs(2)).is_err());
        assert!(rule_exact_1.validate(&Duration::from_secs(0)).is_err());
        assert!(rule_range_5_30.validate(&Duration::from_secs(5)).is_ok());
        assert!(rule_range_5_30.validate(&Duration::from_secs(10)).is_ok());
        assert!(rule_range_5_30.validate(&Duration::from_secs(30)).is_ok());
        assert!(rule_range_5_30.validate(&Duration::from_secs(0)).is_err());
        assert!(rule_range_5_30.validate(&Duration::from_secs(4)).is_err());
        assert!(rule_range_5_30.validate(&Duration::from_secs(60)).is_err());
        assert!(rule_any.validate(&Duration::from_secs(1)).is_ok());
        assert!(rule_any.validate(&Duration::from_secs(0)).is_err());
    }
}
| 37.141593 | 84 | 0.578747 |
140d4c89016633a9334c8bcfb38925721a987814
| 966 |
use super::{Compose, Identity, Invert, Iso, PartialLens};
/// The supertype of all prism families.
pub trait Prism: PartialLens
    where Self::AtInitial: Prism,
          Self::AtFinal: Prism
{
    // Constructor direction of the prism: builds a source value from a
    // target value (the reverse of the partial getter from `PartialLens`).
    fn inject(&self, v: Self::FinalTarget) -> Self::FinalSource;
}
impl<S, T> Prism for Identity<S, T> {
    // Identity prism: injection is the identity function.
    #[inline]
    fn inject(&self, v: T) -> T {
        v
    }
}
impl<LF: Prism, LS: ?Sized> Prism for Compose<LF, LS>
    where LS: Prism<InitialTarget = LF::InitialSource, FinalTarget = LF::FinalSource>,
          LF::AtInitial: Prism,
          LF::AtFinal: Prism,
          LS::AtInitial: Prism,
          LS::AtFinal: Prism
{
    // Composition injects through `first`, then wraps the result via
    // `second` — the reverse order of the composed getter.
    fn inject(&self, v: Self::FinalTarget) -> Self::FinalSource {
        self.second.inject(self.first.inject(v))
    }
}
impl<L: Iso> Prism for Invert<L>
    where L::AtInitial: Iso,
          L::AtFinal: Iso
{
    // For an inverted iso the roles swap: injection is the underlying
    // iso's `get`.
    #[inline]
    fn inject(&self, v: Self::FinalTarget) -> Self::FinalSource {
        self.deinvert.get(v)
    }
}
| 24.769231 | 86 | 0.598344 |
48277f03a1bc0f46614bbf8ef0cc8c4f3da24c30
| 2,671 |
//! Systems using rust native loader and freetype for rasterizing
use crate::config::{Config, TextStyle};
use crate::font::fontloader;
use crate::font::ftfont::FreeTypeFontImpl;
use crate::font::{
ftwrap, shape_with_harfbuzz, FallbackIdx, Font, FontSystem, GlyphInfo, NamedFont,
};
use failure::{format_err, Error};
use log::{debug, warn};
// A loaded font family: FreeType faces plus the raw bytes they were built
// from. Field order/underscore names are load-bearing only for Drop below.
struct NamedFontImpl {
    // FreeType library handle; must outlive every face in `fonts`.
    _lib: ftwrap::Library,
    fonts: Vec<FreeTypeFontImpl>,
    // Raw font file bytes; never read after construction, but presumably
    // referenced by the faces (created via `new_face_from_slice`) — kept
    // alive for the lifetime of this struct. TODO(review): confirm in ftwrap.
    _fontdata: Vec<Vec<u8>>,
}
impl Drop for NamedFontImpl {
    fn drop(&mut self) {
        // Ensure that we drop the fonts before we drop the
        // library, otherwise we will end up faulting
        self.fonts.clear();
    }
}
// The concrete `FontSystem` selected for this build configuration.
pub type FontSystemImpl = FontLoaderAndFreeType;
// Stateless handle; all per-font state lives in the `NamedFont` it loads.
pub struct FontLoaderAndFreeType {}
impl FontLoaderAndFreeType {
    pub fn new() -> Self {
        Self {}
    }
}
impl FontSystem for FontLoaderAndFreeType {
    // Loads the system fonts matching `style`, building one FreeType face
    // per font at `config.font_size * font_scale` points and `config.dpi`.
    fn load_font(
        &self,
        config: &Config,
        style: &TextStyle,
        font_scale: f64,
    ) -> Result<Box<dyn NamedFont>, Error> {
        let mut lib = ftwrap::Library::new()?;
        // Some systems don't support this mode, so if it fails, we don't
        // care to abort the rest of what we're doing
        match lib.set_lcd_filter(ftwrap::FT_LcdFilter::FT_LCD_FILTER_DEFAULT) {
            Ok(_) => (),
            Err(err) => warn!("Ignoring: FT_LcdFilter failed: {:?}", err),
        };
        let mut fonts = Vec::new();
        let mut fontdata = Vec::new();
        // Clippy is dead wrong about this iterator being an identity_conversion
        #[cfg_attr(feature = "cargo-clippy", allow(clippy::identity_conversion))]
        for (data, idx) in fontloader::load_system_fonts(config, style)? {
            debug!("want idx {} in bytes of len {}", idx, data.len());
            // Build the face from the byte slice, then stash the bytes so
            // they outlive the face (see `NamedFontImpl::_fontdata`).
            let face = lib.new_face_from_slice(&data, idx.into())?;
            fontdata.push(data);
            fonts.push(FreeTypeFontImpl::with_face_size_and_dpi(
                face,
                config.font_size * font_scale,
                config.dpi as u32,
            )?);
        }
        Ok(Box::new(NamedFontImpl {
            fonts,
            _lib: lib,
            _fontdata: fontdata,
        }))
    }
}
impl NamedFont for NamedFontImpl {
    /// Returns the font at fallback position `idx`, or an error once the
    /// fallback chain is exhausted.
    fn get_fallback(&mut self, idx: FallbackIdx) -> Result<&dyn Font, Error> {
        match self.fonts.get(idx) {
            Some(font) => Ok(font as &dyn Font),
            None => Err(format_err!("no fallback fonts available (idx={})", idx)),
        }
    }

    /// Shapes `s` with harfbuzz, starting from the primary (index 0) font.
    fn shape(&mut self, s: &str) -> Result<Vec<GlyphInfo>, Error> {
        shape_with_harfbuzz(self, 0, s)
    }
}
| 30.701149 | 85 | 0.5848 |
6a7a3b1d7d3f04ebbe00e9af3e6c9d68a6056d21
| 2,176 |
// Copyright (c) 2018-2022 The MobileCoin Foundation
// Wrapper module for the build-script-generated gRPC/protobuf code, plus the
// external proto types it expects to find in scope.
mod autogenerated_code {
    // Expose proto data types from included third-party/external proto files.
    pub use mc_api::{blockchain, external};
    pub use protobuf::well_known_types::Empty;
    // Needed because the auto-generated code references the `Empty` message
    // through an `empty` module path.
    pub mod empty {
        pub use protobuf::well_known_types::Empty;
    }
    // Include the auto-generated code.
    include!(concat!(env!("OUT_DIR"), "/protos-auto-gen/mod.rs"));
}
pub use autogenerated_code::*;
// These are needed for tests: the protobuf-generated types only derive
// `PartialEq`, so add the `Eq` marker here. Sound provided the derived
// `PartialEq` is an equivalence relation for these messages (i.e. no
// floating-point fields — TODO(review): confirm against the .proto).
impl Eq for report::Report {}
impl Eq for report::ReportResponse {}
// Protobuf message -> domain type. `take_*` moves the owned fields out of
// `src` instead of cloning them.
impl From<report::Report> for mc_fog_report_types::Report {
    fn from(mut src: report::Report) -> mc_fog_report_types::Report {
        mc_fog_report_types::Report {
            fog_report_id: src.take_fog_report_id(),
            report: (&src.take_report()).into(),
            pubkey_expiry: src.pubkey_expiry,
        }
    }
}
// Domain type -> protobuf message, built field by field via setters.
impl From<mc_fog_report_types::Report> for report::Report {
    fn from(src: mc_fog_report_types::Report) -> report::Report {
        let mut result = report::Report::new();
        result.set_fog_report_id(src.fog_report_id);
        result.set_report((&src.report).into());
        result.set_pubkey_expiry(src.pubkey_expiry);
        result
    }
}
impl From<report::ReportResponse> for mc_fog_report_types::ReportResponse {
    fn from(src: report::ReportResponse) -> Self {
        Self {
            // Note: this is out of order because get_chain is a borrow, but the
            // iter below is a partial move.
            chain: src.get_chain().into(),
            reports: src.reports.into_iter().map(|x| x.into()).collect(),
            signature: src.signature,
        }
    }
}
impl From<mc_fog_report_types::ReportResponse> for report::ReportResponse {
    fn from(src: mc_fog_report_types::ReportResponse) -> Self {
        let mut result = report::ReportResponse::new();
        result.set_signature(src.signature);
        result.set_chain(src.chain.into());
        result.set_reports(src.reports.into_iter().map(|x| x.into()).collect());
        result
    }
}
| 34 | 81 | 0.653033 |
d67c1e498323f97acbf69b7157ea15cb8835c372
| 1,088 |
// Emitter helper macros. They all expand inside an emitter method and rely
// on a `__cur_emitter!()` macro (defined elsewhere) resolving to the current
// emitter, whose `wr` writer does the actual output; `?` propagates write
// errors to the enclosing method.

// Emits `$e` preceded by a single space, only when the option is `Some`.
macro_rules! opt_leading_space {
    ($e:expr) => {
        if let Some(ref e) = $e {
            space!();
            emit!(e);
        }
    };
}
// Emits `$e` (no leading space) when the option is `Some`.
macro_rules! opt {
    ($e:expr) => {{
        if let Some(ref expr) = $e {
            emit!(expr);
        }
    }};
}
// Emits any `Node` via its `emit_with` hook.
macro_rules! emit {
    ($e:expr) => {{
        ::Node::emit_with(&$e, __cur_emitter!())?;
    }};
}
// Writes a keyword, optionally associated with a source span.
macro_rules! keyword {
    ($span:expr, $s:expr) => {
        __cur_emitter!().wr.write_keyword(Some($span), $s)?;
    };
    ($s:expr) => {
        __cur_emitter!().wr.write_keyword(None, $s)?;
    };
}
// Writes punctuation; `;` has a dedicated writer method.
macro_rules! punct {
    (";") => {
        __cur_emitter!().wr.write_semi()?;
    };
    ($s:expr) => {
        __cur_emitter!().wr.write_punct($s)?;
    };
}
// Writes an operator token.
macro_rules! operator {
    ($s:expr) => {
        __cur_emitter!().wr.write_operator($s)?;
    };
}
// Mandatory space between tokens.
macro_rules! space {
    () => {
        __cur_emitter!().wr.write_space()?;
    };
}
// Cosmetic-only space; currently identical to `space!` — NOTE(review):
// presumably kept separate so a minifying writer could drop it. Confirm.
macro_rules! formatting_space {
    () => {
        __cur_emitter!().wr.write_space()?;
    };
}
// Statement-terminating semicolon.
macro_rules! semi {
    () => {
        punct!(";")
    };
}
26229b3acc7cf330713303cd565e66fcc7b45aed
| 11,275 |
use std::ffi::OsString;
use std::fs::File;
use std::io;
use std::io::{BufRead, BufReader, BufWriter, Seek};
use std::path::Path;
use std::u32;
#[cfg(feature = "bmp")]
use bmp;
#[cfg(feature = "gif_codec")]
use gif;
#[cfg(feature = "hdr")]
use hdr;
#[cfg(feature = "ico")]
use ico;
#[cfg(feature = "jpeg")]
use jpeg;
#[cfg(feature = "png_codec")]
use png;
#[cfg(feature = "pnm")]
use pnm;
#[cfg(feature = "tga")]
use tga;
#[cfg(feature = "tiff")]
use tiff;
#[cfg(feature = "webp")]
use webp;
use color;
use image;
use dynimage::DynamicImage;
use image::{ImageDecoder, ImageFormat, ImageResult};
use ImageError;
/// Internal error type for guessing format from path.
// NOTE(review): conversion into the public `ImageError` presumably happens
// elsewhere in this module — not visible in this chunk.
pub(crate) enum PathError {
    /// The extension did not fit a supported format.
    UnknownExtension(OsString),
    /// Extension could not be converted to `str`.
    NoExtension,
}
/// Opens the image at `path`, inferring the format from the file extension.
///
/// The explicit `match` on `File::open` has been replaced by `?`, which
/// converts the `io::Error` through `From` — the same pattern
/// `image_dimensions_impl` below already uses. NOTE(review): this assumes
/// `ImageError::from(io::Error)` yields `ImageError::IoError`; confirm in
/// the `ImageError` definition.
pub(crate) fn open_impl(path: &Path) -> ImageResult<DynamicImage> {
    let fin = BufReader::new(File::open(path)?);
    load(fin, ImageFormat::from_path(path)?)
}
/// Create a new image from a Reader
///
/// Try [`io::Reader`] for more advanced uses.
///
/// [`io::Reader`]: io/struct.Reader.html
pub fn load<R: BufRead + Seek>(r: R, format: ImageFormat) -> ImageResult<DynamicImage> {
    // Each arm only exists when its codec feature is enabled; with every
    // feature on, the trailing `_` arm is dead code, hence the allows.
    #[allow(deprecated, unreachable_patterns)]
    // Default is unreachable if all features are supported.
    match format {
        #[cfg(feature = "png_codec")]
        image::ImageFormat::PNG => DynamicImage::from_decoder(png::PNGDecoder::new(r)?),
        #[cfg(feature = "gif_codec")]
        image::ImageFormat::GIF => DynamicImage::from_decoder(gif::Decoder::new(r)?),
        #[cfg(feature = "jpeg")]
        image::ImageFormat::JPEG => DynamicImage::from_decoder(jpeg::JPEGDecoder::new(r)?),
        #[cfg(feature = "webp")]
        image::ImageFormat::WEBP => DynamicImage::from_decoder(webp::WebpDecoder::new(r)?),
        #[cfg(feature = "tiff")]
        image::ImageFormat::TIFF => DynamicImage::from_decoder(tiff::TIFFDecoder::new(r)?),
        #[cfg(feature = "tga")]
        image::ImageFormat::TGA => DynamicImage::from_decoder(tga::TGADecoder::new(r)?),
        #[cfg(feature = "bmp")]
        image::ImageFormat::BMP => DynamicImage::from_decoder(bmp::BMPDecoder::new(r)?),
        #[cfg(feature = "ico")]
        image::ImageFormat::ICO => DynamicImage::from_decoder(ico::ICODecoder::new(r)?),
        // NOTE(review): `r` is already BufRead; the extra BufReader wrap is
        // presumably required by these decoders' constructor signatures.
        #[cfg(feature = "hdr")]
        image::ImageFormat::HDR => DynamicImage::from_decoder(hdr::HDRAdapter::new(BufReader::new(r))?),
        #[cfg(feature = "pnm")]
        image::ImageFormat::PNM => DynamicImage::from_decoder(pnm::PNMDecoder::new(BufReader::new(r))?),
        _ => Err(image::ImageError::UnsupportedError(format!(
            "A decoder for {:?} is not available.",
            format
        ))),
    }
}
/// Probes the image at `path` for its pixel dimensions without decoding it.
pub(crate) fn image_dimensions_impl(path: &Path) -> ImageResult<(u32, u32)> {
    // Infer the container format from the file extension, then defer to the
    // format-specific header probe.
    let format = image::ImageFormat::from_path(path)?;
    let reader = BufReader::new(File::open(path)?);
    image_dimensions_with_format_impl(reader, format)
}
// Reads only the header of `fin` in the given `format` and returns
// (width, height). Fails with `UnsupportedError` when the codec feature is
// disabled, and with `DimensionError` for sizes that don't fit a u32.
pub(crate) fn image_dimensions_with_format_impl<R: BufRead + Seek>(fin: R, format: ImageFormat)
    -> ImageResult<(u32, u32)>
{
    #[allow(unreachable_patterns)]
    // Default is unreachable if all features are supported.
    let (w, h): (u64, u64) = match format {
        #[cfg(feature = "jpeg")]
        image::ImageFormat::JPEG => jpeg::JPEGDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "png_codec")]
        image::ImageFormat::PNG => png::PNGDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "gif_codec")]
        image::ImageFormat::GIF => gif::Decoder::new(fin)?.dimensions(),
        #[cfg(feature = "webp")]
        image::ImageFormat::WEBP => webp::WebpDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "tiff")]
        image::ImageFormat::TIFF => tiff::TIFFDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "tga")]
        image::ImageFormat::TGA => tga::TGADecoder::new(fin)?.dimensions(),
        #[cfg(feature = "bmp")]
        image::ImageFormat::BMP => bmp::BMPDecoder::new(fin)?.dimensions(),
        #[cfg(feature = "ico")]
        image::ImageFormat::ICO => ico::ICODecoder::new(fin)?.dimensions(),
        #[cfg(feature = "hdr")]
        image::ImageFormat::HDR => hdr::HDRAdapter::new(fin)?.dimensions(),
        #[cfg(feature = "pnm")]
        image::ImageFormat::PNM => {
            pnm::PNMDecoder::new(fin)?.dimensions()
        }
        format => return Err(image::ImageError::UnsupportedError(format!(
            "Image format image/{:?} is not supported.",
            format
        ))),
    };
    // `>=` means a dimension of exactly u32::MAX is also rejected.
    if w >= u64::from(u32::MAX) || h >= u64::from(u32::MAX) {
        return Err(image::ImageError::DimensionError);
    }
    Ok((w as u32, h as u32))
}
// Encodes `buf` (raw pixels of the given `color` type, `width` x `height`)
// to `path`, choosing the encoder from the lowercased file extension.
pub(crate) fn save_buffer_impl(
    path: &Path,
    buf: &[u8],
    width: u32,
    height: u32,
    color: color::ColorType,
) -> io::Result<()> {
    let fout = &mut BufWriter::new(File::create(path)?);
    // Missing/non-UTF-8 extensions fall through to the error arm as "".
    let ext = path.extension()
        .and_then(|s| s.to_str())
        .map_or("".to_string(), |s| s.to_ascii_lowercase());
    match &*ext {
        #[cfg(feature = "ico")]
        "ico" => ico::ICOEncoder::new(fout).encode(buf, width, height, color),
        #[cfg(feature = "jpeg")]
        "jpg" | "jpeg" => jpeg::JPEGEncoder::new(fout).encode(buf, width, height, color),
        #[cfg(feature = "png_codec")]
        "png" => png::PNGEncoder::new(fout).encode(buf, width, height, color),
        // The pbm/pgm/ppm extensions force the matching binary PNM subtype.
        #[cfg(feature = "pnm")]
        "pbm" => pnm::PNMEncoder::new(fout)
            .with_subtype(pnm::PNMSubtype::Bitmap(pnm::SampleEncoding::Binary))
            .encode(buf, width, height, color),
        #[cfg(feature = "pnm")]
        "pgm" => pnm::PNMEncoder::new(fout)
            .with_subtype(pnm::PNMSubtype::Graymap(pnm::SampleEncoding::Binary))
            .encode(buf, width, height, color),
        #[cfg(feature = "pnm")]
        "ppm" => pnm::PNMEncoder::new(fout)
            .with_subtype(pnm::PNMSubtype::Pixmap(pnm::SampleEncoding::Binary))
            .encode(buf, width, height, color),
        #[cfg(feature = "pnm")]
        "pam" => pnm::PNMEncoder::new(fout).encode(buf, width, height, color),
        #[cfg(feature = "bmp")]
        "bmp" => bmp::BMPEncoder::new(fout).encode(buf, width, height, color),
        #[cfg(feature = "tiff")]
        "tif" | "tiff" => tiff::TiffEncoder::new(fout).encode(buf, width, height, color)
            .map_err(|e| io::Error::new(io::ErrorKind::Other, Box::new(e))), // FIXME: see https://github.com/image-rs/image/issues/921
        format => Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            &format!("Unsupported image format image/{:?}", format)[..],
        )),
    }
}
/// Saves the raw pixel buffer `buf` to `path` using an explicitly given
/// `format` instead of guessing from the extension.
///
/// Only a subset of the formats supported by `save_buffer_impl` is
/// available here (no PNM subtypes); unsupported formats yield an
/// `InvalidInput` I/O error.
pub(crate) fn save_buffer_with_format_impl(
    path: &Path,
    buf: &[u8],
    width: u32,
    height: u32,
    color: color::ColorType,
    format: ImageFormat,
) -> io::Result<()> {
    let fout = &mut BufWriter::new(File::create(path)?);
    match format {
        #[cfg(feature = "ico")]
        image::ImageFormat::ICO => ico::ICOEncoder::new(fout).encode(buf, width, height, color),
        #[cfg(feature = "jpeg")]
        image::ImageFormat::JPEG => jpeg::JPEGEncoder::new(fout).encode(buf, width, height, color),
        #[cfg(feature = "png_codec")]
        image::ImageFormat::PNG => png::PNGEncoder::new(fout).encode(buf, width, height, color),
        #[cfg(feature = "bmp")]
        image::ImageFormat::BMP => bmp::BMPEncoder::new(fout).encode(buf, width, height, color),
        #[cfg(feature = "tiff")]
        image::ImageFormat::TIFF => tiff::TiffEncoder::new(fout)
            .encode(buf, width, height, color)
            // Wrap the tiff error as generic I/O; see issue #921 upstream.
            .map_err(|e| io::Error::new(io::ErrorKind::Other, Box::new(e))),
        _ => Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            &format!("Unsupported image format image/{:?}", format)[..],
        )),
    }
}
/// Guess format from a path.
///
/// Returns `PathError::NoExtension` if the path has no extension or returns a
/// `PathError::UnknownExtension` containing the extension if it can not be convert to a `str`.
pub(crate) fn guess_format_from_path_impl(path: &Path) -> Result<ImageFormat, PathError> {
    let exact_ext = path.extension();
    let lowered = exact_ext
        .and_then(|ext| ext.to_str())
        .map(str::to_ascii_lowercase);
    // First phase: map the lowercased extension to a format, if recognized.
    let recognized = match lowered.as_ref().map(String::as_str) {
        Some("jpg") | Some("jpeg") => Some(image::ImageFormat::JPEG),
        Some("png") => Some(image::ImageFormat::PNG),
        Some("gif") => Some(image::ImageFormat::GIF),
        Some("webp") => Some(image::ImageFormat::WEBP),
        Some("tif") | Some("tiff") => Some(image::ImageFormat::TIFF),
        Some("tga") => Some(image::ImageFormat::TGA),
        Some("bmp") => Some(image::ImageFormat::BMP),
        Some("ico") => Some(image::ImageFormat::ICO),
        Some("hdr") => Some(image::ImageFormat::HDR),
        Some("pbm") | Some("pam") | Some("ppm") | Some("pgm") => Some(image::ImageFormat::PNM),
        _ => None,
    };
    // Second phase: on failure, report the *original* (non-lowercased)
    // extension in the error, or its absence.
    match recognized {
        Some(format) => Ok(format),
        None => match exact_ext {
            None => Err(PathError::NoExtension),
            Some(os) => Err(PathError::UnknownExtension(os.to_owned())),
        },
    }
}
// Signature table consulted by `guess_format_impl`: each entry pairs the
// magic bytes a file of that format starts with and the format itself.
// TGA does not appear because the format has no magic number.
static MAGIC_BYTES: [(&[u8], ImageFormat); 17] = [
    (b"\x89PNG\r\n\x1a\n", ImageFormat::PNG),
    (&[0xff, 0xd8, 0xff], ImageFormat::JPEG),
    (b"GIF89a", ImageFormat::GIF),
    (b"GIF87a", ImageFormat::GIF),
    (b"RIFF", ImageFormat::WEBP), // TODO: better magic byte detection, see https://github.com/image-rs/image/issues/660
    (b"MM\x00*", ImageFormat::TIFF),
    (b"II*\x00", ImageFormat::TIFF),
    (b"BM", ImageFormat::BMP),
    (&[0, 0, 1, 0], ImageFormat::ICO),
    (b"#?RADIANCE", ImageFormat::HDR),
    // The seven PNM header variants (P1..P7) all map to the PNM umbrella format.
    (b"P1", ImageFormat::PNM),
    (b"P2", ImageFormat::PNM),
    (b"P3", ImageFormat::PNM),
    (b"P4", ImageFormat::PNM),
    (b"P5", ImageFormat::PNM),
    (b"P6", ImageFormat::PNM),
    (b"P7", ImageFormat::PNM),
];
/// Guess image format from memory block
///
/// Makes an educated guess about the image format based on the Magic Bytes at the beginning.
/// TGA is not supported by this function.
/// This is not to be trusted on the validity of the whole memory block
pub fn guess_format(buffer: &[u8]) -> ImageResult<ImageFormat> {
    guess_format_impl(buffer).ok_or_else(|| {
        image::ImageError::UnsupportedError("Unsupported image format".to_string())
    })
}
/// Scans `MAGIC_BYTES` for a signature that prefixes `buffer`; the first
/// match wins. `None` means no known signature matched.
pub(crate) fn guess_format_impl(buffer: &[u8]) -> Option<ImageFormat> {
    MAGIC_BYTES
        .iter()
        .find(|&&(signature, _)| buffer.starts_with(signature))
        .map(|&(_, format)| format)
}
impl From<PathError> for ImageError {
    /// Converts a path-classification failure into the user-facing
    /// `UnsupportedError` variant, preserving the offending extension.
    fn from(path: PathError) -> Self {
        let message = match path {
            PathError::NoExtension =>
                "Image format could not be recognized: no extension present".to_string(),
            PathError::UnknownExtension(ext) => format!(
                "Image format image/{} is not recognized.", Path::new(&ext).display()),
        };
        ImageError::UnsupportedError(message)
    }
}
| 37.70903 | 135 | 0.595743 |
18eed9999fa99d201c52020382e215f0e7d982db
| 809 |
#![deny(clippy::print_stdout, clippy::print_stderr)]
//! Contains functionality for dealing with projects.
// Private submodules; their public surface is re-exported below.
mod command;
mod domain;
mod error;
mod meta_syntax;
mod plugin;
mod policy;
mod tmc_project_yml;
// External-command execution helpers.
pub use self::command::{ExitStatus, Output, TmcCommand};
// Core domain types: exercise descriptions, run results, style validation.
pub use self::domain::{
    ExerciseDesc, ExercisePackagingConfiguration, RunResult, RunStatus, StyleValidationError,
    StyleValidationResult, StyleValidationStrategy, TestDesc, TestResult,
};
pub use self::error::{CommandError, PopenError, TmcError};
pub use self::meta_syntax::{MetaString, MetaSyntaxParser};
pub use self::plugin::{Language, LanguagePlugin};
pub use self::policy::{
    EverythingIsStudentFilePolicy, NothingIsStudentFilePolicy, StudentFilePolicy,
};
pub use self::tmc_project_yml::{PythonVer, TmcProjectYml};
// NOTE(review): re-exporting `nom`, presumably so downstream plugin crates
// use the same version — confirm against consumers.
pub use nom;
099811094eba8975c20b0272f9c6fcabec76b6b8
| 34 |
// Public submodule declarations; implementations live in the sibling files.
pub mod color;
pub mod primitive;
| 11.333333 | 18 | 0.764706 |
f9c4b164da2f511774cd87edf49fad13719d3cc9
| 184 |
use anyhow::Error;
use backup_app_rust::backup_opts::BackupOpts;
/// Entry point: configures logging from the environment (`RUST_LOG`, via
/// `env_logger`) and then delegates all work to `BackupOpts::process_args`,
/// propagating any error to the runtime.
#[tokio::main]
async fn main() -> Result<(), Error> {
    // Initialize logging first so subsequent work can emit log records.
    env_logger::init();
    BackupOpts::process_args().await
}
| 18.4 | 45 | 0.679348 |
e59690e54abbb606c44d27b14b7278566b225fa4
| 439 |
use std::collections::HashMap;
use sdl2::render::Texture as SdlTexture;
// Cheap, copyable handle for a loaded asset: a newtype over a u32 id,
// usable as a HashMap key (derives Eq + Hash).
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct AssetRef(pub(crate) u32);
// Crate-internal rendering state.
pub(crate) struct GraphicsContext {
    // SDL textures keyed by their `AssetRef` handle.
    pub(crate) textures: HashMap<AssetRef, SdlTexture>,
    // Counter starting at 0; presumably the source of fresh `AssetRef`
    // ids — the increment site is not visible here, confirm at call sites.
    pub(crate) counter: u32,
}
impl GraphicsContext {
pub(crate) fn new() -> Self {
Self {
textures: HashMap::new(),
counter: 0,
}
}
}
| 20.904762 | 55 | 0.621868 |
879584c35a5c71d37a11bade36b091df26a8a207
| 785 |
use cursive::theme::Color::{self, *};
use cursive::theme::PaletteColor::*;
use cursive::theme::{BorderStyle, Palette, Theme};
/// Builds a palette in which every themed role uses `TerminalDefault`,
/// so the application inherits the user's terminal colors instead of
/// cursive's built-in scheme.
pub fn pallete_gen() -> Palette {
    let mut p = Palette::default();
    // One loop instead of nine copy-pasted assignments; the set of roles
    // is unchanged from the original.
    for &role in &[
        Background,
        Shadow,
        View,
        Primary,
        Secondary,
        Tertiary,
        TitlePrimary,
        Highlight,
        HighlightInactive,
    ] {
        p[role] = TerminalDefault;
    }
    p
}
/// Builds the application theme: no drop shadows, no borders, and the
/// terminal-default palette from [`pallete_gen`].
pub fn theme_gen() -> Theme {
    let mut t = Theme::default();
    t.shadow = false;
    t.borders = BorderStyle::None;
    t.palette = pallete_gen();
    // Tail expression instead of an explicit `return` (idiomatic Rust).
    t
}
/// Background color drawn behind the cursor: bright ("light") black.
pub fn cursor_bg() -> Color {
    cursive::theme::Color::Light(cursive::theme::BaseColor::Black)
}
| 25.322581 | 50 | 0.656051 |
ccddb46c8af48cf0aee233461ce9a574dfdd340b
| 60,223 |
// Generated from definition io.k8s.api.core.v1.PersistentVolumeClaim
// NOTE(review): machine-generated (k8s-openapi style); regenerate from the
// OpenAPI spec rather than hand-editing.
/// PersistentVolumeClaim is a user's request for and claim to a persistent volume
#[derive(Clone, Debug, Default, PartialEq)]
pub struct PersistentVolumeClaim {
    /// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
    pub metadata: Option<crate::v1_8::apimachinery::pkg::apis::meta::v1::ObjectMeta>,
    /// Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
    pub spec: Option<crate::v1_8::api::core::v1::PersistentVolumeClaimSpec>,
    /// Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
    pub status: Option<crate::v1_8::api::core::v1::PersistentVolumeClaimStatus>,
}
// Begin /v1/PersistentVolumeClaim
// Generated from operation createCoreV1NamespacedPersistentVolumeClaim
impl PersistentVolumeClaim {
    /// create a PersistentVolumeClaim
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`CreateNamespacedPersistentVolumeClaimResponse`]`>` constructor, or [`CreateNamespacedPersistentVolumeClaimResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `namespace`
    ///
    ///     object name and auth scope, such as for teams and projects
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn create_namespaced_persistent_volume_claim(
        namespace: &str,
        body: &crate::v1_8::api::core::v1::PersistentVolumeClaim,
        optional: CreateNamespacedPersistentVolumeClaimOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<CreateNamespacedPersistentVolumeClaimResponse>), crate::RequestError> {
        let CreateNamespacedPersistentVolumeClaimOptional {
            pretty,
        } = optional;
        // Percent-encode the namespace so arbitrary values are path-safe.
        let __url = format!("/api/v1/namespaces/{namespace}/persistentvolumeclaims?",
            namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        if let Some(pretty) = pretty {
            __query_pairs.append_pair("pretty", pretty);
        }
        let __url = __query_pairs.finish();
        let mut __request = http::Request::post(__url);
        let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Optional parameters of [`PersistentVolumeClaim::create_namespaced_persistent_volume_claim`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct CreateNamespacedPersistentVolumeClaimOptional<'a> {
    /// If 'true', then the output is pretty printed.
    pub pretty: Option<&'a str>,
}
/// Use `<CreateNamespacedPersistentVolumeClaimResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolumeClaim::create_namespaced_persistent_volume_claim`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum CreateNamespacedPersistentVolumeClaimResponse {
    Ok(crate::v1_8::api::core::v1::PersistentVolumeClaim),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for CreateNamespacedPersistentVolumeClaimResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    // JSON EOF means the body is incomplete: caller buffers more.
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((CreateNamespacedPersistentVolumeClaimResponse::Ok(result), buf.len()))
            },
            _ => {
                // Non-200: the body may be empty, a JSON value, or malformed.
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((CreateNamespacedPersistentVolumeClaimResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation deleteCoreV1CollectionNamespacedPersistentVolumeClaim
impl PersistentVolumeClaim {
    /// delete collection of PersistentVolumeClaim
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`DeleteCollectionNamespacedPersistentVolumeClaimResponse`]`>` constructor, or [`DeleteCollectionNamespacedPersistentVolumeClaimResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `namespace`
    ///
    ///     object name and auth scope, such as for teams and projects
    ///
    /// * `delete_optional`
    ///
    ///     Delete options. Use `Default::default()` to not pass any.
    ///
    /// * `list_optional`
    ///
    ///     List options. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn delete_collection_namespaced_persistent_volume_claim(
        namespace: &str,
        delete_optional: crate::v1_8::DeleteOptional<'_>,
        list_optional: crate::v1_8::ListOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<DeleteCollectionNamespacedPersistentVolumeClaimResponse>), crate::RequestError> {
        let __url = format!("/api/v1/namespaces/{namespace}/persistentvolumeclaims?",
            namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        // List options go in the query string; delete options in the JSON body.
        list_optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();
        let mut __request = http::Request::delete(__url);
        let __body = serde_json::to_vec(&delete_optional).map_err(crate::RequestError::Json)?;
        __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Use `<DeleteCollectionNamespacedPersistentVolumeClaimResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolumeClaim::delete_collection_namespaced_persistent_volume_claim`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum DeleteCollectionNamespacedPersistentVolumeClaimResponse {
    OkStatus(crate::v1_8::apimachinery::pkg::apis::meta::v1::Status),
    OkValue(crate::v1_8::api::core::v1::PersistentVolumeClaimList),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for DeleteCollectionNamespacedPersistentVolumeClaimResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // A DELETE may return either a meta/v1 Status or the deleted
                // list; disambiguate on the JSON "kind" field.
                let result: serde_json::Map<String, serde_json::Value> = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                let is_status = match result.get("kind") {
                    Some(serde_json::Value::String(s)) if s == "Status" => true,
                    _ => false,
                };
                if is_status {
                    let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
                    let result = result.map_err(crate::ResponseError::Json)?;
                    Ok((DeleteCollectionNamespacedPersistentVolumeClaimResponse::OkStatus(result), buf.len()))
                }
                else {
                    let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
                    let result = result.map_err(crate::ResponseError::Json)?;
                    Ok((DeleteCollectionNamespacedPersistentVolumeClaimResponse::OkValue(result), buf.len()))
                }
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((DeleteCollectionNamespacedPersistentVolumeClaimResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation deleteCoreV1NamespacedPersistentVolumeClaim
impl PersistentVolumeClaim {
    /// delete a PersistentVolumeClaim
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`DeleteNamespacedPersistentVolumeClaimResponse`]`>` constructor, or [`DeleteNamespacedPersistentVolumeClaimResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the PersistentVolumeClaim
    ///
    /// * `namespace`
    ///
    ///     object name and auth scope, such as for teams and projects
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn delete_namespaced_persistent_volume_claim(
        name: &str,
        namespace: &str,
        optional: crate::v1_8::DeleteOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<DeleteNamespacedPersistentVolumeClaimResponse>), crate::RequestError> {
        let __url = format!("/api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
            namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __request = http::Request::delete(__url);
        // Delete options are serialized as the JSON request body.
        let __body = serde_json::to_vec(&optional).map_err(crate::RequestError::Json)?;
        __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Use `<DeleteNamespacedPersistentVolumeClaimResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolumeClaim::delete_namespaced_persistent_volume_claim`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum DeleteNamespacedPersistentVolumeClaimResponse {
    OkStatus(crate::v1_8::apimachinery::pkg::apis::meta::v1::Status),
    OkValue(crate::v1_8::api::core::v1::PersistentVolumeClaim),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for DeleteNamespacedPersistentVolumeClaimResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // DELETE can answer with a Status or the deleted object;
                // disambiguate on the JSON "kind" field.
                let result: serde_json::Map<String, serde_json::Value> = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                let is_status = match result.get("kind") {
                    Some(serde_json::Value::String(s)) if s == "Status" => true,
                    _ => false,
                };
                if is_status {
                    let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
                    let result = result.map_err(crate::ResponseError::Json)?;
                    Ok((DeleteNamespacedPersistentVolumeClaimResponse::OkStatus(result), buf.len()))
                }
                else {
                    let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
                    let result = result.map_err(crate::ResponseError::Json)?;
                    Ok((DeleteNamespacedPersistentVolumeClaimResponse::OkValue(result), buf.len()))
                }
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((DeleteNamespacedPersistentVolumeClaimResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation listCoreV1NamespacedPersistentVolumeClaim
impl PersistentVolumeClaim {
    /// list or watch objects of kind PersistentVolumeClaim
    ///
    /// This operation only supports listing all items of this type.
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`ListNamespacedPersistentVolumeClaimResponse`]`>` constructor, or [`ListNamespacedPersistentVolumeClaimResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `namespace`
    ///
    ///     object name and auth scope, such as for teams and projects
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn list_namespaced_persistent_volume_claim(
        namespace: &str,
        optional: crate::v1_8::ListOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ListNamespacedPersistentVolumeClaimResponse>), crate::RequestError> {
        let __url = format!("/api/v1/namespaces/{namespace}/persistentvolumeclaims?",
            namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();
        let mut __request = http::Request::get(__url);
        // GET: no request body.
        let __body = vec![];
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Use `<ListNamespacedPersistentVolumeClaimResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolumeClaim::list_namespaced_persistent_volume_claim`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ListNamespacedPersistentVolumeClaimResponse {
    Ok(crate::v1_8::api::core::v1::PersistentVolumeClaimList),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ListNamespacedPersistentVolumeClaimResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((ListNamespacedPersistentVolumeClaimResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((ListNamespacedPersistentVolumeClaimResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation listCoreV1PersistentVolumeClaimForAllNamespaces
impl PersistentVolumeClaim {
    /// list or watch objects of kind PersistentVolumeClaim
    ///
    /// This operation only supports listing all items of this type.
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`ListPersistentVolumeClaimForAllNamespacesResponse`]`>` constructor, or [`ListPersistentVolumeClaimForAllNamespacesResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn list_persistent_volume_claim_for_all_namespaces(
        optional: crate::v1_8::ListOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ListPersistentVolumeClaimForAllNamespacesResponse>), crate::RequestError> {
        // Cluster-scoped path: no namespace segment, so no percent-encoding needed.
        let __url = "/api/v1/persistentvolumeclaims?".to_owned();
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();
        let mut __request = http::Request::get(__url);
        let __body = vec![];
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Use `<ListPersistentVolumeClaimForAllNamespacesResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolumeClaim::list_persistent_volume_claim_for_all_namespaces`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ListPersistentVolumeClaimForAllNamespacesResponse {
    Ok(crate::v1_8::api::core::v1::PersistentVolumeClaimList),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ListPersistentVolumeClaimForAllNamespacesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((ListPersistentVolumeClaimForAllNamespacesResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((ListPersistentVolumeClaimForAllNamespacesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation patchCoreV1NamespacedPersistentVolumeClaim
impl PersistentVolumeClaim {
    /// partially update the specified PersistentVolumeClaim
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`PatchNamespacedPersistentVolumeClaimResponse`]`>` constructor, or [`PatchNamespacedPersistentVolumeClaimResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the PersistentVolumeClaim
    ///
    /// * `namespace`
    ///
    ///     object name and auth scope, such as for teams and projects
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn patch_namespaced_persistent_volume_claim(
        name: &str,
        namespace: &str,
        body: &crate::v1_8::apimachinery::pkg::apis::meta::v1::Patch,
        optional: crate::v1_8::PatchOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<PatchNamespacedPersistentVolumeClaimResponse>), crate::RequestError> {
        let __url = format!("/api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
            namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();
        let mut __request = http::Request::patch(__url);
        let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        // The Content-Type must match the patch flavor or the apiserver rejects it.
        __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static(match body {
            crate::v1_8::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
            crate::v1_8::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
            crate::v1_8::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
        }));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Use `<PatchNamespacedPersistentVolumeClaimResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolumeClaim::patch_namespaced_persistent_volume_claim`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum PatchNamespacedPersistentVolumeClaimResponse {
    Ok(crate::v1_8::api::core::v1::PersistentVolumeClaim),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for PatchNamespacedPersistentVolumeClaimResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((PatchNamespacedPersistentVolumeClaimResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((PatchNamespacedPersistentVolumeClaimResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation patchCoreV1NamespacedPersistentVolumeClaimStatus
impl PersistentVolumeClaim {
    /// partially update status of the specified PersistentVolumeClaim
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`PatchNamespacedPersistentVolumeClaimStatusResponse`]`>` constructor, or [`PatchNamespacedPersistentVolumeClaimStatusResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the PersistentVolumeClaim
    ///
    /// * `namespace`
    ///
    ///     object name and auth scope, such as for teams and projects
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn patch_namespaced_persistent_volume_claim_status(
        name: &str,
        namespace: &str,
        body: &crate::v1_8::apimachinery::pkg::apis::meta::v1::Patch,
        optional: crate::v1_8::PatchOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<PatchNamespacedPersistentVolumeClaimStatusResponse>), crate::RequestError> {
        // Same as the plain patch, but targets the /status subresource.
        let __url = format!("/api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}/status?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
            namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();
        let mut __request = http::Request::patch(__url);
        let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static(match body {
            crate::v1_8::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
            crate::v1_8::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
            crate::v1_8::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
        }));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Use `<PatchNamespacedPersistentVolumeClaimStatusResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolumeClaim::patch_namespaced_persistent_volume_claim_status`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum PatchNamespacedPersistentVolumeClaimStatusResponse {
    Ok(crate::v1_8::api::core::v1::PersistentVolumeClaim),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for PatchNamespacedPersistentVolumeClaimStatusResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((PatchNamespacedPersistentVolumeClaimStatusResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((PatchNamespacedPersistentVolumeClaimStatusResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation readCoreV1NamespacedPersistentVolumeClaim
impl PersistentVolumeClaim {
    /// read the specified PersistentVolumeClaim
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`ReadNamespacedPersistentVolumeClaimResponse`]`>` constructor, or [`ReadNamespacedPersistentVolumeClaimResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the PersistentVolumeClaim
    ///
    /// * `namespace`
    ///
    ///     object name and auth scope, such as for teams and projects
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn read_namespaced_persistent_volume_claim(
        name: &str,
        namespace: &str,
        optional: ReadNamespacedPersistentVolumeClaimOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReadNamespacedPersistentVolumeClaimResponse>), crate::RequestError> {
        let ReadNamespacedPersistentVolumeClaimOptional {
            exact,
            export,
            pretty,
        } = optional;
        let __url = format!("/api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
            namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        // Each optional flag becomes a query parameter only when set.
        if let Some(exact) = exact {
            __query_pairs.append_pair("exact", &exact.to_string());
        }
        if let Some(export) = export {
            __query_pairs.append_pair("export", &export.to_string());
        }
        if let Some(pretty) = pretty {
            __query_pairs.append_pair("pretty", pretty);
        }
        let __url = __query_pairs.finish();
        let mut __request = http::Request::get(__url);
        let __body = vec![];
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Optional parameters of [`PersistentVolumeClaim::read_namespaced_persistent_volume_claim`]
///
/// Each field that is `Some` is serialized as a query string parameter of the request.
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReadNamespacedPersistentVolumeClaimOptional<'a> {
    /// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
    pub exact: Option<bool>,
    /// Should this value be exported. Export strips fields that a user can not specify.
    pub export: Option<bool>,
    /// If 'true', then the output is pretty printed.
    pub pretty: Option<&'a str>,
}
/// Use `<ReadNamespacedPersistentVolumeClaimResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolumeClaim::read_namespaced_persistent_volume_claim`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadNamespacedPersistentVolumeClaimResponse {
    /// HTTP 200: the requested PersistentVolumeClaim.
    Ok(crate::v1_8::api::core::v1::PersistentVolumeClaim),
    /// Any other HTTP status: the raw body as JSON when parseable. `Ok(None)` means
    /// the body was empty; `Err` carries the JSON parse failure.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadNamespacedPersistentVolumeClaimResponse {
    // Parses a complete or partial HTTP response body; the `usize` is the number of
    // bytes consumed. `NeedMoreData` signals a truncated JSON document.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((ReadNamespacedPersistentVolumeClaimResponse::Ok(result), buf.len()))
            },
            _ => {
                // Non-OK statuses: expose the body as opaque JSON (None when empty);
                // a parse failure is carried inside the variant, not returned.
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((ReadNamespacedPersistentVolumeClaimResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation readCoreV1NamespacedPersistentVolumeClaimStatus
impl PersistentVolumeClaim {
    /// read status of the specified PersistentVolumeClaim
    ///
    /// Builds the GET request for the `/status` subresource but does not execute it.
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`ReadNamespacedPersistentVolumeClaimStatusResponse`]`>` constructor, or [`ReadNamespacedPersistentVolumeClaimStatusResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the PersistentVolumeClaim
    ///
    /// * `namespace`
    ///
    ///     object name and auth scope, such as for teams and projects
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn read_namespaced_persistent_volume_claim_status(
        name: &str,
        namespace: &str,
        optional: ReadNamespacedPersistentVolumeClaimStatusOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReadNamespacedPersistentVolumeClaimStatusResponse>), crate::RequestError> {
        let ReadNamespacedPersistentVolumeClaimStatusOptional {
            pretty,
        } = optional;
        // Path segments are percent-encoded so arbitrary resource names cannot break the URL.
        let __url = format!("/api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}/status?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
            namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        if let Some(pretty) = pretty {
            __query_pairs.append_pair("pretty", pretty);
        }
        let __url = __query_pairs.finish();
        let mut __request = http::Request::get(__url);
        // GET requests carry no body.
        let __body = vec![];
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Optional parameters of [`PersistentVolumeClaim::read_namespaced_persistent_volume_claim_status`]
///
/// Each field that is `Some` is serialized as a query string parameter of the request.
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReadNamespacedPersistentVolumeClaimStatusOptional<'a> {
    /// If 'true', then the output is pretty printed.
    pub pretty: Option<&'a str>,
}
/// Use `<ReadNamespacedPersistentVolumeClaimStatusResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolumeClaim::read_namespaced_persistent_volume_claim_status`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadNamespacedPersistentVolumeClaimStatusResponse {
    /// HTTP 200: the PersistentVolumeClaim whose status was read.
    Ok(crate::v1_8::api::core::v1::PersistentVolumeClaim),
    /// Any other HTTP status: the raw body as JSON when parseable. `Ok(None)` means
    /// the body was empty; `Err` carries the JSON parse failure.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadNamespacedPersistentVolumeClaimStatusResponse {
    // Parses a complete or partial HTTP response body; the `usize` is the number of
    // bytes consumed. `NeedMoreData` signals a truncated JSON document.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((ReadNamespacedPersistentVolumeClaimStatusResponse::Ok(result), buf.len()))
            },
            _ => {
                // Non-OK statuses: expose the body as opaque JSON (None when empty).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((ReadNamespacedPersistentVolumeClaimStatusResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation replaceCoreV1NamespacedPersistentVolumeClaim
impl PersistentVolumeClaim {
    /// replace the specified PersistentVolumeClaim
    ///
    /// Builds the PUT request but does not execute it; the caller sends it with an
    /// HTTP client of its choice.
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`ReplaceNamespacedPersistentVolumeClaimResponse`]`>` constructor, or [`ReplaceNamespacedPersistentVolumeClaimResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the PersistentVolumeClaim
    ///
    /// * `namespace`
    ///
    ///     object name and auth scope, such as for teams and projects
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn replace_namespaced_persistent_volume_claim(
        name: &str,
        namespace: &str,
        body: &crate::v1_8::api::core::v1::PersistentVolumeClaim,
        optional: ReplaceNamespacedPersistentVolumeClaimOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReplaceNamespacedPersistentVolumeClaimResponse>), crate::RequestError> {
        let ReplaceNamespacedPersistentVolumeClaimOptional {
            pretty,
        } = optional;
        // Path segments are percent-encoded so arbitrary resource names cannot break the URL.
        let __url = format!("/api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
            namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        if let Some(pretty) = pretty {
            __query_pairs.append_pair("pretty", pretty);
        }
        let __url = __query_pairs.finish();
        let mut __request = http::Request::put(__url);
        // The request body is the JSON serialization of the replacement object.
        let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Optional parameters of [`PersistentVolumeClaim::replace_namespaced_persistent_volume_claim`]
///
/// Each field that is `Some` is serialized as a query string parameter of the request.
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReplaceNamespacedPersistentVolumeClaimOptional<'a> {
    /// If 'true', then the output is pretty printed.
    pub pretty: Option<&'a str>,
}
/// Use `<ReplaceNamespacedPersistentVolumeClaimResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolumeClaim::replace_namespaced_persistent_volume_claim`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReplaceNamespacedPersistentVolumeClaimResponse {
    /// HTTP 200: the replaced PersistentVolumeClaim as returned by the API server.
    Ok(crate::v1_8::api::core::v1::PersistentVolumeClaim),
    /// Any other HTTP status: the raw body as JSON when parseable. `Ok(None)` means
    /// the body was empty; `Err` carries the JSON parse failure.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReplaceNamespacedPersistentVolumeClaimResponse {
    // Parses a complete or partial HTTP response body; the `usize` is the number of
    // bytes consumed. `NeedMoreData` signals a truncated JSON document.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((ReplaceNamespacedPersistentVolumeClaimResponse::Ok(result), buf.len()))
            },
            _ => {
                // Non-OK statuses: expose the body as opaque JSON (None when empty).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((ReplaceNamespacedPersistentVolumeClaimResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation replaceCoreV1NamespacedPersistentVolumeClaimStatus
impl PersistentVolumeClaim {
    /// replace status of the specified PersistentVolumeClaim
    ///
    /// Builds the PUT request against the `/status` subresource but does not execute it.
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`ReplaceNamespacedPersistentVolumeClaimStatusResponse`]`>` constructor, or [`ReplaceNamespacedPersistentVolumeClaimStatusResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the PersistentVolumeClaim
    ///
    /// * `namespace`
    ///
    ///     object name and auth scope, such as for teams and projects
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn replace_namespaced_persistent_volume_claim_status(
        name: &str,
        namespace: &str,
        body: &crate::v1_8::api::core::v1::PersistentVolumeClaim,
        optional: ReplaceNamespacedPersistentVolumeClaimStatusOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReplaceNamespacedPersistentVolumeClaimStatusResponse>), crate::RequestError> {
        let ReplaceNamespacedPersistentVolumeClaimStatusOptional {
            pretty,
        } = optional;
        // Path segments are percent-encoded so arbitrary resource names cannot break the URL.
        let __url = format!("/api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}/status?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
            namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        if let Some(pretty) = pretty {
            __query_pairs.append_pair("pretty", pretty);
        }
        let __url = __query_pairs.finish();
        let mut __request = http::Request::put(__url);
        // The request body is the JSON serialization of the replacement object.
        let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Optional parameters of [`PersistentVolumeClaim::replace_namespaced_persistent_volume_claim_status`]
///
/// Each field that is `Some` is serialized as a query string parameter of the request.
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReplaceNamespacedPersistentVolumeClaimStatusOptional<'a> {
    /// If 'true', then the output is pretty printed.
    pub pretty: Option<&'a str>,
}
/// Use `<ReplaceNamespacedPersistentVolumeClaimStatusResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolumeClaim::replace_namespaced_persistent_volume_claim_status`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReplaceNamespacedPersistentVolumeClaimStatusResponse {
    /// HTTP 200: the PersistentVolumeClaim whose status was replaced.
    Ok(crate::v1_8::api::core::v1::PersistentVolumeClaim),
    /// Any other HTTP status: the raw body as JSON when parseable. `Ok(None)` means
    /// the body was empty; `Err` carries the JSON parse failure.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReplaceNamespacedPersistentVolumeClaimStatusResponse {
    // Parses a complete or partial HTTP response body; the `usize` is the number of
    // bytes consumed. `NeedMoreData` signals a truncated JSON document.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((ReplaceNamespacedPersistentVolumeClaimStatusResponse::Ok(result), buf.len()))
            },
            _ => {
                // Non-OK statuses: expose the body as opaque JSON (None when empty).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((ReplaceNamespacedPersistentVolumeClaimStatusResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation watchCoreV1NamespacedPersistentVolumeClaim
impl PersistentVolumeClaim {
    /// list or watch objects of kind PersistentVolumeClaim
    ///
    /// This operation only supports watching one item, or a list of items, of this type for changes.
    ///
    /// Builds the GET request but does not execute it; the caller sends it with an
    /// HTTP client of its choice.
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`WatchNamespacedPersistentVolumeClaimResponse`]`>` constructor, or [`WatchNamespacedPersistentVolumeClaimResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `namespace`
    ///
    ///     object name and auth scope, such as for teams and projects
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn watch_namespaced_persistent_volume_claim(
        namespace: &str,
        optional: crate::v1_8::WatchOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<WatchNamespacedPersistentVolumeClaimResponse>), crate::RequestError> {
        // The namespace segment is percent-encoded so arbitrary values cannot break the URL.
        let __url = format!("/api/v1/namespaces/{namespace}/persistentvolumeclaims?",
            namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        // Watch-related query parameters are serialized by the shared WatchOptional helper.
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();
        let mut __request = http::Request::get(__url);
        let __body = vec![];
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Use `<WatchNamespacedPersistentVolumeClaimResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolumeClaim::watch_namespaced_persistent_volume_claim`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum WatchNamespacedPersistentVolumeClaimResponse {
    /// HTTP 200: one watch event from the response stream.
    Ok(crate::v1_8::apimachinery::pkg::apis::meta::v1::WatchEvent<PersistentVolumeClaim>),
    /// Any other HTTP status: the raw body as JSON when parseable. `Ok(None)` means
    /// the body was empty; `Err` carries the JSON parse failure.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for WatchNamespacedPersistentVolumeClaimResponse {
    // Watch responses are a stream of JSON documents, so only the first complete
    // document in `buf` is consumed: `byte_offset` reports how many bytes that
    // document occupied, leaving the remainder for the next call.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let mut deserializer = serde_json::Deserializer::from_slice(buf).into_iter();
                let (result, byte_offset) = match deserializer.next() {
                    Some(Ok(value)) => (value, deserializer.byte_offset()),
                    Some(Err(ref err)) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Some(Err(err)) => return Err(crate::ResponseError::Json(err)),
                    // No document at all in the buffer yet.
                    None => return Err(crate::ResponseError::NeedMoreData),
                };
                Ok((WatchNamespacedPersistentVolumeClaimResponse::Ok(result), byte_offset))
            },
            _ => {
                // Non-OK statuses: expose the body as opaque JSON (None when empty).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((WatchNamespacedPersistentVolumeClaimResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation watchCoreV1PersistentVolumeClaimForAllNamespaces
impl PersistentVolumeClaim {
    /// list or watch objects of kind PersistentVolumeClaim
    ///
    /// This operation only supports watching one item, or a list of items, of this type for changes.
    ///
    /// Builds the GET request but does not execute it; the caller sends it with an
    /// HTTP client of its choice.
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`WatchPersistentVolumeClaimForAllNamespacesResponse`]`>` constructor, or [`WatchPersistentVolumeClaimForAllNamespacesResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn watch_persistent_volume_claim_for_all_namespaces(
        optional: crate::v1_8::WatchOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<WatchPersistentVolumeClaimForAllNamespacesResponse>), crate::RequestError> {
        // Cluster-scoped path: no namespace segment, so the URL is a fixed string.
        let __url = "/api/v1/persistentvolumeclaims?".to_owned();
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        // Watch-related query parameters are serialized by the shared WatchOptional helper.
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();
        let mut __request = http::Request::get(__url);
        let __body = vec![];
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Use `<WatchPersistentVolumeClaimForAllNamespacesResponse as Response>::try_from_parts` to parse the HTTP response body of [`PersistentVolumeClaim::watch_persistent_volume_claim_for_all_namespaces`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum WatchPersistentVolumeClaimForAllNamespacesResponse {
    /// HTTP 200: one watch event from the response stream.
    Ok(crate::v1_8::apimachinery::pkg::apis::meta::v1::WatchEvent<PersistentVolumeClaim>),
    /// Any other HTTP status: the raw body as JSON when parseable. `Ok(None)` means
    /// the body was empty; `Err` carries the JSON parse failure.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for WatchPersistentVolumeClaimForAllNamespacesResponse {
    // Watch responses are a stream of JSON documents, so only the first complete
    // document in `buf` is consumed: `byte_offset` reports how many bytes that
    // document occupied, leaving the remainder for the next call.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let mut deserializer = serde_json::Deserializer::from_slice(buf).into_iter();
                let (result, byte_offset) = match deserializer.next() {
                    Some(Ok(value)) => (value, deserializer.byte_offset()),
                    Some(Err(ref err)) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Some(Err(err)) => return Err(crate::ResponseError::Json(err)),
                    // No document at all in the buffer yet.
                    None => return Err(crate::ResponseError::NeedMoreData),
                };
                Ok((WatchPersistentVolumeClaimForAllNamespacesResponse::Ok(result), byte_offset))
            },
            _ => {
                // Non-OK statuses: expose the body as opaque JSON (None when empty).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((WatchPersistentVolumeClaimForAllNamespacesResponse::Other(result), read))
            },
        }
    }
}
// End /v1/PersistentVolumeClaim
impl crate::Resource for PersistentVolumeClaim {
    /// Full API version string; for the core group this is just the version.
    fn api_version() -> &'static str { "v1" }
    /// PersistentVolumeClaim belongs to the core API group, whose name is empty.
    fn group() -> &'static str { "" }
    /// The Kubernetes kind of this resource.
    fn kind() -> &'static str { "PersistentVolumeClaim" }
    /// The version component of the API version.
    fn version() -> &'static str { "v1" }
}
impl crate::Metadata for PersistentVolumeClaim {
    type Ty = crate::v1_8::apimachinery::pkg::apis::meta::v1::ObjectMeta;
    // Borrows the object metadata, if any was set on this resource.
    fn metadata(&self) -> Option<&<Self as crate::Metadata>::Ty> {
        self.metadata.as_ref()
    }
}
impl<'de> serde::Deserialize<'de> for PersistentVolumeClaim {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
        // Map keys recognized in the serialized form; anything else is skipped.
        #[allow(non_camel_case_types)]
        enum Field {
            Key_api_version,
            Key_kind,
            Key_metadata,
            Key_spec,
            Key_status,
            Other,
        }
        impl<'de> serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
                struct Visitor;
                impl<'de> serde::de::Visitor<'de> for Visitor {
                    type Value = Field;
                    fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                        write!(f, "field identifier")
                    }
                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
                        Ok(match v {
                            "apiVersion" => Field::Key_api_version,
                            "kind" => Field::Key_kind,
                            "metadata" => Field::Key_metadata,
                            "spec" => Field::Key_spec,
                            "status" => Field::Key_status,
                            // Unknown keys are tolerated and ignored below.
                            _ => Field::Other,
                        })
                    }
                }
                deserializer.deserialize_identifier(Visitor)
            }
        }
        struct Visitor;
        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = PersistentVolumeClaim;
            fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(f, "struct PersistentVolumeClaim")
            }
            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
                // All three stored fields are optional; absent keys stay `None`.
                let mut value_metadata: Option<crate::v1_8::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None;
                let mut value_spec: Option<crate::v1_8::api::core::v1::PersistentVolumeClaimSpec> = None;
                let mut value_status: Option<crate::v1_8::api::core::v1::PersistentVolumeClaimStatus> = None;
                while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        Field::Key_api_version => {
                            // `apiVersion` is validated against the expected value
                            // rather than stored.
                            let value_api_version: String = serde::de::MapAccess::next_value(&mut map)?;
                            if value_api_version != <Self::Value as crate::Resource>::api_version() {
                                return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::api_version()));
                            }
                        },
                        Field::Key_kind => {
                            // Same validation for `kind`.
                            let value_kind: String = serde::de::MapAccess::next_value(&mut map)?;
                            if value_kind != <Self::Value as crate::Resource>::kind() {
                                return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::kind()));
                            }
                        },
                        Field::Key_metadata => value_metadata = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_spec => value_spec = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_status => value_status = serde::de::MapAccess::next_value(&mut map)?,
                        // Consume and discard the value of any unknown key.
                        Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }
                Ok(PersistentVolumeClaim {
                    metadata: value_metadata,
                    spec: value_spec,
                    status: value_status,
                })
            }
        }
        deserializer.deserialize_struct(
            "PersistentVolumeClaim",
            &[
                "apiVersion",
                "kind",
                "metadata",
                "spec",
                "status",
            ],
            Visitor,
        )
    }
}
impl serde::Serialize for PersistentVolumeClaim {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
        // Field count: `apiVersion` and `kind` are always emitted; each optional
        // field contributes one more only when it is `Some`.
        let mut state = serializer.serialize_struct(
            "PersistentVolumeClaim",
            2 +
            self.metadata.as_ref().map_or(0, |_| 1) +
            self.spec.as_ref().map_or(0, |_| 1) +
            self.status.as_ref().map_or(0, |_| 1),
        )?;
        serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::api_version())?;
        serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::kind())?;
        if let Some(value) = &self.metadata {
            serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", value)?;
        }
        if let Some(value) = &self.spec {
            serde::ser::SerializeStruct::serialize_field(&mut state, "spec", value)?;
        }
        if let Some(value) = &self.status {
            serde::ser::SerializeStruct::serialize_field(&mut state, "status", value)?;
        }
        serde::ser::SerializeStruct::end(state)
    }
}
| 45.727411 | 227 | 0.59904 |
26c1301e3aa86bde6a335ebc8e7bcf155abee893
| 137,754 |
use {
crate::{
account_rent_state::{check_rent_state_with_account, RentState},
accounts_db::{
AccountShrinkThreshold, AccountsAddRootTiming, AccountsDb, AccountsDbConfig,
BankHashInfo, ErrorCounters, LoadHint, LoadedAccount, ScanStorageResult,
ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING,
},
accounts_index::{AccountSecondaryIndexes, IndexKey, ScanConfig, ScanError, ScanResult},
accounts_update_notifier_interface::AccountsUpdateNotifier,
ancestors::Ancestors,
bank::{
Bank, NonceFull, NonceInfo, RentDebits, TransactionCheckResult,
TransactionExecutionResult,
},
blockhash_queue::BlockhashQueue,
rent_collector::RentCollector,
system_instruction_processor::{get_system_account_kind, SystemAccountKind},
},
dashmap::{
mapref::entry::Entry::{Occupied, Vacant},
DashMap,
},
log::*,
rand::{thread_rng, Rng},
solana_address_lookup_table_program::{error::AddressLookupError, state::AddressLookupTable},
solana_sdk::{
account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
account_utils::StateMut,
bpf_loader_upgradeable::{self, UpgradeableLoaderState},
clock::{BankId, Slot, INITIAL_RENT_EPOCH},
feature_set::{self, tx_wide_compute_cap, FeatureSet},
fee::FeeStructure,
genesis_config::ClusterType,
hash::Hash,
message::{
v0::{LoadedAddresses, MessageAddressTableLookup},
SanitizedMessage,
},
native_loader,
nonce::{state::Versions as NonceVersions, State as NonceState},
pubkey::Pubkey,
slot_hashes::SlotHashes,
system_program,
sysvar::{self, epoch_schedule::EpochSchedule, instructions::construct_instructions_data},
transaction::{Result, SanitizedTransaction, TransactionAccountLocks, TransactionError},
transaction_context::TransactionAccount,
},
std::{
cmp::Reverse,
collections::{hash_map, BinaryHeap, HashMap, HashSet},
ops::RangeBounds,
path::PathBuf,
sync::{
atomic::{AtomicUsize, Ordering},
Arc, Mutex,
},
},
};
/// Bookkeeping for which accounts are currently locked by in-flight transactions.
#[derive(Debug, Default, AbiExample)]
pub struct AccountLocks {
    /// Accounts currently locked for writing.
    write_locks: HashSet<Pubkey>,
    /// Number of outstanding read-only locks held per account.
    readonly_locks: HashMap<Pubkey, u64>,
}
impl AccountLocks {
    /// Returns true when `key` currently holds at least one read-only lock.
    fn is_locked_readonly(&self, key: &Pubkey) -> bool {
        matches!(self.readonly_locks.get(key), Some(count) if *count > 0)
    }
    /// Returns true when `key` is currently locked for writing.
    fn is_locked_write(&self, key: &Pubkey) -> bool {
        self.write_locks.contains(key)
    }
    /// Registers the first read-only lock for `key`; panics if an entry already exists.
    fn insert_new_readonly(&mut self, key: &Pubkey) {
        let previous = self.readonly_locks.insert(*key, 1);
        assert!(previous.is_none());
    }
    /// Bumps the read-only lock count for `key` if it is already read-locked;
    /// returns false when no read-only entry exists yet.
    fn lock_readonly(&mut self, key: &Pubkey) -> bool {
        match self.readonly_locks.get_mut(key) {
            Some(count) => {
                *count += 1;
                true
            }
            None => false,
        }
    }
    /// Releases one read-only lock on `key`; the entry is dropped once its count
    /// reaches zero so the map only tracks actively locked accounts.
    fn unlock_readonly(&mut self, key: &Pubkey) {
        if let hash_map::Entry::Occupied(mut occupied_entry) = self.readonly_locks.entry(*key) {
            *occupied_entry.get_mut() -= 1;
            if *occupied_entry.get() == 0 {
                occupied_entry.remove_entry();
            }
        }
    }
    /// Releases the write lock held on `key`, if any.
    fn unlock_write(&mut self, key: &Pubkey) {
        self.write_locks.remove(key);
    }
}
/// This structure handles synchronization for db
///
/// Pairs the process-wide [`AccountsDb`] with the per-bank account-lock table used
/// while transactions are being processed.
#[derive(Debug, AbiExample)]
pub struct Accounts {
    /// Single global AccountsDb
    pub accounts_db: Arc<AccountsDb>,
    /// set of read-only and writable accounts which are currently
    /// being processed by banking/replay threads
    pub(crate) account_locks: Mutex<AccountLocks>,
}
// for the load instructions
/// Total rent collected from a transaction's accounts during loading.
pub type TransactionRent = u64;
/// Per-instruction account indices; presumably each inner Vec indexes the program
/// chain for one instruction — confirm against the loader that populates it.
pub type TransactionProgramIndices = Vec<Vec<usize>>;
/// The fully loaded account state for one transaction.
#[derive(PartialEq, Debug, Clone)]
pub struct LoadedTransaction {
    /// All accounts referenced by the transaction, with any extra dependency
    /// accounts (e.g. programdata accounts) appended at the end.
    pub accounts: Vec<TransactionAccount>,
    pub program_indices: TransactionProgramIndices,
    /// Rent charged while loading the accounts.
    pub rent: TransactionRent,
    /// Per-account breakdown of the rent charged.
    pub rent_debits: RentDebits,
}
/// Load outcome plus nonce info when the transaction used a durable nonce.
pub type TransactionLoadResult = (Result<LoadedTransaction>, Option<NonceFull>);
/// Controls whether addresses matching a filter set are excluded from or
/// restricted to the results of an account query.
pub enum AccountAddressFilter {
    Exclude, // exclude all addresses matching the filter
    Include, // only include addresses matching the filter
}
impl Accounts {
    /// Creates an `Accounts` backed by a default, test-only `AccountsDb` and an
    /// empty lock table.
    pub fn default_for_tests() -> Self {
        Self {
            accounts_db: Arc::new(AccountsDb::default_for_tests()),
            account_locks: Mutex::default(),
        }
    }
    /// Test convenience constructor: delegates to [`Accounts::new_with_config`]
    /// with the testing accounts-db configuration and no update notifier.
    pub fn new_with_config_for_tests(
        paths: Vec<PathBuf>,
        cluster_type: &ClusterType,
        account_indexes: AccountSecondaryIndexes,
        caching_enabled: bool,
        shrink_ratio: AccountShrinkThreshold,
    ) -> Self {
        Self::new_with_config(
            paths,
            cluster_type,
            account_indexes,
            caching_enabled,
            shrink_ratio,
            Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
            None,
        )
    }
    /// Benchmark convenience constructor: delegates to [`Accounts::new_with_config`]
    /// with the benchmark accounts-db configuration and no update notifier.
    pub fn new_with_config_for_benches(
        paths: Vec<PathBuf>,
        cluster_type: &ClusterType,
        account_indexes: AccountSecondaryIndexes,
        caching_enabled: bool,
        shrink_ratio: AccountShrinkThreshold,
    ) -> Self {
        Self::new_with_config(
            paths,
            cluster_type,
            account_indexes,
            caching_enabled,
            shrink_ratio,
            Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS),
            None,
        )
    }
    /// Creates an `Accounts` wrapping a freshly constructed `AccountsDb`.
    ///
    /// All arguments are forwarded verbatim to `AccountsDb::new_with_config`; the
    /// account-lock table starts out empty.
    pub fn new_with_config(
        paths: Vec<PathBuf>,
        cluster_type: &ClusterType,
        account_indexes: AccountSecondaryIndexes,
        caching_enabled: bool,
        shrink_ratio: AccountShrinkThreshold,
        accounts_db_config: Option<AccountsDbConfig>,
        accounts_update_notifier: Option<AccountsUpdateNotifier>,
    ) -> Self {
        Self {
            accounts_db: Arc::new(AccountsDb::new_with_config(
                paths,
                cluster_type,
                account_indexes,
                caching_enabled,
                shrink_ratio,
                accounts_db_config,
                accounts_update_notifier,
            )),
            account_locks: Mutex::new(AccountLocks::default()),
        }
    }
    /// Creates a child `Accounts` that shares the parent's `AccountsDb` but starts
    /// with its own empty lock table. Registers the slot/parent-slot pair with the
    /// db via `set_hash` before returning.
    pub fn new_from_parent(parent: &Accounts, slot: Slot, parent_slot: Slot) -> Self {
        let accounts_db = parent.accounts_db.clone();
        accounts_db.set_hash(slot, parent_slot);
        Self {
            accounts_db,
            account_locks: Mutex::new(AccountLocks::default()),
        }
    }
    /// Wraps an already-constructed `AccountsDb` with a fresh, empty lock table.
    pub(crate) fn new_empty(accounts_db: AccountsDb) -> Self {
        Self {
            accounts_db: Arc::new(accounts_db),
            account_locks: Mutex::new(AccountLocks::default()),
        }
    }
fn construct_instructions_account(
message: &SanitizedMessage,
is_owned_by_sysvar: bool,
) -> AccountSharedData {
let data = construct_instructions_data(&message.decompile_instructions());
let owner = if is_owned_by_sysvar {
sysvar::id()
} else {
system_program::id()
};
AccountSharedData::from(Account {
data,
owner,
..Account::default()
})
}
    /// Load every account referenced by `tx`, collect rent from the writable
    /// ones, and charge `fee` to the payer (the first non-loader key).
    ///
    /// On success returns a `LoadedTransaction` holding the accounts (with
    /// any ProgramData dependencies appended after the message accounts),
    /// per-instruction program indices, and the rent collected. Errors use
    /// the runtime's transaction-failure taxonomy (`AccountNotFound`,
    /// `InvalidWritableAccount`, `ProgramAccountNotFound`,
    /// `InvalidProgramForExecution`, `InvalidAccountForFee`,
    /// `InsufficientFundsForFee`, rent-exemption errors).
    fn load_transaction(
        &self,
        ancestors: &Ancestors,
        tx: &SanitizedTransaction,
        fee: u64,
        error_counters: &mut ErrorCounters,
        rent_collector: &RentCollector,
        feature_set: &FeatureSet,
    ) -> Result<LoadedTransaction> {
        // Copy all the accounts
        let message = tx.message();
        // NOTE: this check will never fail because `tx` is sanitized
        if tx.signatures().is_empty() && fee != 0 {
            Err(TransactionError::MissingSignatureForFee)
        } else {
            // There is no way to predict what program will execute without an error
            // If a fee can pay for execution then the program will be scheduled
            let mut payer_index = None;
            let mut tx_rent: TransactionRent = 0;
            let account_keys = message.account_keys();
            let mut accounts = Vec::with_capacity(account_keys.len());
            let mut account_deps = Vec::with_capacity(account_keys.len());
            let mut rent_debits = RentDebits::default();
            for (i, key) in account_keys.iter().enumerate() {
                let account = if !message.is_non_loader_key(i) {
                    // Fill in an empty account for the program slots.
                    AccountSharedData::default()
                } else {
                    // First non-loader key encountered is treated as the payer.
                    if payer_index.is_none() {
                        payer_index = Some(i);
                    }
                    if solana_sdk::sysvar::instructions::check_id(key) {
                        // The instructions sysvar is synthesized, not loaded.
                        Self::construct_instructions_account(
                            message,
                            feature_set
                                .is_active(&feature_set::instructions_sysvar_owned_by_sysvar::id()),
                        )
                    } else {
                        // Missing accounts load as default (rent 0); rent is
                        // only collected from writable accounts.
                        let (account, rent) = self
                            .accounts_db
                            .load_with_fixed_root(ancestors, key)
                            .map(|(mut account, _)| {
                                if message.is_writable(i) {
                                    let rent_due = rent_collector
                                        .collect_from_existing_account(
                                            key,
                                            &mut account,
                                            self.accounts_db.filler_account_suffix.as_ref(),
                                        )
                                        .rent_amount;
                                    (account, rent_due)
                                } else {
                                    (account, 0)
                                }
                            })
                            .unwrap_or_default();
                        if bpf_loader_upgradeable::check_id(account.owner()) {
                            if message.is_writable(i) && !message.is_upgradeable_loader_present() {
                                error_counters.invalid_writable_account += 1;
                                return Err(TransactionError::InvalidWritableAccount);
                            }
                            if account.executable() {
                                // The upgradeable loader requires the derived ProgramData account
                                if let Ok(UpgradeableLoaderState::Program {
                                    programdata_address,
                                }) = account.state()
                                {
                                    if let Some((programdata_account, _)) = self
                                        .accounts_db
                                        .load_with_fixed_root(ancestors, &programdata_address)
                                    {
                                        account_deps
                                            .push((programdata_address, programdata_account));
                                    } else {
                                        error_counters.account_not_found += 1;
                                        return Err(TransactionError::ProgramAccountNotFound);
                                    }
                                } else {
                                    error_counters.invalid_program_for_execution += 1;
                                    return Err(TransactionError::InvalidProgramForExecution);
                                }
                            }
                        } else if account.executable() && message.is_writable(i) {
                            // Executable, non-upgradeable accounts are immutable.
                            error_counters.invalid_writable_account += 1;
                            return Err(TransactionError::InvalidWritableAccount);
                        }
                        tx_rent += rent;
                        rent_debits.insert(key, rent, account.lamports());
                        account
                    }
                };
                accounts.push((*key, account));
            }
            debug_assert_eq!(accounts.len(), account_keys.len());
            // Appends the account_deps at the end of the accounts,
            // this way they can be accessed in a uniform way.
            // At places where only the accounts are needed,
            // the account_deps are truncated using e.g:
            // accounts.iter().take(message.account_keys.len())
            accounts.append(&mut account_deps);
            if let Some(payer_index) = payer_index {
                if payer_index != 0 {
                    warn!("Payer index should be 0! {:?}", tx);
                }
                let (ref payer_address, ref mut payer_account) = accounts[payer_index];
                if payer_account.lamports() == 0 {
                    error_counters.account_not_found += 1;
                    return Err(TransactionError::AccountNotFound);
                }
                // Nonce payers must retain rent-exempt minimum after the fee.
                let min_balance = match get_system_account_kind(payer_account).ok_or_else(|| {
                    error_counters.invalid_account_for_fee += 1;
                    TransactionError::InvalidAccountForFee
                })? {
                    SystemAccountKind::System => 0,
                    SystemAccountKind::Nonce => {
                        // Should we ever allow a fees charge to zero a nonce account's
                        // balance. The state MUST be set to uninitialized in that case
                        rent_collector.rent.minimum_balance(NonceState::size())
                    }
                };
                if payer_account.lamports() < fee + min_balance {
                    error_counters.insufficient_funds += 1;
                    return Err(TransactionError::InsufficientFundsForFee);
                }
                let payer_pre_rent_state =
                    RentState::from_account(payer_account, &rent_collector.rent);
                payer_account
                    .checked_sub_lamports(fee)
                    .map_err(|_| TransactionError::InsufficientFundsForFee)?;
                let payer_post_rent_state =
                    RentState::from_account(payer_account, &rent_collector.rent);
                let rent_state_result = check_rent_state_with_account(
                    &payer_pre_rent_state,
                    &payer_post_rent_state,
                    payer_address,
                    payer_account,
                    feature_set.is_active(&feature_set::do_support_realloc::id()),
                );
                // Feature gate only wraps the actual error return so that the metrics and debug
                // logging generated by `check_rent_state_with_account()` can be examined before
                // feature activation
                if feature_set.is_active(&feature_set::require_rent_exempt_accounts::id()) {
                    rent_state_result?;
                }
                // Resolve the loader chain for every instruction's program.
                let program_indices = message
                    .instructions()
                    .iter()
                    .map(|instruction| {
                        self.load_executable_accounts(
                            ancestors,
                            &mut accounts,
                            instruction.program_id_index as usize,
                            error_counters,
                        )
                    })
                    .collect::<Result<Vec<Vec<usize>>>>()?;
                Ok(LoadedTransaction {
                    accounts,
                    program_indices,
                    rent: tx_rent,
                    rent_debits,
                })
            } else {
                error_counters.account_not_found += 1;
                Err(TransactionError::AccountNotFound)
            }
        }
    }
    /// Walk the loader chain for the program at `accounts[program_account_index]`,
    /// appending each loader (and, for upgradeable programs, the derived
    /// ProgramData account) to `accounts` until the native loader is reached.
    ///
    /// Returns the chain's account indices, innermost entry first. The walk
    /// is capped at 5 levels to bound recursion through owners.
    fn load_executable_accounts(
        &self,
        ancestors: &Ancestors,
        accounts: &mut Vec<TransactionAccount>,
        mut program_account_index: usize,
        error_counters: &mut ErrorCounters,
    ) -> Result<Vec<usize>> {
        let mut account_indices = Vec::new();
        let mut program_id = match accounts.get(program_account_index) {
            Some(program_account) => program_account.0,
            None => {
                error_counters.account_not_found += 1;
                return Err(TransactionError::ProgramAccountNotFound);
            }
        };
        let mut depth = 0;
        while !native_loader::check_id(&program_id) {
            if depth >= 5 {
                error_counters.call_chain_too_deep += 1;
                return Err(TransactionError::CallChainTooDeep);
            }
            depth += 1;
            // Load the current program account and append it to `accounts`.
            program_account_index = match self
                .accounts_db
                .load_with_fixed_root(ancestors, &program_id)
            {
                Some((program_account, _)) => {
                    let account_index = accounts.len();
                    accounts.push((program_id, program_account));
                    account_index
                }
                None => {
                    error_counters.account_not_found += 1;
                    return Err(TransactionError::ProgramAccountNotFound);
                }
            };
            let program = &accounts[program_account_index].1;
            if !program.executable() {
                error_counters.invalid_program_for_execution += 1;
                return Err(TransactionError::InvalidProgramForExecution);
            }
            // Add loader to chain
            let program_owner = *program.owner();
            account_indices.insert(0, program_account_index);
            if bpf_loader_upgradeable::check_id(&program_owner) {
                // The upgradeable loader requires the derived ProgramData account
                if let Ok(UpgradeableLoaderState::Program {
                    programdata_address,
                }) = program.state()
                {
                    let programdata_account_index = match self
                        .accounts_db
                        .load_with_fixed_root(ancestors, &programdata_address)
                    {
                        Some((programdata_account, _)) => {
                            let account_index = accounts.len();
                            accounts.push((programdata_address, programdata_account));
                            account_index
                        }
                        None => {
                            error_counters.account_not_found += 1;
                            return Err(TransactionError::ProgramAccountNotFound);
                        }
                    };
                    account_indices.insert(0, programdata_account_index);
                } else {
                    error_counters.invalid_program_for_execution += 1;
                    return Err(TransactionError::InvalidProgramForExecution);
                }
            }
            // Continue the walk with this program's owner (its loader).
            program_id = program_owner;
        }
        Ok(account_indices)
    }
    /// Load accounts for a batch of sanitized transactions, computing each
    /// transaction's fee from the blockhash queue (or the nonce, when one is
    /// present) and charging it during the load.
    ///
    /// Output is parallel to `txs`: transactions whose `lock_results` entry
    /// was already an error are passed through; otherwise the load result and
    /// the (fee-adjusted) nonce info are returned.
    pub fn load_accounts(
        &self,
        ancestors: &Ancestors,
        txs: &[SanitizedTransaction],
        lock_results: Vec<TransactionCheckResult>,
        hash_queue: &BlockhashQueue,
        error_counters: &mut ErrorCounters,
        rent_collector: &RentCollector,
        feature_set: &FeatureSet,
        fee_structure: &FeeStructure,
    ) -> Vec<TransactionLoadResult> {
        txs.iter()
            .zip(lock_results)
            .map(|etx| match etx {
                (tx, (Ok(()), nonce)) => {
                    // Nonce transactions carry their own fee rate; otherwise
                    // look the blockhash up in the queue.
                    let lamports_per_signature = nonce
                        .as_ref()
                        .map(|nonce| nonce.lamports_per_signature())
                        .unwrap_or_else(|| {
                            hash_queue.get_lamports_per_signature(tx.message().recent_blockhash())
                        });
                    let fee = if let Some(lamports_per_signature) = lamports_per_signature {
                        Bank::calculate_fee(
                            tx.message(),
                            lamports_per_signature,
                            fee_structure,
                            feature_set.is_active(&tx_wide_compute_cap::id()),
                        )
                    } else {
                        // Unknown blockhash: neither recent nor a nonce.
                        return (Err(TransactionError::BlockhashNotFound), None);
                    };
                    let loaded_transaction = match self.load_transaction(
                        ancestors,
                        tx,
                        fee,
                        error_counters,
                        rent_collector,
                        feature_set,
                    ) {
                        Ok(loaded_transaction) => loaded_transaction,
                        Err(e) => return (Err(e), None),
                    };
                    // Update nonce with fee-subtracted accounts
                    let nonce = if let Some(nonce) = nonce {
                        match NonceFull::from_partial(
                            nonce,
                            tx.message(),
                            &loaded_transaction.accounts,
                            &loaded_transaction.rent_debits,
                        ) {
                            Ok(nonce) => Some(nonce),
                            Err(e) => return (Err(e), None),
                        }
                    } else {
                        None
                    };
                    (Ok(loaded_transaction), nonce)
                }
                (_, (Err(e), _nonce)) => (Err(e), None),
            })
            .collect()
    }
    /// Resolve the writable and readonly addresses referenced by an address
    /// table lookup against the on-chain lookup-table account.
    ///
    /// Fails when the table account is missing, owned by the wrong program,
    /// has undeserializable data, or when an index lookup fails.
    pub fn load_lookup_table_addresses(
        &self,
        ancestors: &Ancestors,
        address_table_lookup: &MessageAddressTableLookup,
        slot_hashes: &SlotHashes,
    ) -> std::result::Result<LoadedAddresses, AddressLookupError> {
        let table_account = self
            .accounts_db
            .load_with_fixed_root(ancestors, &address_table_lookup.account_key)
            .map(|(account, _rent)| account)
            .ok_or(AddressLookupError::LookupTableAccountNotFound)?;
        if table_account.owner() == &solana_address_lookup_table_program::id() {
            // Lookups are validated against the current slot / slot hashes so
            // recently-extended entries are not used prematurely.
            let current_slot = ancestors.max_slot();
            let lookup_table = AddressLookupTable::deserialize(table_account.data())
                .map_err(|_ix_err| AddressLookupError::InvalidAccountData)?;
            Ok(LoadedAddresses {
                writable: lookup_table.lookup(
                    current_slot,
                    &address_table_lookup.writable_indexes,
                    slot_hashes,
                )?,
                readonly: lookup_table.lookup(
                    current_slot,
                    &address_table_lookup.readonly_indexes,
                    slot_hashes,
                )?,
            })
        } else {
            Err(AddressLookupError::InvalidAccountOwner)
        }
    }
fn filter_zero_lamport_account(
account: AccountSharedData,
slot: Slot,
) -> Option<(AccountSharedData, Slot)> {
if account.lamports() > 0 {
Some((account, slot))
} else {
None
}
}
/// Slow because lock is held for 1 operation instead of many
fn load_slow(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
load_hint: LoadHint,
) -> Option<(AccountSharedData, Slot)> {
let (account, slot) = self.accounts_db.load(ancestors, pubkey, load_hint)?;
Self::filter_zero_lamport_account(account, slot)
}
    /// Load an account assuming the max root is fixed for the duration of
    /// the call (see `LoadHint::FixedMaxRoot`). Zero-lamport results filter
    /// to `None`.
    pub fn load_with_fixed_root(
        &self,
        ancestors: &Ancestors,
        pubkey: &Pubkey,
    ) -> Option<(AccountSharedData, Slot)> {
        self.load_slow(ancestors, pubkey, LoadHint::FixedMaxRoot)
    }
    /// Load an account without assuming a fixed max root
    /// (see `LoadHint::Unspecified`). Zero-lamport results filter to `None`.
    pub fn load_without_fixed_root(
        &self,
        ancestors: &Ancestors,
        pubkey: &Pubkey,
    ) -> Option<(AccountSharedData, Slot)> {
        self.load_slow(ancestors, pubkey, LoadHint::Unspecified)
    }
    /// scans underlying accounts_db for this delta (slot) with a map function
    /// from LoadedAccount to B
    /// returns only the latest/current version of B for this slot
    ///
    /// For cached slots there is one version per key; for stored slots,
    /// entries are deduplicated by keeping the highest write version per
    /// pubkey (a lock-free read followed by a double-checked write).
    pub fn scan_slot<F, B>(&self, slot: Slot, func: F) -> Vec<B>
    where
        F: Fn(LoadedAccount) -> Option<B> + Send + Sync,
        B: Sync + Send + Default + std::cmp::Eq,
    {
        let scan_result = self.accounts_db.scan_account_storage(
            slot,
            |loaded_account: LoadedAccount| {
                // Cache only has one version per key, don't need to worry about versioning
                func(loaded_account)
            },
            |accum: &DashMap<Pubkey, (u64, B)>, loaded_account: LoadedAccount| {
                let loaded_account_pubkey = *loaded_account.pubkey();
                let loaded_write_version = loaded_account.write_version();
                // Optimistic check to avoid calling `func` for stale versions.
                let should_insert = accum
                    .get(&loaded_account_pubkey)
                    .map(|existing_entry| loaded_write_version > existing_entry.value().0)
                    .unwrap_or(true);
                if should_insert {
                    if let Some(val) = func(loaded_account) {
                        // Detected insertion is necessary, grabs the write lock to commit the write,
                        match accum.entry(loaded_account_pubkey) {
                            // Double check in case another thread interleaved a write between the read + write.
                            Occupied(mut occupied_entry) => {
                                if loaded_write_version > occupied_entry.get().0 {
                                    occupied_entry.insert((loaded_write_version, val));
                                }
                            }
                            Vacant(vacant_entry) => {
                                vacant_entry.insert((loaded_write_version, val));
                            }
                        }
                    }
                }
            },
        );
        match scan_result {
            ScanStorageResult::Cached(cached_result) => cached_result,
            ScanStorageResult::Stored(stored_result) => stored_result
                .into_iter()
                .map(|(_pubkey, (_latest_write_version, val))| val)
                .collect(),
        }
    }
pub fn load_by_program_slot(
&self,
slot: Slot,
program_id: Option<&Pubkey>,
) -> Vec<TransactionAccount> {
self.scan_slot(slot, |stored_account| {
let hit = match program_id {
None => true,
Some(program_id) => stored_account.owner() == program_id,
};
if hit {
Some((*stored_account.pubkey(), stored_account.take_account()))
} else {
None
}
})
}
    /// Return up to `num` accounts with the largest lamport balances, sorted
    /// descending, filtered by `filter_by_address` (included or excluded per
    /// `filter`). Zero-lamport accounts are skipped. A bounded min-heap of
    /// `Reverse` entries keeps memory at O(num) during the full scan.
    pub fn load_largest_accounts(
        &self,
        ancestors: &Ancestors,
        bank_id: BankId,
        num: usize,
        filter_by_address: &HashSet<Pubkey>,
        filter: AccountAddressFilter,
    ) -> ScanResult<Vec<(Pubkey, u64)>> {
        if num == 0 {
            return Ok(vec![]);
        }
        let account_balances = self.accounts_db.scan_accounts(
            ancestors,
            bank_id,
            |collector: &mut BinaryHeap<Reverse<(u64, Pubkey)>>, option| {
                if let Some((pubkey, account, _slot)) = option {
                    if account.lamports() == 0 {
                        return;
                    }
                    let contains_address = filter_by_address.contains(pubkey);
                    let collect = match filter {
                        AccountAddressFilter::Exclude => !contains_address,
                        AccountAddressFilter::Include => contains_address,
                    };
                    if !collect {
                        return;
                    }
                    if collector.len() == num {
                        // Heap is full: only replace the current minimum.
                        let Reverse(entry) = collector
                            .peek()
                            .expect("BinaryHeap::peek should succeed when len > 0");
                        if *entry >= (account.lamports(), *pubkey) {
                            return;
                        }
                        collector.pop();
                    }
                    collector.push(Reverse((account.lamports(), *pubkey)));
                }
            },
            &ScanConfig::default(),
        )?;
        // `into_sorted_vec` on Reverse entries yields descending balances.
        Ok(account_balances
            .into_sorted_vec()
            .into_iter()
            .map(|Reverse((balance, pubkey))| (pubkey, balance))
            .collect())
    }
    /// Recompute total lamport capitalization at `slot` by running a full
    /// accounts-hash calculation (index-free scan) and returning its lamport
    /// sum component.
    pub fn calculate_capitalization(
        &self,
        ancestors: &Ancestors,
        slot: Slot,
        can_cached_slot_be_unflushed: bool,
        debug_verify: bool,
        epoch_schedule: &EpochSchedule,
        rent_collector: &RentCollector,
    ) -> u64 {
        // Scan storages rather than the index for a consistent total.
        let use_index = false;
        let is_startup = false; // there may be conditions where this is called at startup.
        self.accounts_db
            .update_accounts_hash_with_index_option(
                use_index,
                debug_verify,
                slot,
                ancestors,
                None,
                can_cached_slot_be_unflushed,
                epoch_schedule,
                rent_collector,
                is_startup,
            )
            .1
    }
/// Only called from startup or test code.
#[must_use]
pub fn verify_bank_hash_and_lamports(
&self,
slot: Slot,
ancestors: &Ancestors,
total_lamports: u64,
test_hash_calculation: bool,
epoch_schedule: &EpochSchedule,
rent_collector: &RentCollector,
) -> bool {
if let Err(err) = self.accounts_db.verify_bank_hash_and_lamports_new(
slot,
ancestors,
total_lamports,
test_hash_calculation,
epoch_schedule,
rent_collector,
) {
warn!("verify_bank_hash failed: {:?}", err);
false
} else {
true
}
}
fn is_loadable(lamports: u64) -> bool {
// Don't ever load zero lamport accounts into runtime because
// the existence of zero-lamport accounts are never deterministic!!
lamports > 0
}
fn load_while_filtering<F: Fn(&AccountSharedData) -> bool>(
collector: &mut Vec<TransactionAccount>,
some_account_tuple: Option<(&Pubkey, AccountSharedData, Slot)>,
filter: F,
) {
if let Some(mapped_account_tuple) = some_account_tuple
.filter(|(_, account, _)| Self::is_loadable(account.lamports()) && filter(account))
.map(|(pubkey, account, _slot)| (*pubkey, account))
{
collector.push(mapped_account_tuple)
}
}
    /// Scan all accounts and return those owned by `program_id`
    /// (zero-lamport accounts excluded).
    pub fn load_by_program(
        &self,
        ancestors: &Ancestors,
        bank_id: BankId,
        program_id: &Pubkey,
        config: &ScanConfig,
    ) -> ScanResult<Vec<TransactionAccount>> {
        self.accounts_db.scan_accounts(
            ancestors,
            bank_id,
            |collector: &mut Vec<TransactionAccount>, some_account_tuple| {
                Self::load_while_filtering(collector, some_account_tuple, |account| {
                    account.owner() == program_id
                })
            },
            config,
        )
    }
    /// Like `load_by_program`, but additionally restricted by a caller
    /// predicate on the account contents.
    pub fn load_by_program_with_filter<F: Fn(&AccountSharedData) -> bool>(
        &self,
        ancestors: &Ancestors,
        bank_id: BankId,
        program_id: &Pubkey,
        filter: F,
        config: &ScanConfig,
    ) -> ScanResult<Vec<TransactionAccount>> {
        self.accounts_db.scan_accounts(
            ancestors,
            bank_id,
            |collector: &mut Vec<TransactionAccount>, some_account_tuple| {
                Self::load_while_filtering(collector, some_account_tuple, |account| {
                    account.owner() == program_id && filter(account)
                })
            },
            config,
        )
    }
    /// Estimated in-memory footprint of one scan-result entry: the account's
    /// data bytes plus the fixed sizes of the account struct and its pubkey.
    fn calc_scan_result_size(account: &AccountSharedData) -> usize {
        account.data().len()
            + std::mem::size_of::<AccountSharedData>()
            + std::mem::size_of::<Pubkey>()
    }
/// Accumulate size of (pubkey + account) into sum.
/// Return true iff sum > 'byte_limit_for_scan'
fn accumulate_and_check_scan_result_size(
sum: &AtomicUsize,
account: &AccountSharedData,
byte_limit_for_scan: &Option<usize>,
) -> bool {
if let Some(byte_limit_for_scan) = byte_limit_for_scan.as_ref() {
let added = Self::calc_scan_result_size(account);
sum.fetch_add(added, Ordering::Relaxed)
.saturating_add(added)
> *byte_limit_for_scan
} else {
false
}
}
fn maybe_abort_scan(
result: ScanResult<Vec<TransactionAccount>>,
config: &ScanConfig,
) -> ScanResult<Vec<TransactionAccount>> {
if config.is_aborted() {
ScanResult::Err(ScanError::Aborted(
"The accumulated scan results exceeded the limit".to_string(),
))
} else {
result
}
}
    /// Scan accounts via a secondary index key, restricted by `filter`, and
    /// abort the scan once the accumulated result size exceeds
    /// `byte_limit_for_scan` (when given). Aborted scans return
    /// `ScanError::Aborted`.
    pub fn load_by_index_key_with_filter<F: Fn(&AccountSharedData) -> bool>(
        &self,
        ancestors: &Ancestors,
        bank_id: BankId,
        index_key: &IndexKey,
        filter: F,
        config: &ScanConfig,
        byte_limit_for_scan: Option<usize>,
    ) -> ScanResult<Vec<TransactionAccount>> {
        // Running byte total shared with the scan callbacks.
        let sum = AtomicUsize::default();
        // Re-create the config with an abort flag this function controls.
        let config = config.recreate_with_abort();
        let result = self
            .accounts_db
            .index_scan_accounts(
                ancestors,
                bank_id,
                *index_key,
                |collector: &mut Vec<TransactionAccount>, some_account_tuple| {
                    Self::load_while_filtering(collector, some_account_tuple, |account| {
                        let use_account = filter(account);
                        if use_account
                            && Self::accumulate_and_check_scan_result_size(
                                &sum,
                                account,
                                &byte_limit_for_scan,
                            )
                        {
                            // total size of results exceeds size limit, so abort scan
                            config.abort();
                        }
                        use_account
                    });
                },
                &config,
            )
            .map(|result| result.0);
        Self::maybe_abort_scan(result, &config)
    }
    /// True if `key` is covered by the configured secondary account indexes.
    pub fn account_indexes_include_key(&self, key: &Pubkey) -> bool {
        self.accounts_db.account_indexes.include_key(key)
    }
    /// Scan and return every loadable (nonzero-lamport) account with the
    /// slot it was found in.
    pub fn load_all(
        &self,
        ancestors: &Ancestors,
        bank_id: BankId,
    ) -> ScanResult<Vec<(Pubkey, AccountSharedData, Slot)>> {
        self.accounts_db.scan_accounts(
            ancestors,
            bank_id,
            |collector: &mut Vec<(Pubkey, AccountSharedData, Slot)>, some_account_tuple| {
                if let Some((pubkey, account, slot)) = some_account_tuple
                    .filter(|(_, account, _)| Self::is_loadable(account.lamports()))
                {
                    collector.push((*pubkey, account, slot))
                }
            },
            &ScanConfig::default(),
        )
    }
    /// Pin (or release, per `start_holding`) the index entries for `range`
    /// in memory; delegates to the accounts index.
    pub fn hold_range_in_memory<R>(
        &self,
        range: &R,
        start_holding: bool,
        thread_pool: &rayon::ThreadPool,
    ) where
        R: RangeBounds<Pubkey> + std::fmt::Debug + Sync,
    {
        self.accounts_db
            .accounts_index
            .hold_range_in_memory(range, start_holding, thread_pool)
    }
    /// Range-scan loadable accounts in `range` for eager rent collection.
    /// `ScanConfig::new(true)` marks the scan appropriately for this use.
    pub fn load_to_collect_rent_eagerly<R: RangeBounds<Pubkey> + std::fmt::Debug>(
        &self,
        ancestors: &Ancestors,
        range: R,
    ) -> Vec<TransactionAccount> {
        self.accounts_db.range_scan_accounts(
            "load_to_collect_rent_eagerly_scan_elapsed",
            ancestors,
            range,
            &ScanConfig::new(true),
            |collector: &mut Vec<TransactionAccount>, option| {
                Self::load_while_filtering(collector, option, |_| true)
            },
        )
    }
    /// Slow because lock is held for 1 operation instead of many.
    /// WARNING: This noncached version is only to be used for tests/benchmarking
    /// as bypassing the cache in general is not supported
    pub fn store_slow_uncached(&self, slot: Slot, pubkey: &Pubkey, account: &AccountSharedData) {
        self.accounts_db.store_uncached(slot, &[(pubkey, account)]);
    }
    /// Store a single account through the write cache.
    /// Slow because lock is held for 1 operation instead of many.
    pub fn store_slow_cached(&self, slot: Slot, pubkey: &Pubkey, account: &AccountSharedData) {
        self.accounts_db.store_cached(slot, &[(pubkey, account)]);
    }
fn lock_account(
&self,
account_locks: &mut AccountLocks,
writable_keys: Vec<&Pubkey>,
readonly_keys: Vec<&Pubkey>,
) -> Result<()> {
for k in writable_keys.iter() {
if account_locks.is_locked_write(k) || account_locks.is_locked_readonly(k) {
debug!("Writable account in use: {:?}", k);
return Err(TransactionError::AccountInUse);
}
}
for k in readonly_keys.iter() {
if account_locks.is_locked_write(k) {
debug!("Read-only account in use: {:?}", k);
return Err(TransactionError::AccountInUse);
}
}
for k in writable_keys {
account_locks.write_locks.insert(*k);
}
for k in readonly_keys {
if !account_locks.lock_readonly(k) {
account_locks.insert_new_readonly(k);
}
}
Ok(())
}
fn unlock_account(
&self,
account_locks: &mut AccountLocks,
writable_keys: Vec<&Pubkey>,
readonly_keys: Vec<&Pubkey>,
) {
for k in writable_keys {
account_locks.unlock_write(k);
}
for k in readonly_keys {
account_locks.unlock_readonly(k);
}
}
    /// Accounts-delta hash for `slot` (the `hash` field of its `BankHashInfo`).
    pub fn bank_hash_at(&self, slot: Slot) -> Hash {
        self.bank_hash_info_at(slot).hash
    }
    /// Return the `BankHashInfo` for `slot` with its `hash` field replaced by
    /// the accounts-delta hash.
    ///
    /// Panics if no bank hash entry exists for `slot` — callers must only ask
    /// about slots that have been hashed.
    pub fn bank_hash_info_at(&self, slot: Slot) -> BankHashInfo {
        let delta_hash = self.accounts_db.get_accounts_delta_hash(slot);
        let bank_hashes = self.accounts_db.bank_hashes.read().unwrap();
        let mut hash_info = bank_hashes
            .get(&slot)
            .expect("No bank hash was found for this bank, that should not be possible")
            .clone();
        hash_info.hash = delta_hash;
        hash_info
    }
    /// This function will prevent multiple threads from modifying the same account state at the
    /// same time
    ///
    /// Returns one `Result` per transaction; `Err(AccountInUse)` entries did
    /// not take any locks.
    #[must_use]
    #[allow(clippy::needless_collect)]
    pub fn lock_accounts<'a>(
        &self,
        txs: impl Iterator<Item = &'a SanitizedTransaction>,
        feature_set: &FeatureSet,
    ) -> Vec<Result<()>> {
        let tx_account_locks_results: Vec<Result<_>> =
            txs.map(|tx| tx.get_account_locks(feature_set)).collect();
        self.lock_accounts_inner(tx_account_locks_results)
    }
#[must_use]
#[allow(clippy::needless_collect)]
pub fn lock_accounts_with_results<'a>(
&self,
txs: impl Iterator<Item = &'a SanitizedTransaction>,
results: impl Iterator<Item = Result<()>>,
feature_set: &FeatureSet,
) -> Vec<Result<()>> {
let tx_account_locks_results: Vec<Result<_>> = txs
.zip(results)
.map(|(tx, result)| match result {
Ok(()) => tx.get_account_locks(feature_set),
Err(err) => Err(err),
})
.collect();
self.lock_accounts_inner(tx_account_locks_results)
}
#[must_use]
fn lock_accounts_inner(
&self,
tx_account_locks_results: Vec<Result<TransactionAccountLocks>>,
) -> Vec<Result<()>> {
let account_locks = &mut self.account_locks.lock().unwrap();
tx_account_locks_results
.into_iter()
.map(|tx_account_locks_result| match tx_account_locks_result {
Ok(tx_account_locks) => self.lock_account(
account_locks,
tx_account_locks.writable,
tx_account_locks.readonly,
),
Err(err) => Err(err),
})
.collect()
}
    /// Once accounts are unlocked, new transactions that modify that state can enter the pipeline
    ///
    /// Transactions whose result is one of the listed pre-lock errors never
    /// acquired locks, so they are skipped; all others are unlocked.
    #[allow(clippy::needless_collect)]
    pub fn unlock_accounts<'a>(
        &self,
        txs: impl Iterator<Item = &'a SanitizedTransaction>,
        results: &[Result<()>],
    ) {
        let keys: Vec<_> = txs
            .zip(results)
            .filter_map(|(tx, res)| match res {
                // These errors all occur before locks are taken.
                Err(TransactionError::AccountLoadedTwice)
                | Err(TransactionError::AccountInUse)
                | Err(TransactionError::SanitizeFailure)
                | Err(TransactionError::TooManyAccountLocks)
                | Err(TransactionError::WouldExceedMaxBlockCostLimit)
                | Err(TransactionError::WouldExceedMaxVoteCostLimit)
                | Err(TransactionError::WouldExceedMaxAccountCostLimit)
                | Err(TransactionError::WouldExceedAccountDataBlockLimit)
                | Err(TransactionError::WouldExceedAccountDataTotalLimit) => None,
                _ => Some(tx.get_account_locks_unchecked()),
            })
            .collect();
        let mut account_locks = self.account_locks.lock().unwrap();
        debug!("bank unlock accounts");
        keys.into_iter().for_each(|keys| {
            self.unlock_account(&mut account_locks, keys.writable, keys.readonly);
        });
    }
    /// Store the accounts into the DB
    ///
    /// Collects the writable accounts of successfully-executed transactions
    /// (with nonce rollback applied where needed) and writes them through the
    /// accounts cache at `slot`.
    // allow(clippy) needed for various gating flags
    #[allow(clippy::too_many_arguments)]
    pub fn store_cached<'a>(
        &self,
        slot: Slot,
        txs: &'a [SanitizedTransaction],
        res: &'a [TransactionExecutionResult],
        loaded: &'a mut [TransactionLoadResult],
        rent_collector: &RentCollector,
        blockhash: &Hash,
        lamports_per_signature: u64,
        leave_nonce_on_success: bool,
    ) {
        let accounts_to_store = self.collect_accounts_to_store(
            txs,
            res,
            loaded,
            rent_collector,
            blockhash,
            lamports_per_signature,
            leave_nonce_on_success,
        );
        self.accounts_db.store_cached(slot, &accounts_to_store);
    }
    /// Purge a slot if it is not a root
    /// Root slots cannot be purged
    /// `is_from_abs` is true if the caller is the AccountsBackgroundService
    pub fn purge_slot(&self, slot: Slot, bank_id: BankId, is_from_abs: bool) {
        self.accounts_db.purge_slot(slot, bank_id, is_from_abs);
    }
    /// Add a slot to root. Root slots cannot be purged
    pub fn add_root(&self, slot: Slot) -> AccountsAddRootTiming {
        self.accounts_db.add_root(slot)
    }
    /// Gather references to the writable accounts that must be written back
    /// after execution.
    ///
    /// Skips transactions that failed to load or were not executed. For
    /// executed transactions it applies nonce handling (advance on success
    /// unless `leave_nonce_on_success`; rollback on failure) via
    /// `prepare_if_nonce_account`, collects rent from newly-created accounts,
    /// and updates the transaction's rent/rent_debits in place.
    #[allow(clippy::too_many_arguments)]
    fn collect_accounts_to_store<'a>(
        &self,
        txs: &'a [SanitizedTransaction],
        execution_results: &'a [TransactionExecutionResult],
        load_results: &'a mut [TransactionLoadResult],
        rent_collector: &RentCollector,
        blockhash: &Hash,
        lamports_per_signature: u64,
        leave_nonce_on_success: bool,
    ) -> Vec<(&'a Pubkey, &'a AccountSharedData)> {
        let mut accounts = Vec::with_capacity(load_results.len());
        for (i, ((tx_load_result, nonce), tx)) in load_results.iter_mut().zip(txs).enumerate() {
            if tx_load_result.is_err() {
                // Don't store any accounts if tx failed to load
                continue;
            }
            let execution_status = match &execution_results[i] {
                TransactionExecutionResult::Executed(details) => &details.status,
                // Don't store any accounts if tx wasn't executed
                TransactionExecutionResult::NotExecuted(_) => continue,
            };
            // Decide whether nonce accounts are advanced or rolled back.
            let maybe_nonce = match (execution_status, &*nonce) {
                (Ok(()), Some(nonce)) => {
                    if leave_nonce_on_success {
                        None
                    } else {
                        Some((nonce, false /* rollback */))
                    }
                }
                (Err(_), Some(nonce)) => {
                    Some((nonce, true /* rollback */))
                }
                (Ok(_), None) => None, // Success, don't do any additional nonce processing
                (Err(_), None) => {
                    // Fees for failed transactions which don't use durable nonces are
                    // deducted in Bank::filter_program_errors_and_collect_fee
                    continue;
                }
            };
            let message = tx.message();
            let loaded_transaction = tx_load_result.as_mut().unwrap();
            let mut fee_payer_index = None;
            for (i, (address, account)) in (0..message.account_keys().len())
                .zip(loaded_transaction.accounts.iter_mut())
                .filter(|(i, _)| message.is_non_loader_key(*i))
            {
                // The first non-loader key is the fee payer.
                if fee_payer_index.is_none() {
                    fee_payer_index = Some(i);
                }
                let is_fee_payer = Some(i) == fee_payer_index;
                if message.is_writable(i) {
                    let is_nonce_account = prepare_if_nonce_account(
                        address,
                        account,
                        execution_status,
                        is_fee_payer,
                        maybe_nonce,
                        blockhash,
                        lamports_per_signature,
                    );
                    // Failed non-nonce, non-payer accounts are not stored.
                    if execution_status.is_ok() || is_nonce_account || is_fee_payer {
                        if account.rent_epoch() == INITIAL_RENT_EPOCH {
                            // First time this account is stored: start rent tracking.
                            let rent = rent_collector
                                .collect_from_created_account(address, account)
                                .rent_amount;
                            loaded_transaction.rent += rent;
                            loaded_transaction.rent_debits.insert(
                                address,
                                rent,
                                account.lamports(),
                            );
                        }
                        // Add to the accounts to store
                        accounts.push((&*address, &*account));
                    }
                }
            }
        }
        accounts
    }
}
/// Prepare a writable account for storage when a durable nonce is involved.
///
/// Returns `true` iff `address` IS the nonce account itself. In that case,
/// when `rollback` is set the account is restored to its pre-execution state,
/// and in all cases the stored blockhash is advanced so a failed nonce
/// transaction cannot be replayed. When `address` is NOT the nonce account
/// but is the fee payer of a failed nonce transaction, the payer is rolled
/// back to its fee-paid snapshot (if one was captured).
pub fn prepare_if_nonce_account<'a>(
    address: &Pubkey,
    account: &mut AccountSharedData,
    execution_result: &Result<()>,
    is_fee_payer: bool,
    maybe_nonce: Option<(&'a NonceFull, bool)>,
    blockhash: &Hash,
    lamports_per_signature: u64,
) -> bool {
    if let Some((nonce, rollback)) = maybe_nonce {
        if address == nonce.address() {
            if rollback {
                // The transaction failed which would normally drop the account
                // processing changes, since this account is now being included
                // in the accounts written back to the db, roll it back to
                // pre-processing state.
                *account = nonce.account().clone();
            }
            // Advance the stored blockhash to prevent fee theft by someone
            // replaying nonce transactions that have failed with an
            // `InstructionError`.
            //
            // Since we know we are dealing with a valid nonce account,
            // unwrap is safe here
            let state = StateMut::<NonceVersions>::state(nonce.account())
                .unwrap()
                .convert_to_current();
            if let NonceState::Initialized(ref data) = state {
                account
                    .set_state(&NonceVersions::new_current(NonceState::new_initialized(
                        &data.authority,
                        blockhash,
                        lamports_per_signature,
                    )))
                    .unwrap();
            }
            true
        } else {
            if execution_result.is_err() && is_fee_payer {
                if let Some(fee_payer_account) = nonce.fee_payer_account() {
                    // Instruction error and fee-payer for this nonce tx is not
                    // the nonce account itself, rollback the fee payer to the
                    // fee-paid original state.
                    *account = fee_payer_account.clone();
                }
            }
            false
        }
    } else {
        false
    }
}
/// Populate `accounts` with `num` freshly-generated default-owned accounts at
/// `slot`, recording each new pubkey in `pubkeys`. The t-th account (1-based)
/// is funded with `t` lamports so every account is nonzero and loadable.
pub fn create_test_accounts(
    accounts: &Accounts,
    pubkeys: &mut Vec<Pubkey>,
    num: usize,
    slot: Slot,
) {
    for lamports in 1..=num {
        let pubkey = solana_sdk::pubkey::new_rand();
        let account =
            AccountSharedData::new(lamports as u64, 0, AccountSharedData::default().owner());
        accounts.store_slow_uncached(slot, &pubkey, &account);
        pubkeys.push(pubkey);
    }
}
// Only used by bench, not safe to call otherwise accounts can conflict with the
// accounts cache!
//
// Re-stores each pubkey at `slot` with a random 0..10 lamport balance.
pub fn update_accounts_bench(accounts: &Accounts, pubkeys: &[Pubkey], slot: u64) {
    for pubkey in pubkeys {
        let amount = thread_rng().gen_range(0, 10);
        let account = AccountSharedData::new(amount, 0, AccountSharedData::default().owner());
        accounts.store_slow_uncached(slot, pubkey, &account);
    }
}
#[cfg(test)]
mod tests {
use {
super::*,
crate::{
bank::{DurableNonceFee, TransactionExecutionDetails},
rent_collector::RentCollector,
},
solana_address_lookup_table_program::state::LookupTableMeta,
solana_sdk::{
account::{AccountSharedData, WritableAccount},
epoch_schedule::EpochSchedule,
genesis_config::ClusterType,
hash::Hash,
instruction::{CompiledInstruction, InstructionError},
message::{Message, MessageHeader},
nonce, nonce_account,
rent::Rent,
signature::{keypair_from_seed, signers::Signers, Keypair, Signer},
system_instruction, system_program,
transaction::{Transaction, MAX_TX_ACCOUNT_LOCKS},
},
std::{
borrow::Cow,
convert::TryFrom,
sync::atomic::{AtomicBool, AtomicU64, Ordering},
thread, time,
},
};
    // Test helper: sign `message` with `from_keypairs` and wrap it as a
    // SanitizedTransaction.
    fn new_sanitized_tx<T: Signers>(
        from_keypairs: &T,
        message: Message,
        recent_blockhash: Hash,
    ) -> SanitizedTransaction {
        SanitizedTransaction::from_transaction_for_tests(Transaction::new(
            from_keypairs,
            message,
            recent_blockhash,
        ))
    }
    // Test helper: build an `Executed` result with the given status and an
    // optional durable-nonce fee, leaving all other details empty.
    fn new_execution_result(
        status: Result<()>,
        nonce: Option<&NonceFull>,
    ) -> TransactionExecutionResult {
        TransactionExecutionResult::Executed(TransactionExecutionDetails {
            status,
            log_messages: None,
            inner_instructions: None,
            durable_nonce_fee: nonce.map(DurableNonceFee::from),
            return_data: None,
        })
    }
    // Test helper: store the fixture accounts `ka` at slot 0, register the
    // tx's blockhash at the given fee rate, and run `load_accounts` with the
    // supplied rent collector / feature set / fee structure.
    fn load_accounts_with_fee_and_rent(
        tx: Transaction,
        ka: &[TransactionAccount],
        lamports_per_signature: u64,
        rent_collector: &RentCollector,
        error_counters: &mut ErrorCounters,
        feature_set: &FeatureSet,
        fee_structure: &FeeStructure,
    ) -> Vec<TransactionLoadResult> {
        let mut hash_queue = BlockhashQueue::new(100);
        hash_queue.register_hash(&tx.message().recent_blockhash, lamports_per_signature);
        let accounts = Accounts::new_with_config_for_tests(
            Vec::new(),
            &ClusterType::Development,
            AccountSecondaryIndexes::default(),
            false,
            AccountShrinkThreshold::default(),
        );
        for ka in ka.iter() {
            accounts.store_slow_uncached(0, &ka.0, &ka.1);
        }
        let ancestors = vec![(0, 0)].into_iter().collect();
        let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(tx);
        accounts.load_accounts(
            &ancestors,
            &[sanitized_tx],
            vec![(Ok(()), None)],
            &hash_queue,
            error_counters,
            rent_collector,
            feature_set,
            fee_structure,
        )
    }
    // Test helper: `load_accounts_with_fee_and_rent` with default rent
    // collector / fee structure and all features enabled.
    fn load_accounts_with_fee(
        tx: Transaction,
        ka: &[TransactionAccount],
        lamports_per_signature: u64,
        error_counters: &mut ErrorCounters,
    ) -> Vec<TransactionLoadResult> {
        load_accounts_with_fee_and_rent(
            tx,
            ka,
            lamports_per_signature,
            &RentCollector::default(),
            error_counters,
            &FeatureSet::all_enabled(),
            &FeeStructure::default(),
        )
    }
    // Test helper: fee-free variant of `load_accounts_with_fee`.
    fn load_accounts(
        tx: Transaction,
        ka: &[TransactionAccount],
        error_counters: &mut ErrorCounters,
    ) -> Vec<TransactionLoadResult> {
        load_accounts_with_fee(tx, ka, 0, error_counters)
    }
#[test]
fn test_hold_range_in_memory() {
let accts = Accounts::default_for_tests();
let range = Pubkey::new(&[0; 32])..=Pubkey::new(&[0xff; 32]);
accts.hold_range_in_memory(&range, true, &test_thread_pool());
accts.hold_range_in_memory(&range, false, &test_thread_pool());
accts.hold_range_in_memory(&range, true, &test_thread_pool());
accts.hold_range_in_memory(&range, true, &test_thread_pool());
accts.hold_range_in_memory(&range, false, &test_thread_pool());
accts.hold_range_in_memory(&range, false, &test_thread_pool());
}
#[test]
fn test_hold_range_in_memory2() {
let accts = Accounts::default_for_tests();
let range = Pubkey::new(&[0; 32])..=Pubkey::new(&[0xff; 32]);
let idx = &accts.accounts_db.accounts_index;
let bins = idx.account_maps.len();
// use bins * 2 to get the first half of the range within bin 0
let bins_2 = bins * 2;
let binner = crate::pubkey_bins::PubkeyBinCalculator24::new(bins_2);
let range2 =
binner.lowest_pubkey_from_bin(0, bins_2)..binner.lowest_pubkey_from_bin(1, bins_2);
let range2_inclusive = range2.start..=range2.end;
assert_eq!(0, idx.bin_calculator.bin_from_pubkey(&range2.start));
assert_eq!(0, idx.bin_calculator.bin_from_pubkey(&range2.end));
accts.hold_range_in_memory(&range, true, &test_thread_pool());
idx.account_maps.iter().enumerate().for_each(|(_bin, map)| {
let map = map.read().unwrap();
assert_eq!(
map.cache_ranges_held.read().unwrap().to_vec(),
vec![range.clone()]
);
});
accts.hold_range_in_memory(&range2, true, &test_thread_pool());
idx.account_maps.iter().enumerate().for_each(|(bin, map)| {
let map = map.read().unwrap();
let expected = if bin == 0 {
vec![range.clone(), range2_inclusive.clone()]
} else {
vec![range.clone()]
};
assert_eq!(
map.cache_ranges_held.read().unwrap().to_vec(),
expected,
"bin: {}",
bin
);
});
accts.hold_range_in_memory(&range, false, &test_thread_pool());
accts.hold_range_in_memory(&range2, false, &test_thread_pool());
}
    // Minimal rayon pool for tests exercising `hold_range_in_memory`.
    fn test_thread_pool() -> rayon::ThreadPool {
        crate::accounts_db::make_min_priority_thread_pool()
    }
#[test]
fn test_load_accounts_no_account_0_exists() {
let accounts: Vec<TransactionAccount> = Vec::new();
let mut error_counters = ErrorCounters::default();
let keypair = Keypair::new();
let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
let tx = Transaction::new_with_compiled_instructions(
&[&keypair],
&[],
Hash::default(),
vec![native_loader::id()],
instructions,
);
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.account_not_found, 1);
assert_eq!(loaded_accounts.len(), 1);
assert_eq!(
loaded_accounts[0],
(Err(TransactionError::AccountNotFound), None,),
);
}
#[test]
fn test_load_accounts_unknown_program_id() {
let mut accounts: Vec<TransactionAccount> = Vec::new();
let mut error_counters = ErrorCounters::default();
let keypair = Keypair::new();
let key0 = keypair.pubkey();
let key1 = Pubkey::new(&[5u8; 32]);
let account = AccountSharedData::new(1, 0, &Pubkey::default());
accounts.push((key0, account));
let account = AccountSharedData::new(2, 1, &Pubkey::default());
accounts.push((key1, account));
let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
let tx = Transaction::new_with_compiled_instructions(
&[&keypair],
&[],
Hash::default(),
vec![Pubkey::default()],
instructions,
);
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.account_not_found, 1);
assert_eq!(loaded_accounts.len(), 1);
assert_eq!(
loaded_accounts[0],
(Err(TransactionError::ProgramAccountNotFound), None,)
);
}
#[test]
fn test_load_accounts_insufficient_funds() {
let mut accounts: Vec<TransactionAccount> = Vec::new();
let mut error_counters = ErrorCounters::default();
let keypair = Keypair::new();
let key0 = keypair.pubkey();
let account = AccountSharedData::new(1, 0, &Pubkey::default());
accounts.push((key0, account));
let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
let tx = Transaction::new_with_compiled_instructions(
&[&keypair],
&[],
Hash::default(),
vec![native_loader::id()],
instructions,
);
let fee = Bank::calculate_fee(
&SanitizedMessage::try_from(tx.message().clone()).unwrap(),
10,
&FeeStructure::default(),
false,
);
assert_eq!(fee, 10);
let loaded_accounts = load_accounts_with_fee(tx, &accounts, 10, &mut error_counters);
assert_eq!(error_counters.insufficient_funds, 1);
assert_eq!(loaded_accounts.len(), 1);
assert_eq!(
loaded_accounts[0].clone(),
(Err(TransactionError::InsufficientFundsForFee), None,),
);
}
#[test]
fn test_load_accounts_invalid_account_for_fee() {
let mut accounts: Vec<TransactionAccount> = Vec::new();
let mut error_counters = ErrorCounters::default();
let keypair = Keypair::new();
let key0 = keypair.pubkey();
let account = AccountSharedData::new(1, 1, &solana_sdk::pubkey::new_rand()); // <-- owner is not the system program
accounts.push((key0, account));
let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
let tx = Transaction::new_with_compiled_instructions(
&[&keypair],
&[],
Hash::default(),
vec![native_loader::id()],
instructions,
);
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.invalid_account_for_fee, 1);
assert_eq!(loaded_accounts.len(), 1);
assert_eq!(
loaded_accounts[0],
(Err(TransactionError::InvalidAccountForFee), None,),
);
}
#[test]
fn test_load_accounts_fee_payer_is_nonce() {
let mut error_counters = ErrorCounters::default();
let mut feature_set = FeatureSet::all_enabled();
feature_set.deactivate(&tx_wide_compute_cap::id());
let rent_collector = RentCollector::new(
0,
&EpochSchedule::default(),
500_000.0,
&Rent {
lamports_per_byte_year: 42,
..Rent::default()
},
);
let min_balance = rent_collector.rent.minimum_balance(NonceState::size());
let nonce = Keypair::new();
let mut accounts = vec![(
nonce.pubkey(),
AccountSharedData::new_data(
min_balance * 2,
&NonceVersions::new_current(NonceState::Initialized(nonce::state::Data::default())),
&system_program::id(),
)
.unwrap(),
)];
let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
let tx = Transaction::new_with_compiled_instructions(
&[&nonce],
&[],
Hash::default(),
vec![native_loader::id()],
instructions,
);
// Fee leaves min_balance balance succeeds
let loaded_accounts = load_accounts_with_fee_and_rent(
tx.clone(),
&accounts,
min_balance,
&rent_collector,
&mut error_counters,
&feature_set,
&FeeStructure::default(),
);
assert_eq!(loaded_accounts.len(), 1);
let (load_res, _nonce) = &loaded_accounts[0];
let loaded_transaction = load_res.as_ref().unwrap();
assert_eq!(loaded_transaction.accounts[0].1.lamports(), min_balance);
// Fee leaves zero balance fails
accounts[0].1.set_lamports(min_balance);
let loaded_accounts = load_accounts_with_fee_and_rent(
tx.clone(),
&accounts,
min_balance,
&rent_collector,
&mut error_counters,
&feature_set,
&FeeStructure::default(),
);
assert_eq!(loaded_accounts.len(), 1);
let (load_res, _nonce) = &loaded_accounts[0];
assert_eq!(*load_res, Err(TransactionError::InsufficientFundsForFee));
// Fee leaves non-zero, but sub-min_balance balance fails
accounts[0].1.set_lamports(3 * min_balance / 2);
let loaded_accounts = load_accounts_with_fee_and_rent(
tx,
&accounts,
min_balance,
&rent_collector,
&mut error_counters,
&feature_set,
&FeeStructure::default(),
);
assert_eq!(loaded_accounts.len(), 1);
let (load_res, _nonce) = &loaded_accounts[0];
assert_eq!(*load_res, Err(TransactionError::InsufficientFundsForFee));
}
#[test]
fn test_load_accounts_no_loaders() {
let mut accounts: Vec<TransactionAccount> = Vec::new();
let mut error_counters = ErrorCounters::default();
let keypair = Keypair::new();
let key0 = keypair.pubkey();
let key1 = Pubkey::new(&[5u8; 32]);
let mut account = AccountSharedData::new(1, 0, &Pubkey::default());
account.set_rent_epoch(1);
accounts.push((key0, account));
let mut account = AccountSharedData::new(2, 1, &Pubkey::default());
account.set_rent_epoch(1);
accounts.push((key1, account));
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let tx = Transaction::new_with_compiled_instructions(
&[&keypair],
&[key1],
Hash::default(),
vec![native_loader::id()],
instructions,
);
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.account_not_found, 0);
assert_eq!(loaded_accounts.len(), 1);
match &loaded_accounts[0] {
(Ok(loaded_transaction), _nonce) => {
assert_eq!(loaded_transaction.accounts.len(), 3);
assert_eq!(loaded_transaction.accounts[0].1, accounts[0].1);
assert_eq!(loaded_transaction.program_indices.len(), 1);
assert_eq!(loaded_transaction.program_indices[0].len(), 0);
}
(Err(e), _nonce) => Err(e).unwrap(),
}
}
#[test]
fn test_load_accounts_max_call_depth() {
let mut accounts: Vec<TransactionAccount> = Vec::new();
let mut error_counters = ErrorCounters::default();
let keypair = Keypair::new();
let key0 = keypair.pubkey();
let key1 = Pubkey::new(&[5u8; 32]);
let key2 = Pubkey::new(&[6u8; 32]);
let key3 = Pubkey::new(&[7u8; 32]);
let key4 = Pubkey::new(&[8u8; 32]);
let key5 = Pubkey::new(&[9u8; 32]);
let key6 = Pubkey::new(&[10u8; 32]);
let account = AccountSharedData::new(1, 0, &Pubkey::default());
accounts.push((key0, account));
let mut account = AccountSharedData::new(40, 1, &Pubkey::default());
account.set_executable(true);
account.set_owner(native_loader::id());
accounts.push((key1, account));
let mut account = AccountSharedData::new(41, 1, &Pubkey::default());
account.set_executable(true);
account.set_owner(key1);
accounts.push((key2, account));
let mut account = AccountSharedData::new(42, 1, &Pubkey::default());
account.set_executable(true);
account.set_owner(key2);
accounts.push((key3, account));
let mut account = AccountSharedData::new(43, 1, &Pubkey::default());
account.set_executable(true);
account.set_owner(key3);
accounts.push((key4, account));
let mut account = AccountSharedData::new(44, 1, &Pubkey::default());
account.set_executable(true);
account.set_owner(key4);
accounts.push((key5, account));
let mut account = AccountSharedData::new(45, 1, &Pubkey::default());
account.set_executable(true);
account.set_owner(key5);
accounts.push((key6, account));
let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
let tx = Transaction::new_with_compiled_instructions(
&[&keypair],
&[],
Hash::default(),
vec![key6],
instructions,
);
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.call_chain_too_deep, 1);
assert_eq!(loaded_accounts.len(), 1);
assert_eq!(
loaded_accounts[0],
(Err(TransactionError::CallChainTooDeep), None,)
);
}
#[test]
fn test_load_accounts_bad_owner() {
let mut accounts: Vec<TransactionAccount> = Vec::new();
let mut error_counters = ErrorCounters::default();
let keypair = Keypair::new();
let key0 = keypair.pubkey();
let key1 = Pubkey::new(&[5u8; 32]);
let account = AccountSharedData::new(1, 0, &Pubkey::default());
accounts.push((key0, account));
let mut account = AccountSharedData::new(40, 1, &Pubkey::default());
account.set_executable(true);
accounts.push((key1, account));
let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
let tx = Transaction::new_with_compiled_instructions(
&[&keypair],
&[],
Hash::default(),
vec![key1],
instructions,
);
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.account_not_found, 1);
assert_eq!(loaded_accounts.len(), 1);
assert_eq!(
loaded_accounts[0],
(Err(TransactionError::ProgramAccountNotFound), None,)
);
}
#[test]
fn test_load_accounts_not_executable() {
let mut accounts: Vec<TransactionAccount> = Vec::new();
let mut error_counters = ErrorCounters::default();
let keypair = Keypair::new();
let key0 = keypair.pubkey();
let key1 = Pubkey::new(&[5u8; 32]);
let account = AccountSharedData::new(1, 0, &Pubkey::default());
accounts.push((key0, account));
let account = AccountSharedData::new(40, 1, &native_loader::id());
accounts.push((key1, account));
let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
let tx = Transaction::new_with_compiled_instructions(
&[&keypair],
&[],
Hash::default(),
vec![key1],
instructions,
);
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.invalid_program_for_execution, 1);
assert_eq!(loaded_accounts.len(), 1);
assert_eq!(
loaded_accounts[0],
(Err(TransactionError::InvalidProgramForExecution), None,)
);
}
#[test]
fn test_load_accounts_multiple_loaders() {
let mut accounts: Vec<TransactionAccount> = Vec::new();
let mut error_counters = ErrorCounters::default();
let keypair = Keypair::new();
let key0 = keypair.pubkey();
let key1 = Pubkey::new(&[5u8; 32]);
let key2 = Pubkey::new(&[6u8; 32]);
let mut account = AccountSharedData::new(1, 0, &Pubkey::default());
account.set_rent_epoch(1);
accounts.push((key0, account));
let mut account = AccountSharedData::new(40, 1, &Pubkey::default());
account.set_executable(true);
account.set_rent_epoch(1);
account.set_owner(native_loader::id());
accounts.push((key1, account));
let mut account = AccountSharedData::new(41, 1, &Pubkey::default());
account.set_executable(true);
account.set_rent_epoch(1);
account.set_owner(key1);
accounts.push((key2, account));
let instructions = vec![
CompiledInstruction::new(1, &(), vec![0]),
CompiledInstruction::new(2, &(), vec![0]),
];
let tx = Transaction::new_with_compiled_instructions(
&[&keypair],
&[],
Hash::default(),
vec![key1, key2],
instructions,
);
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.account_not_found, 0);
assert_eq!(loaded_accounts.len(), 1);
match &loaded_accounts[0] {
(Ok(loaded_transaction), _nonce) => {
assert_eq!(loaded_transaction.accounts.len(), 6);
assert_eq!(loaded_transaction.accounts[0].1, accounts[0].1);
assert_eq!(loaded_transaction.program_indices.len(), 2);
assert_eq!(loaded_transaction.program_indices[0].len(), 1);
assert_eq!(loaded_transaction.program_indices[1].len(), 2);
for program_indices in loaded_transaction.program_indices.iter() {
for (i, program_index) in program_indices.iter().enumerate() {
// +1 to skip first not loader account
assert_eq!(
loaded_transaction.accounts[*program_index].0,
accounts[i + 1].0
);
assert_eq!(
loaded_transaction.accounts[*program_index].1,
accounts[i + 1].1
);
}
}
}
(Err(e), _nonce) => Err(e).unwrap(),
}
}
#[test]
fn test_load_lookup_table_addresses_account_not_found() {
let ancestors = vec![(0, 0)].into_iter().collect();
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
let invalid_table_key = Pubkey::new_unique();
let address_table_lookup = MessageAddressTableLookup {
account_key: invalid_table_key,
writable_indexes: vec![],
readonly_indexes: vec![],
};
assert_eq!(
accounts.load_lookup_table_addresses(
&ancestors,
&address_table_lookup,
&SlotHashes::default(),
),
Err(AddressLookupError::LookupTableAccountNotFound),
);
}
#[test]
fn test_load_lookup_table_addresses_invalid_account_owner() {
let ancestors = vec![(0, 0)].into_iter().collect();
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
let invalid_table_key = Pubkey::new_unique();
let invalid_table_account = AccountSharedData::default();
accounts.store_slow_uncached(0, &invalid_table_key, &invalid_table_account);
let address_table_lookup = MessageAddressTableLookup {
account_key: invalid_table_key,
writable_indexes: vec![],
readonly_indexes: vec![],
};
assert_eq!(
accounts.load_lookup_table_addresses(
&ancestors,
&address_table_lookup,
&SlotHashes::default(),
),
Err(AddressLookupError::InvalidAccountOwner),
);
}
#[test]
fn test_load_lookup_table_addresses_invalid_account_data() {
let ancestors = vec![(0, 0)].into_iter().collect();
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
let invalid_table_key = Pubkey::new_unique();
let invalid_table_account =
AccountSharedData::new(1, 0, &solana_address_lookup_table_program::id());
accounts.store_slow_uncached(0, &invalid_table_key, &invalid_table_account);
let address_table_lookup = MessageAddressTableLookup {
account_key: invalid_table_key,
writable_indexes: vec![],
readonly_indexes: vec![],
};
assert_eq!(
accounts.load_lookup_table_addresses(
&ancestors,
&address_table_lookup,
&SlotHashes::default(),
),
Err(AddressLookupError::InvalidAccountData),
);
}
#[test]
fn test_load_lookup_table_addresses() {
let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
let table_key = Pubkey::new_unique();
let table_addresses = vec![Pubkey::new_unique(), Pubkey::new_unique()];
let table_account = {
let table_state = AddressLookupTable {
meta: LookupTableMeta::default(),
addresses: Cow::Owned(table_addresses.clone()),
};
AccountSharedData::create(
1,
table_state.serialize_for_tests().unwrap(),
solana_address_lookup_table_program::id(),
false,
0,
)
};
accounts.store_slow_uncached(0, &table_key, &table_account);
let address_table_lookup = MessageAddressTableLookup {
account_key: table_key,
writable_indexes: vec![0],
readonly_indexes: vec![1],
};
assert_eq!(
accounts.load_lookup_table_addresses(
&ancestors,
&address_table_lookup,
&SlotHashes::default(),
),
Ok(LoadedAddresses {
writable: vec![table_addresses[0]],
readonly: vec![table_addresses[1]],
}),
);
}
#[test]
fn test_load_by_program_slot() {
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
// Load accounts owned by various programs into AccountsDb
let pubkey0 = solana_sdk::pubkey::new_rand();
let account0 = AccountSharedData::new(1, 0, &Pubkey::new(&[2; 32]));
accounts.store_slow_uncached(0, &pubkey0, &account0);
let pubkey1 = solana_sdk::pubkey::new_rand();
let account1 = AccountSharedData::new(1, 0, &Pubkey::new(&[2; 32]));
accounts.store_slow_uncached(0, &pubkey1, &account1);
let pubkey2 = solana_sdk::pubkey::new_rand();
let account2 = AccountSharedData::new(1, 0, &Pubkey::new(&[3; 32]));
accounts.store_slow_uncached(0, &pubkey2, &account2);
let loaded = accounts.load_by_program_slot(0, Some(&Pubkey::new(&[2; 32])));
assert_eq!(loaded.len(), 2);
let loaded = accounts.load_by_program_slot(0, Some(&Pubkey::new(&[3; 32])));
assert_eq!(loaded, vec![(pubkey2, account2)]);
let loaded = accounts.load_by_program_slot(0, Some(&Pubkey::new(&[4; 32])));
assert_eq!(loaded, vec![]);
}
#[test]
fn test_load_accounts_executable_with_write_lock() {
let mut accounts: Vec<TransactionAccount> = Vec::new();
let mut error_counters = ErrorCounters::default();
let keypair = Keypair::new();
let key0 = keypair.pubkey();
let key1 = Pubkey::new(&[5u8; 32]);
let key2 = Pubkey::new(&[6u8; 32]);
let mut account = AccountSharedData::new(1, 0, &Pubkey::default());
account.set_rent_epoch(1);
accounts.push((key0, account));
let mut account = AccountSharedData::new(40, 1, &native_loader::id());
account.set_executable(true);
account.set_rent_epoch(1);
accounts.push((key1, account));
let mut account = AccountSharedData::new(40, 1, &native_loader::id());
account.set_executable(true);
account.set_rent_epoch(1);
accounts.push((key2, account));
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let mut message = Message::new_with_compiled_instructions(
1,
0,
1, // only one executable marked as readonly
vec![key0, key1, key2],
Hash::default(),
instructions,
);
let tx = Transaction::new(&[&keypair], message.clone(), Hash::default());
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.invalid_writable_account, 1);
assert_eq!(loaded_accounts.len(), 1);
assert_eq!(
loaded_accounts[0],
(Err(TransactionError::InvalidWritableAccount), None)
);
// Mark executables as readonly
message.account_keys = vec![key0, key1, key2]; // revert key change
message.header.num_readonly_unsigned_accounts = 2; // mark both executables as readonly
let tx = Transaction::new(&[&keypair], message, Hash::default());
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.invalid_writable_account, 1);
assert_eq!(loaded_accounts.len(), 1);
let result = loaded_accounts[0].0.as_ref().unwrap();
assert_eq!(result.accounts[..2], accounts[..2]);
assert_eq!(result.accounts[result.program_indices[0][0]], accounts[2]);
}
#[test]
fn test_load_accounts_upgradeable_with_write_lock() {
let mut accounts: Vec<TransactionAccount> = Vec::new();
let mut error_counters = ErrorCounters::default();
let keypair = Keypair::new();
let key0 = keypair.pubkey();
let key1 = Pubkey::new(&[5u8; 32]);
let key2 = Pubkey::new(&[6u8; 32]);
let programdata_key1 = Pubkey::new(&[7u8; 32]);
let programdata_key2 = Pubkey::new(&[8u8; 32]);
let mut account = AccountSharedData::new(1, 0, &Pubkey::default());
account.set_rent_epoch(1);
accounts.push((key0, account));
let program_data = UpgradeableLoaderState::ProgramData {
slot: 42,
upgrade_authority_address: None,
};
let program = UpgradeableLoaderState::Program {
programdata_address: programdata_key1,
};
let mut account =
AccountSharedData::new_data(40, &program, &bpf_loader_upgradeable::id()).unwrap();
account.set_executable(true);
account.set_rent_epoch(1);
accounts.push((key1, account));
let mut account =
AccountSharedData::new_data(40, &program_data, &bpf_loader_upgradeable::id()).unwrap();
account.set_rent_epoch(1);
accounts.push((programdata_key1, account));
let program = UpgradeableLoaderState::Program {
programdata_address: programdata_key2,
};
let mut account =
AccountSharedData::new_data(40, &program, &bpf_loader_upgradeable::id()).unwrap();
account.set_executable(true);
account.set_rent_epoch(1);
accounts.push((key2, account));
let mut account =
AccountSharedData::new_data(40, &program_data, &bpf_loader_upgradeable::id()).unwrap();
account.set_rent_epoch(1);
accounts.push((programdata_key2, account));
let mut account = AccountSharedData::new(40, 1, &native_loader::id()); // create mock bpf_loader_upgradeable
account.set_executable(true);
account.set_rent_epoch(1);
accounts.push((bpf_loader_upgradeable::id(), account));
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let mut message = Message::new_with_compiled_instructions(
1,
0,
1, // only one executable marked as readonly
vec![key0, key1, key2],
Hash::default(),
instructions,
);
let tx = Transaction::new(&[&keypair], message.clone(), Hash::default());
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.invalid_writable_account, 1);
assert_eq!(loaded_accounts.len(), 1);
assert_eq!(
loaded_accounts[0],
(Err(TransactionError::InvalidWritableAccount), None)
);
// Solution 1: include bpf_loader_upgradeable account
message.account_keys = vec![key0, key1, bpf_loader_upgradeable::id()];
let tx = Transaction::new(&[&keypair], message.clone(), Hash::default());
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.invalid_writable_account, 1);
assert_eq!(loaded_accounts.len(), 1);
let result = loaded_accounts[0].0.as_ref().unwrap();
assert_eq!(result.accounts[..2], accounts[..2]);
assert_eq!(result.accounts[result.program_indices[0][0]], accounts[5]);
// Solution 2: mark programdata as readonly
message.account_keys = vec![key0, key1, key2]; // revert key change
message.header.num_readonly_unsigned_accounts = 2; // mark both executables as readonly
let tx = Transaction::new(&[&keypair], message, Hash::default());
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.invalid_writable_account, 1);
assert_eq!(loaded_accounts.len(), 1);
let result = loaded_accounts[0].0.as_ref().unwrap();
assert_eq!(result.accounts[..2], accounts[..2]);
assert_eq!(result.accounts[result.program_indices[0][0]], accounts[5]);
assert_eq!(result.accounts[result.program_indices[0][1]], accounts[4]);
assert_eq!(result.accounts[result.program_indices[0][2]], accounts[3]);
}
#[test]
fn test_load_accounts_programdata_with_write_lock() {
let mut accounts: Vec<TransactionAccount> = Vec::new();
let mut error_counters = ErrorCounters::default();
let keypair = Keypair::new();
let key0 = keypair.pubkey();
let key1 = Pubkey::new(&[5u8; 32]);
let key2 = Pubkey::new(&[6u8; 32]);
let mut account = AccountSharedData::new(1, 0, &Pubkey::default());
account.set_rent_epoch(1);
accounts.push((key0, account));
let program_data = UpgradeableLoaderState::ProgramData {
slot: 42,
upgrade_authority_address: None,
};
let mut account =
AccountSharedData::new_data(40, &program_data, &bpf_loader_upgradeable::id()).unwrap();
account.set_rent_epoch(1);
accounts.push((key1, account));
let mut account = AccountSharedData::new(40, 1, &native_loader::id());
account.set_executable(true);
account.set_rent_epoch(1);
accounts.push((key2, account));
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let mut message = Message::new_with_compiled_instructions(
1,
0,
1, // only the program marked as readonly
vec![key0, key1, key2],
Hash::default(),
instructions,
);
let tx = Transaction::new(&[&keypair], message.clone(), Hash::default());
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.invalid_writable_account, 1);
assert_eq!(loaded_accounts.len(), 1);
assert_eq!(
loaded_accounts[0],
(Err(TransactionError::InvalidWritableAccount), None)
);
// Solution 1: include bpf_loader_upgradeable account
let mut account = AccountSharedData::new(40, 1, &native_loader::id()); // create mock bpf_loader_upgradeable
account.set_executable(true);
account.set_rent_epoch(1);
let accounts_with_upgradeable_loader = vec![
accounts[0].clone(),
accounts[1].clone(),
(bpf_loader_upgradeable::id(), account),
];
message.account_keys = vec![key0, key1, bpf_loader_upgradeable::id()];
let tx = Transaction::new(&[&keypair], message.clone(), Hash::default());
let loaded_accounts =
load_accounts(tx, &accounts_with_upgradeable_loader, &mut error_counters);
assert_eq!(error_counters.invalid_writable_account, 1);
assert_eq!(loaded_accounts.len(), 1);
let result = loaded_accounts[0].0.as_ref().unwrap();
assert_eq!(result.accounts[..2], accounts_with_upgradeable_loader[..2]);
assert_eq!(
result.accounts[result.program_indices[0][0]],
accounts_with_upgradeable_loader[2]
);
// Solution 2: mark programdata as readonly
message.account_keys = vec![key0, key1, key2]; // revert key change
message.header.num_readonly_unsigned_accounts = 2; // extend readonly set to include programdata
let tx = Transaction::new(&[&keypair], message, Hash::default());
let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);
assert_eq!(error_counters.invalid_writable_account, 1);
assert_eq!(loaded_accounts.len(), 1);
let result = loaded_accounts[0].0.as_ref().unwrap();
assert_eq!(result.accounts[..2], accounts[..2]);
assert_eq!(result.accounts[result.program_indices[0][0]], accounts[2]);
}
#[test]
fn test_accounts_account_not_found() {
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
let mut error_counters = ErrorCounters::default();
let ancestors = vec![(0, 0)].into_iter().collect();
let keypair = Keypair::new();
let mut account = AccountSharedData::new(1, 0, &Pubkey::default());
account.set_executable(true);
accounts.store_slow_uncached(0, &keypair.pubkey(), &account);
assert_eq!(
accounts.load_executable_accounts(
&ancestors,
&mut vec![(keypair.pubkey(), account)],
0,
&mut error_counters,
),
Err(TransactionError::ProgramAccountNotFound)
);
assert_eq!(error_counters.account_not_found, 1);
}
#[test]
#[should_panic]
fn test_accounts_empty_bank_hash() {
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
accounts.bank_hash_at(1);
}
#[test]
fn test_lock_accounts_with_duplicates() {
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
let keypair = Keypair::new();
let message = Message {
header: MessageHeader {
num_required_signatures: 1,
..MessageHeader::default()
},
account_keys: vec![keypair.pubkey(), keypair.pubkey()],
..Message::default()
};
let tx = new_sanitized_tx(&[&keypair], message, Hash::default());
let results = accounts.lock_accounts([tx].iter(), &FeatureSet::all_enabled());
assert_eq!(results[0], Err(TransactionError::AccountLoadedTwice));
}
#[test]
fn test_lock_accounts_with_too_many_accounts() {
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
let keypair = Keypair::new();
// Allow up to MAX_TX_ACCOUNT_LOCKS
{
let num_account_keys = MAX_TX_ACCOUNT_LOCKS;
let mut account_keys: Vec<_> = (0..num_account_keys)
.map(|_| Pubkey::new_unique())
.collect();
account_keys[0] = keypair.pubkey();
let message = Message {
header: MessageHeader {
num_required_signatures: 1,
..MessageHeader::default()
},
account_keys,
..Message::default()
};
let txs = vec![new_sanitized_tx(&[&keypair], message, Hash::default())];
let results = accounts.lock_accounts(txs.iter(), &FeatureSet::all_enabled());
assert_eq!(results[0], Ok(()));
accounts.unlock_accounts(txs.iter(), &results);
}
// Allow over MAX_TX_ACCOUNT_LOCKS before feature activation
{
let num_account_keys = MAX_TX_ACCOUNT_LOCKS + 1;
let mut account_keys: Vec<_> = (0..num_account_keys)
.map(|_| Pubkey::new_unique())
.collect();
account_keys[0] = keypair.pubkey();
let message = Message {
header: MessageHeader {
num_required_signatures: 1,
..MessageHeader::default()
},
account_keys,
..Message::default()
};
let txs = vec![new_sanitized_tx(&[&keypair], message, Hash::default())];
let results = accounts.lock_accounts(txs.iter(), &FeatureSet::default());
assert_eq!(results[0], Ok(()));
accounts.unlock_accounts(txs.iter(), &results);
}
// Disallow over MAX_TX_ACCOUNT_LOCKS after feature activation
{
let num_account_keys = MAX_TX_ACCOUNT_LOCKS + 1;
let mut account_keys: Vec<_> = (0..num_account_keys)
.map(|_| Pubkey::new_unique())
.collect();
account_keys[0] = keypair.pubkey();
let message = Message {
header: MessageHeader {
num_required_signatures: 1,
..MessageHeader::default()
},
account_keys,
..Message::default()
};
let txs = vec![new_sanitized_tx(&[&keypair], message, Hash::default())];
let results = accounts.lock_accounts(txs.iter(), &FeatureSet::all_enabled());
assert_eq!(results[0], Err(TransactionError::TooManyAccountLocks));
}
}
#[test]
fn test_accounts_locks() {
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
let account0 = AccountSharedData::new(1, 0, &Pubkey::default());
let account1 = AccountSharedData::new(2, 0, &Pubkey::default());
let account2 = AccountSharedData::new(3, 0, &Pubkey::default());
let account3 = AccountSharedData::new(4, 0, &Pubkey::default());
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
accounts.store_slow_uncached(0, &keypair0.pubkey(), &account0);
accounts.store_slow_uncached(0, &keypair1.pubkey(), &account1);
accounts.store_slow_uncached(0, &keypair2.pubkey(), &account2);
accounts.store_slow_uncached(0, &keypair3.pubkey(), &account3);
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let message = Message::new_with_compiled_instructions(
1,
0,
2,
vec![keypair0.pubkey(), keypair1.pubkey(), native_loader::id()],
Hash::default(),
instructions,
);
let tx = new_sanitized_tx(&[&keypair0], message, Hash::default());
let results0 = accounts.lock_accounts([tx.clone()].iter(), &FeatureSet::all_enabled());
assert!(results0[0].is_ok());
assert_eq!(
*accounts
.account_locks
.lock()
.unwrap()
.readonly_locks
.get(&keypair1.pubkey())
.unwrap(),
1
);
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let message = Message::new_with_compiled_instructions(
1,
0,
2,
vec![keypair2.pubkey(), keypair1.pubkey(), native_loader::id()],
Hash::default(),
instructions,
);
let tx0 = new_sanitized_tx(&[&keypair2], message, Hash::default());
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let message = Message::new_with_compiled_instructions(
1,
0,
2,
vec![keypair1.pubkey(), keypair3.pubkey(), native_loader::id()],
Hash::default(),
instructions,
);
let tx1 = new_sanitized_tx(&[&keypair1], message, Hash::default());
let txs = vec![tx0, tx1];
let results1 = accounts.lock_accounts(txs.iter(), &FeatureSet::all_enabled());
assert!(results1[0].is_ok()); // Read-only account (keypair1) can be referenced multiple times
assert!(results1[1].is_err()); // Read-only account (keypair1) cannot also be locked as writable
assert_eq!(
*accounts
.account_locks
.lock()
.unwrap()
.readonly_locks
.get(&keypair1.pubkey())
.unwrap(),
2
);
accounts.unlock_accounts([tx].iter(), &results0);
accounts.unlock_accounts(txs.iter(), &results1);
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let message = Message::new_with_compiled_instructions(
1,
0,
2,
vec![keypair1.pubkey(), keypair3.pubkey(), native_loader::id()],
Hash::default(),
instructions,
);
let tx = new_sanitized_tx(&[&keypair1], message, Hash::default());
let results2 = accounts.lock_accounts([tx].iter(), &FeatureSet::all_enabled());
assert!(results2[0].is_ok()); // Now keypair1 account can be locked as writable
// Check that read-only lock with zero references is deleted
assert!(accounts
.account_locks
.lock()
.unwrap()
.readonly_locks
.get(&keypair1.pubkey())
.is_none());
}
#[test]
fn test_accounts_locks_multithreaded() {
let counter = Arc::new(AtomicU64::new(0));
let exit = Arc::new(AtomicBool::new(false));
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let account0 = AccountSharedData::new(1, 0, &Pubkey::default());
let account1 = AccountSharedData::new(2, 0, &Pubkey::default());
let account2 = AccountSharedData::new(3, 0, &Pubkey::default());
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
accounts.store_slow_uncached(0, &keypair0.pubkey(), &account0);
accounts.store_slow_uncached(0, &keypair1.pubkey(), &account1);
accounts.store_slow_uncached(0, &keypair2.pubkey(), &account2);
let accounts_arc = Arc::new(accounts);
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let readonly_message = Message::new_with_compiled_instructions(
1,
0,
2,
vec![keypair0.pubkey(), keypair1.pubkey(), native_loader::id()],
Hash::default(),
instructions,
);
let readonly_tx = new_sanitized_tx(&[&keypair0], readonly_message, Hash::default());
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let writable_message = Message::new_with_compiled_instructions(
1,
0,
2,
vec![keypair1.pubkey(), keypair2.pubkey(), native_loader::id()],
Hash::default(),
instructions,
);
let writable_tx = new_sanitized_tx(&[&keypair1], writable_message, Hash::default());
let counter_clone = counter.clone();
let accounts_clone = accounts_arc.clone();
let exit_clone = exit.clone();
thread::spawn(move || {
let counter_clone = counter_clone.clone();
let exit_clone = exit_clone.clone();
loop {
let txs = vec![writable_tx.clone()];
let results = accounts_clone
.clone()
.lock_accounts(txs.iter(), &FeatureSet::all_enabled());
for result in results.iter() {
if result.is_ok() {
counter_clone.clone().fetch_add(1, Ordering::SeqCst);
}
}
accounts_clone.unlock_accounts(txs.iter(), &results);
if exit_clone.clone().load(Ordering::Relaxed) {
break;
}
}
});
let counter_clone = counter;
for _ in 0..5 {
let txs = vec![readonly_tx.clone()];
let results = accounts_arc
.clone()
.lock_accounts(txs.iter(), &FeatureSet::all_enabled());
if results[0].is_ok() {
let counter_value = counter_clone.clone().load(Ordering::SeqCst);
thread::sleep(time::Duration::from_millis(50));
assert_eq!(counter_value, counter_clone.clone().load(Ordering::SeqCst));
}
accounts_arc.unlock_accounts(txs.iter(), &results);
thread::sleep(time::Duration::from_millis(50));
}
exit.store(true, Ordering::Relaxed);
}
#[test]
fn test_demote_program_write_locks() {
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
let account0 = AccountSharedData::new(1, 0, &Pubkey::default());
let account1 = AccountSharedData::new(2, 0, &Pubkey::default());
let account2 = AccountSharedData::new(3, 0, &Pubkey::default());
let account3 = AccountSharedData::new(4, 0, &Pubkey::default());
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
accounts.store_slow_uncached(0, &keypair0.pubkey(), &account0);
accounts.store_slow_uncached(0, &keypair1.pubkey(), &account1);
accounts.store_slow_uncached(0, &keypair2.pubkey(), &account2);
accounts.store_slow_uncached(0, &keypair3.pubkey(), &account3);
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let message = Message::new_with_compiled_instructions(
1,
0,
0, // All accounts marked as writable
vec![keypair0.pubkey(), keypair1.pubkey(), native_loader::id()],
Hash::default(),
instructions,
);
let tx = new_sanitized_tx(&[&keypair0], message, Hash::default());
let results0 = accounts.lock_accounts([tx].iter(), &FeatureSet::all_enabled());
assert!(results0[0].is_ok());
// Instruction program-id account demoted to readonly
assert_eq!(
*accounts
.account_locks
.lock()
.unwrap()
.readonly_locks
.get(&native_loader::id())
.unwrap(),
1
);
// Non-program accounts remain writable
assert!(accounts
.account_locks
.lock()
.unwrap()
.write_locks
.contains(&keypair0.pubkey()));
assert!(accounts
.account_locks
.lock()
.unwrap()
.write_locks
.contains(&keypair1.pubkey()));
}
#[test]
fn test_accounts_locks_with_results() {
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
let account0 = AccountSharedData::new(1, 0, &Pubkey::default());
let account1 = AccountSharedData::new(2, 0, &Pubkey::default());
let account2 = AccountSharedData::new(3, 0, &Pubkey::default());
let account3 = AccountSharedData::new(4, 0, &Pubkey::default());
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
accounts.store_slow_uncached(0, &keypair0.pubkey(), &account0);
accounts.store_slow_uncached(0, &keypair1.pubkey(), &account1);
accounts.store_slow_uncached(0, &keypair2.pubkey(), &account2);
accounts.store_slow_uncached(0, &keypair3.pubkey(), &account3);
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let message = Message::new_with_compiled_instructions(
1,
0,
2,
vec![keypair1.pubkey(), keypair0.pubkey(), native_loader::id()],
Hash::default(),
instructions,
);
let tx0 = new_sanitized_tx(&[&keypair1], message, Hash::default());
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let message = Message::new_with_compiled_instructions(
1,
0,
2,
vec![keypair2.pubkey(), keypair0.pubkey(), native_loader::id()],
Hash::default(),
instructions,
);
let tx1 = new_sanitized_tx(&[&keypair2], message, Hash::default());
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let message = Message::new_with_compiled_instructions(
1,
0,
2,
vec![keypair3.pubkey(), keypair0.pubkey(), native_loader::id()],
Hash::default(),
instructions,
);
let tx2 = new_sanitized_tx(&[&keypair3], message, Hash::default());
let txs = vec![tx0, tx1, tx2];
let qos_results = vec![
Ok(()),
Err(TransactionError::WouldExceedMaxBlockCostLimit),
Ok(()),
];
let results = accounts.lock_accounts_with_results(
txs.iter(),
qos_results.into_iter(),
&FeatureSet::all_enabled(),
);
assert!(results[0].is_ok()); // Read-only account (keypair0) can be referenced multiple times
assert!(results[1].is_err()); // is not locked due to !qos_results[1].is_ok()
assert!(results[2].is_ok()); // Read-only account (keypair0) can be referenced multiple times
// verify that keypair0 read-only lock twice (for tx0 and tx2)
assert_eq!(
*accounts
.account_locks
.lock()
.unwrap()
.readonly_locks
.get(&keypair0.pubkey())
.unwrap(),
2
);
// verify that keypair2 (for tx1) is not write-locked
assert!(accounts
.account_locks
.lock()
.unwrap()
.write_locks
.get(&keypair2.pubkey())
.is_none());
accounts.unlock_accounts(txs.iter(), &results);
// check all locks to be removed
assert!(accounts
.account_locks
.lock()
.unwrap()
.readonly_locks
.is_empty());
assert!(accounts
.account_locks
.lock()
.unwrap()
.write_locks
.is_empty());
}
#[test]
fn test_collect_accounts_to_store() {
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let pubkey = solana_sdk::pubkey::new_rand();
let account0 = AccountSharedData::new(1, 0, &Pubkey::default());
let account1 = AccountSharedData::new(2, 0, &Pubkey::default());
let account2 = AccountSharedData::new(3, 0, &Pubkey::default());
let rent_collector = RentCollector::default();
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let message = Message::new_with_compiled_instructions(
1,
0,
2,
vec![keypair0.pubkey(), pubkey, native_loader::id()],
Hash::default(),
instructions,
);
let transaction_accounts0 = vec![
(message.account_keys[0], account0),
(message.account_keys[1], account2.clone()),
];
let tx0 = new_sanitized_tx(&[&keypair0], message, Hash::default());
let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
let message = Message::new_with_compiled_instructions(
1,
0,
2,
vec![keypair1.pubkey(), pubkey, native_loader::id()],
Hash::default(),
instructions,
);
let transaction_accounts1 = vec![
(message.account_keys[0], account1),
(message.account_keys[1], account2),
];
let tx1 = new_sanitized_tx(&[&keypair1], message, Hash::default());
let loaded0 = (
Ok(LoadedTransaction {
accounts: transaction_accounts0,
program_indices: vec![],
rent: 0,
rent_debits: RentDebits::default(),
}),
None,
);
let loaded1 = (
Ok(LoadedTransaction {
accounts: transaction_accounts1,
program_indices: vec![],
rent: 0,
rent_debits: RentDebits::default(),
}),
None,
);
let mut loaded = vec![loaded0, loaded1];
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
{
accounts
.account_locks
.lock()
.unwrap()
.insert_new_readonly(&pubkey);
}
let txs = vec![tx0, tx1];
let execution_results = vec![new_execution_result(Ok(()), None); 2];
let collected_accounts = accounts.collect_accounts_to_store(
&txs,
&execution_results,
loaded.as_mut_slice(),
&rent_collector,
&Hash::default(),
0,
true, // leave_nonce_on_success
);
assert_eq!(collected_accounts.len(), 2);
assert!(collected_accounts
.iter()
.any(|(pubkey, _account)| *pubkey == &keypair0.pubkey()));
assert!(collected_accounts
.iter()
.any(|(pubkey, _account)| *pubkey == &keypair1.pubkey()));
// Ensure readonly_lock reflects lock
assert_eq!(
*accounts
.account_locks
.lock()
.unwrap()
.readonly_locks
.get(&pubkey)
.unwrap(),
1
);
}
#[test]
fn huge_clean() {
solana_logger::setup();
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
let mut old_pubkey = Pubkey::default();
let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
info!("storing..");
for i in 0..2_000 {
let pubkey = solana_sdk::pubkey::new_rand();
let account =
AccountSharedData::new((i + 1) as u64, 0, AccountSharedData::default().owner());
accounts.store_slow_uncached(i, &pubkey, &account);
accounts.store_slow_uncached(i, &old_pubkey, &zero_account);
old_pubkey = pubkey;
accounts.add_root(i);
if i % 1_000 == 0 {
info!(" store {}", i);
}
}
info!("done..cleaning..");
accounts.accounts_db.clean_accounts(None, false, None);
}
fn load_accounts_no_store(accounts: &Accounts, tx: Transaction) -> Vec<TransactionLoadResult> {
let tx = SanitizedTransaction::from_transaction_for_tests(tx);
let rent_collector = RentCollector::default();
let mut hash_queue = BlockhashQueue::new(100);
hash_queue.register_hash(tx.message().recent_blockhash(), 10);
let ancestors = vec![(0, 0)].into_iter().collect();
let mut error_counters = ErrorCounters::default();
accounts.load_accounts(
&ancestors,
&[tx],
vec![(Ok(()), None)],
&hash_queue,
&mut error_counters,
&rent_collector,
&FeatureSet::all_enabled(),
&FeeStructure::default(),
)
}
#[test]
fn test_instructions() {
solana_logger::setup();
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
let instructions_key = solana_sdk::sysvar::instructions::id();
let keypair = Keypair::new();
let instructions = vec![CompiledInstruction::new(1, &(), vec![0, 1])];
let tx = Transaction::new_with_compiled_instructions(
&[&keypair],
&[solana_sdk::pubkey::new_rand(), instructions_key],
Hash::default(),
vec![native_loader::id()],
instructions,
);
let loaded_accounts = load_accounts_no_store(&accounts, tx);
assert_eq!(loaded_accounts.len(), 1);
assert!(loaded_accounts[0].0.is_err());
}
fn create_accounts_prepare_if_nonce_account() -> (
Pubkey,
AccountSharedData,
AccountSharedData,
Hash,
u64,
Option<AccountSharedData>,
) {
let data =
NonceVersions::new_current(NonceState::Initialized(nonce::state::Data::default()));
let account = AccountSharedData::new_data(42, &data, &system_program::id()).unwrap();
let mut pre_account = account.clone();
pre_account.set_lamports(43);
(
Pubkey::default(),
pre_account,
account,
Hash::new(&[1u8; 32]),
1234,
None,
)
}
    /// Drive `prepare_if_nonce_account` once and assert that `account` ends
    /// up equal to `expect_account`.
    ///
    /// Before the call (and only when not the fee payer) the fixture itself
    /// is sanity-checked: if `maybe_nonce` matches `account_address`, the
    /// expected account must differ from the stored nonce account; otherwise
    /// it must equal the input `account` (i.e. no change expected).
    fn run_prepare_if_nonce_account_test(
        account_address: &Pubkey,
        account: &mut AccountSharedData,
        tx_result: &Result<()>,
        is_fee_payer: bool,
        maybe_nonce: Option<(&NonceFull, bool)>,
        blockhash: &Hash,
        lamports_per_signature: u64,
        expect_account: &AccountSharedData,
    ) -> bool {
        // Verify expect_account's relationship
        if !is_fee_payer {
            match maybe_nonce {
                Some((nonce, _)) if nonce.address() == account_address => {
                    assert_ne!(expect_account, nonce.account())
                }
                _ => assert_eq!(expect_account, account),
            }
        }
        prepare_if_nonce_account(
            account_address,
            account,
            tx_result,
            is_fee_payer,
            maybe_nonce,
            blockhash,
            lamports_per_signature,
        );
        // The assert gives a useful diff on mismatch; the boolean return
        // below is then always true for callers that reach it.
        assert_eq!(expect_account, account);
        expect_account == account
    }
#[test]
fn test_prepare_if_nonce_account_expected() {
let (
pre_account_address,
pre_account,
mut post_account,
blockhash,
lamports_per_signature,
maybe_fee_payer_account,
) = create_accounts_prepare_if_nonce_account();
let post_account_address = pre_account_address;
let nonce = NonceFull::new(
pre_account_address,
pre_account.clone(),
maybe_fee_payer_account,
);
let mut expect_account = pre_account;
expect_account
.set_state(&NonceVersions::new_current(NonceState::Initialized(
nonce::state::Data::new(Pubkey::default(), blockhash, lamports_per_signature),
)))
.unwrap();
assert!(run_prepare_if_nonce_account_test(
&post_account_address,
&mut post_account,
&Ok(()),
false,
Some((&nonce, true)),
&blockhash,
lamports_per_signature,
&expect_account,
));
}
#[test]
fn test_prepare_if_nonce_account_not_nonce_tx() {
let (
pre_account_address,
_pre_account,
_post_account,
blockhash,
lamports_per_signature,
_maybe_fee_payer_account,
) = create_accounts_prepare_if_nonce_account();
let post_account_address = pre_account_address;
let mut post_account = AccountSharedData::default();
let expect_account = post_account.clone();
assert!(run_prepare_if_nonce_account_test(
&post_account_address,
&mut post_account,
&Ok(()),
false,
None,
&blockhash,
lamports_per_signature,
&expect_account,
));
}
#[test]
fn test_prepare_if_nonce_account_not_nonce_address() {
let (
pre_account_address,
pre_account,
mut post_account,
blockhash,
lamports_per_signature,
maybe_fee_payer_account,
) = create_accounts_prepare_if_nonce_account();
let nonce = NonceFull::new(pre_account_address, pre_account, maybe_fee_payer_account);
let expect_account = post_account.clone();
// Wrong key
assert!(run_prepare_if_nonce_account_test(
&Pubkey::new(&[1u8; 32]),
&mut post_account,
&Ok(()),
false,
Some((&nonce, true)),
&blockhash,
lamports_per_signature,
&expect_account,
));
}
#[test]
fn test_prepare_if_nonce_account_tx_error() {
let (
pre_account_address,
pre_account,
mut post_account,
blockhash,
lamports_per_signature,
maybe_fee_payer_account,
) = create_accounts_prepare_if_nonce_account();
let post_account_address = pre_account_address;
let mut expect_account = pre_account.clone();
let nonce = NonceFull::new(pre_account_address, pre_account, maybe_fee_payer_account);
expect_account
.set_state(&NonceVersions::new_current(NonceState::Initialized(
nonce::state::Data::new(Pubkey::default(), blockhash, lamports_per_signature),
)))
.unwrap();
assert!(run_prepare_if_nonce_account_test(
&post_account_address,
&mut post_account,
&Err(TransactionError::InstructionError(
0,
InstructionError::InvalidArgument,
)),
false,
Some((&nonce, true)),
&blockhash,
lamports_per_signature,
&expect_account,
));
}
#[test]
fn test_rollback_nonce_fee_payer() {
let nonce_account = AccountSharedData::new_data(1, &(), &system_program::id()).unwrap();
let pre_fee_payer_account =
AccountSharedData::new_data(42, &(), &system_program::id()).unwrap();
let mut post_fee_payer_account =
AccountSharedData::new_data(84, &[1, 2, 3, 4], &system_program::id()).unwrap();
let nonce = NonceFull::new(
Pubkey::new_unique(),
nonce_account,
Some(pre_fee_payer_account.clone()),
);
assert!(run_prepare_if_nonce_account_test(
&Pubkey::new_unique(),
&mut post_fee_payer_account.clone(),
&Err(TransactionError::InstructionError(
0,
InstructionError::InvalidArgument,
)),
false,
Some((&nonce, true)),
&Hash::default(),
1,
&post_fee_payer_account.clone(),
));
assert!(run_prepare_if_nonce_account_test(
&Pubkey::new_unique(),
&mut post_fee_payer_account.clone(),
&Ok(()),
true,
Some((&nonce, true)),
&Hash::default(),
1,
&post_fee_payer_account.clone(),
));
assert!(run_prepare_if_nonce_account_test(
&Pubkey::new_unique(),
&mut post_fee_payer_account.clone(),
&Err(TransactionError::InstructionError(
0,
InstructionError::InvalidArgument,
)),
true,
None,
&Hash::default(),
1,
&post_fee_payer_account.clone(),
));
assert!(run_prepare_if_nonce_account_test(
&Pubkey::new_unique(),
&mut post_fee_payer_account,
&Err(TransactionError::InstructionError(
0,
InstructionError::InvalidArgument,
)),
true,
Some((&nonce, true)),
&Hash::default(),
1,
&pre_fee_payer_account,
));
}
#[test]
fn test_nonced_failure_accounts_rollback_from_pays() {
let rent_collector = RentCollector::default();
let nonce_address = Pubkey::new_unique();
let nonce_authority = keypair_from_seed(&[0; 32]).unwrap();
let from = keypair_from_seed(&[1; 32]).unwrap();
let from_address = from.pubkey();
let to_address = Pubkey::new_unique();
let nonce_state = NonceVersions::new_current(NonceState::Initialized(
nonce::state::Data::new(nonce_authority.pubkey(), Hash::new_unique(), 0),
));
let nonce_account_post =
AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap();
let from_account_post = AccountSharedData::new(4199, 0, &Pubkey::default());
let to_account = AccountSharedData::new(2, 0, &Pubkey::default());
let nonce_authority_account = AccountSharedData::new(3, 0, &Pubkey::default());
let recent_blockhashes_sysvar_account = AccountSharedData::new(4, 0, &Pubkey::default());
let instructions = vec![
system_instruction::advance_nonce_account(&nonce_address, &nonce_authority.pubkey()),
system_instruction::transfer(&from_address, &to_address, 42),
];
let message = Message::new(&instructions, Some(&from_address));
let blockhash = Hash::new_unique();
let transaction_accounts = vec![
(message.account_keys[0], from_account_post),
(message.account_keys[1], nonce_authority_account),
(message.account_keys[2], nonce_account_post),
(message.account_keys[3], to_account),
(message.account_keys[4], recent_blockhashes_sysvar_account),
];
let tx = new_sanitized_tx(&[&nonce_authority, &from], message, blockhash);
let nonce_state = NonceVersions::new_current(NonceState::Initialized(
nonce::state::Data::new(nonce_authority.pubkey(), Hash::new_unique(), 0),
));
let nonce_account_pre =
AccountSharedData::new_data(42, &nonce_state, &system_program::id()).unwrap();
let from_account_pre = AccountSharedData::new(4242, 0, &Pubkey::default());
let nonce = Some(NonceFull::new(
nonce_address,
nonce_account_pre.clone(),
Some(from_account_pre.clone()),
));
let loaded = (
Ok(LoadedTransaction {
accounts: transaction_accounts,
program_indices: vec![],
rent: 0,
rent_debits: RentDebits::default(),
}),
nonce.clone(),
);
let mut loaded = vec![loaded];
let next_blockhash = Hash::new_unique();
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
let txs = vec![tx];
let execution_results = vec![new_execution_result(
Err(TransactionError::InstructionError(
1,
InstructionError::InvalidArgument,
)),
nonce.as_ref(),
)];
let collected_accounts = accounts.collect_accounts_to_store(
&txs,
&execution_results,
loaded.as_mut_slice(),
&rent_collector,
&next_blockhash,
0,
true, // leave_nonce_on_success
);
assert_eq!(collected_accounts.len(), 2);
assert_eq!(
collected_accounts
.iter()
.find(|(pubkey, _account)| *pubkey == &from_address)
.map(|(_pubkey, account)| *account)
.cloned()
.unwrap(),
from_account_pre,
);
let collected_nonce_account = collected_accounts
.iter()
.find(|(pubkey, _account)| *pubkey == &nonce_address)
.map(|(_pubkey, account)| *account)
.cloned()
.unwrap();
assert_eq!(
collected_nonce_account.lamports(),
nonce_account_pre.lamports(),
);
assert!(nonce_account::verify_nonce_account(
&collected_nonce_account,
&next_blockhash
));
}
#[test]
fn test_nonced_failure_accounts_rollback_nonce_pays() {
let rent_collector = RentCollector::default();
let nonce_authority = keypair_from_seed(&[0; 32]).unwrap();
let nonce_address = nonce_authority.pubkey();
let from = keypair_from_seed(&[1; 32]).unwrap();
let from_address = from.pubkey();
let to_address = Pubkey::new_unique();
let nonce_state = NonceVersions::new_current(NonceState::Initialized(
nonce::state::Data::new(nonce_authority.pubkey(), Hash::new_unique(), 0),
));
let nonce_account_post =
AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap();
let from_account_post = AccountSharedData::new(4200, 0, &Pubkey::default());
let to_account = AccountSharedData::new(2, 0, &Pubkey::default());
let nonce_authority_account = AccountSharedData::new(3, 0, &Pubkey::default());
let recent_blockhashes_sysvar_account = AccountSharedData::new(4, 0, &Pubkey::default());
let instructions = vec![
system_instruction::advance_nonce_account(&nonce_address, &nonce_authority.pubkey()),
system_instruction::transfer(&from_address, &to_address, 42),
];
let message = Message::new(&instructions, Some(&nonce_address));
let blockhash = Hash::new_unique();
let transaction_accounts = vec![
(message.account_keys[0], from_account_post),
(message.account_keys[1], nonce_authority_account),
(message.account_keys[2], nonce_account_post),
(message.account_keys[3], to_account),
(message.account_keys[4], recent_blockhashes_sysvar_account),
];
let tx = new_sanitized_tx(&[&nonce_authority, &from], message, blockhash);
let nonce_state = NonceVersions::new_current(NonceState::Initialized(
nonce::state::Data::new(nonce_authority.pubkey(), Hash::new_unique(), 0),
));
let nonce_account_pre =
AccountSharedData::new_data(42, &nonce_state, &system_program::id()).unwrap();
let nonce = Some(NonceFull::new(
nonce_address,
nonce_account_pre.clone(),
None,
));
let loaded = (
Ok(LoadedTransaction {
accounts: transaction_accounts,
program_indices: vec![],
rent: 0,
rent_debits: RentDebits::default(),
}),
nonce.clone(),
);
let mut loaded = vec![loaded];
let next_blockhash = Hash::new_unique();
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
let txs = vec![tx];
let execution_results = vec![new_execution_result(
Err(TransactionError::InstructionError(
1,
InstructionError::InvalidArgument,
)),
nonce.as_ref(),
)];
let collected_accounts = accounts.collect_accounts_to_store(
&txs,
&execution_results,
loaded.as_mut_slice(),
&rent_collector,
&next_blockhash,
0,
true, // leave_nonce_on_success
);
assert_eq!(collected_accounts.len(), 1);
let collected_nonce_account = collected_accounts
.iter()
.find(|(pubkey, _account)| *pubkey == &nonce_address)
.map(|(_pubkey, account)| *account)
.cloned()
.unwrap();
assert_eq!(
collected_nonce_account.lamports(),
nonce_account_pre.lamports()
);
assert!(nonce_account::verify_nonce_account(
&collected_nonce_account,
&next_blockhash
));
}
#[test]
fn test_load_largest_accounts() {
let accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
let pubkey0 = Pubkey::new_unique();
let account0 = AccountSharedData::new(42, 0, &Pubkey::default());
accounts.store_slow_uncached(0, &pubkey0, &account0);
let pubkey1 = Pubkey::new_unique();
let account1 = AccountSharedData::new(42, 0, &Pubkey::default());
accounts.store_slow_uncached(0, &pubkey1, &account1);
let pubkey2 = Pubkey::new_unique();
let account2 = AccountSharedData::new(41, 0, &Pubkey::default());
accounts.store_slow_uncached(0, &pubkey2, &account2);
let ancestors = vec![(0, 0)].into_iter().collect();
let all_pubkeys: HashSet<_> = vec![pubkey0, pubkey1, pubkey2].into_iter().collect();
// num == 0 should always return empty set
let bank_id = 0;
assert_eq!(
accounts
.load_largest_accounts(
&ancestors,
bank_id,
0,
&HashSet::new(),
AccountAddressFilter::Exclude
)
.unwrap(),
vec![]
);
assert_eq!(
accounts
.load_largest_accounts(
&ancestors,
bank_id,
0,
&all_pubkeys,
AccountAddressFilter::Include
)
.unwrap(),
vec![]
);
// list should be sorted by balance, then pubkey, descending
assert!(pubkey1 > pubkey0);
assert_eq!(
accounts
.load_largest_accounts(
&ancestors,
bank_id,
1,
&HashSet::new(),
AccountAddressFilter::Exclude
)
.unwrap(),
vec![(pubkey1, 42)]
);
assert_eq!(
accounts
.load_largest_accounts(
&ancestors,
bank_id,
2,
&HashSet::new(),
AccountAddressFilter::Exclude
)
.unwrap(),
vec![(pubkey1, 42), (pubkey0, 42)]
);
assert_eq!(
accounts
.load_largest_accounts(
&ancestors,
bank_id,
3,
&HashSet::new(),
AccountAddressFilter::Exclude
)
.unwrap(),
vec![(pubkey1, 42), (pubkey0, 42), (pubkey2, 41)]
);
// larger num should not affect results
assert_eq!(
accounts
.load_largest_accounts(
&ancestors,
bank_id,
6,
&HashSet::new(),
AccountAddressFilter::Exclude
)
.unwrap(),
vec![(pubkey1, 42), (pubkey0, 42), (pubkey2, 41)]
);
// AccountAddressFilter::Exclude should exclude entry
let exclude1: HashSet<_> = vec![pubkey1].into_iter().collect();
assert_eq!(
accounts
.load_largest_accounts(
&ancestors,
bank_id,
1,
&exclude1,
AccountAddressFilter::Exclude
)
.unwrap(),
vec![(pubkey0, 42)]
);
assert_eq!(
accounts
.load_largest_accounts(
&ancestors,
bank_id,
2,
&exclude1,
AccountAddressFilter::Exclude
)
.unwrap(),
vec![(pubkey0, 42), (pubkey2, 41)]
);
assert_eq!(
accounts
.load_largest_accounts(
&ancestors,
bank_id,
3,
&exclude1,
AccountAddressFilter::Exclude
)
.unwrap(),
vec![(pubkey0, 42), (pubkey2, 41)]
);
// AccountAddressFilter::Include should limit entries
let include1_2: HashSet<_> = vec![pubkey1, pubkey2].into_iter().collect();
assert_eq!(
accounts
.load_largest_accounts(
&ancestors,
bank_id,
1,
&include1_2,
AccountAddressFilter::Include
)
.unwrap(),
vec![(pubkey1, 42)]
);
assert_eq!(
accounts
.load_largest_accounts(
&ancestors,
bank_id,
2,
&include1_2,
AccountAddressFilter::Include
)
.unwrap(),
vec![(pubkey1, 42), (pubkey2, 41)]
);
assert_eq!(
accounts
.load_largest_accounts(
&ancestors,
bank_id,
3,
&include1_2,
AccountAddressFilter::Include
)
.unwrap(),
vec![(pubkey1, 42), (pubkey2, 41)]
);
}
fn zero_len_account_size() -> usize {
std::mem::size_of::<AccountSharedData>() + std::mem::size_of::<Pubkey>()
}
#[test]
fn test_calc_scan_result_size() {
for len in 0..3 {
assert_eq!(
Accounts::calc_scan_result_size(&AccountSharedData::new(
0,
len,
&Pubkey::default()
)),
zero_len_account_size() + len
);
}
}
#[test]
fn test_maybe_abort_scan() {
assert!(Accounts::maybe_abort_scan(ScanResult::Ok(vec![]), &ScanConfig::default()).is_ok());
let config = ScanConfig::default().recreate_with_abort();
assert!(Accounts::maybe_abort_scan(ScanResult::Ok(vec![]), &config).is_ok());
config.abort();
assert!(Accounts::maybe_abort_scan(ScanResult::Ok(vec![]), &config).is_err());
}
#[test]
fn test_accumulate_and_check_scan_result_size() {
for (account, byte_limit_for_scan, result) in [
(AccountSharedData::default(), zero_len_account_size(), false),
(
AccountSharedData::new(0, 1, &Pubkey::default()),
zero_len_account_size(),
true,
),
(
AccountSharedData::new(0, 2, &Pubkey::default()),
zero_len_account_size() + 3,
false,
),
] {
let sum = AtomicUsize::default();
assert_eq!(
result,
Accounts::accumulate_and_check_scan_result_size(
&sum,
&account,
&Some(byte_limit_for_scan)
)
);
// calling a second time should accumulate above the threshold
assert!(Accounts::accumulate_and_check_scan_result_size(
&sum,
&account,
&Some(byte_limit_for_scan)
));
assert!(!Accounts::accumulate_and_check_scan_result_size(
&sum, &account, &None
));
}
}
}
| 36.568622 | 123 | 0.54259 |
fb14a11da0e204480cb8528f8e7d2dfd220bf3d7
| 911 |
#![cfg_attr(feature = "cargo-clippy", allow(redundant_closure))]
use std::io;
use cobalt;
use liquid;
use serde_json;
use serde_yaml;
use toml;
// Crate-wide error types, generated by the `error_chain!` macro.
error_chain! {
    links {
    }
    // External-crate errors wrapped so they convert into `Error` via `From`.
    foreign_links {
        Io(io::Error);
        Cobalt(cobalt::Error);
        Liquid(liquid::Error);
        SerdeYaml(serde_yaml::Error);
        SerdeJson(serde_json::Error);
        Toml(toml::de::Error);
    }
    // Crate-specific error variants.
    errors {
        ConfigFileMissingFields {
            description("missing fields in config file")
            display("name, description and link need to be defined in the config file to \
                    generate RSS")
        }
        UnsupportedPlatform(functionality: &'static str, platform: &'static str) {
            description("functionality is not implemented for this platform")
            display("{} is not implemented for the {} platform", functionality, platform)
        }
    }
}
| 23.973684 | 90 | 0.603732 |
bffd9f3612b0a978ffb6426b9fc3932bfb728f87
| 9,113 |
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::higher;
use clippy_utils::ty::is_type_diagnostic_item;
use clippy_utils::{differing_macro_contexts, usage::is_potentially_mutated};
use if_chain::if_chain;
use rustc_hir::intravisit::{walk_expr, walk_fn, FnKind, NestedVisitorMap, Visitor};
use rustc_hir::{BinOpKind, Body, Expr, ExprKind, FnDecl, HirId, Path, QPath, UnOp};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::Map;
use rustc_middle::lint::in_external_macro;
use rustc_middle::ty::Ty;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::source_map::Span;
use rustc_span::sym;
declare_clippy_lint! {
    /// ### What it does
    /// Checks for calls of `unwrap[_err]()` that cannot fail.
    ///
    /// ### Why is this bad?
    /// Using `if let` or `match` is more idiomatic. The surrounding check
    /// already guarantees the variant, so the `unwrap` and its panic path
    /// are redundant.
    ///
    /// ### Example
    /// ```rust
    /// # let option = Some(0);
    /// # fn do_something_with(_x: usize) {}
    /// if option.is_some() {
    ///     do_something_with(option.unwrap())
    /// }
    /// ```
    ///
    /// Could be written:
    ///
    /// ```rust
    /// # let option = Some(0);
    /// # fn do_something_with(_x: usize) {}
    /// if let Some(value) = option {
    ///     do_something_with(value)
    /// }
    /// ```
    pub UNNECESSARY_UNWRAP,
    complexity,
    "checks for calls of `unwrap[_err]()` that cannot fail"
}
declare_clippy_lint! {
    /// ### What it does
    /// Checks for calls of `unwrap[_err]()` that will always fail.
    ///
    /// ### Why is this bad?
    /// If panicking is desired, an explicit `panic!()` should be used;
    /// otherwise the condition is most likely a bug.
    ///
    /// ### Known problems
    /// This lint only checks `if` conditions not assignments.
    /// So something like `let x: Option<()> = None; x.unwrap();` will not be recognized.
    ///
    /// ### Example
    /// ```rust
    /// # let option = Some(0);
    /// # fn do_something_with(_x: usize) {}
    /// if option.is_none() {
    ///     do_something_with(option.unwrap())
    /// }
    /// ```
    ///
    /// This code will always panic. The if condition should probably be inverted.
    pub PANICKING_UNWRAP,
    correctness,
    "checks for calls of `unwrap[_err]()` that will always fail"
}
/// Visitor that keeps track of which variables are unwrappable.
struct UnwrappableVariablesVisitor<'a, 'tcx> {
    /// Stack of checked variables, scoped to the branch currently being
    /// walked (pushed on branch entry, truncated on exit).
    unwrappables: Vec<UnwrapInfo<'tcx>>,
    /// Lint-pass context used for type lookups and mutation analysis.
    cx: &'a LateContext<'tcx>,
}
/// Contains information about whether a variable can be unwrapped.
#[derive(Copy, Clone, Debug)]
struct UnwrapInfo<'tcx> {
    /// The variable that is checked
    ident: &'tcx Path<'tcx>,
    /// The check, like `x.is_ok()`
    check: &'tcx Expr<'tcx>,
    /// The branch where the check takes place, like `if x.is_ok() { .. }`
    branch: &'tcx Expr<'tcx>,
    /// Whether `is_some()` or `is_ok()` was called (as opposed to `is_err()` or `is_none()`).
    /// When true the unwrap cannot fail in this branch; when false it always panics.
    safe_to_unwrap: bool,
}
/// Collects the information about unwrappable variables from an if condition
/// The `invert` argument tells us whether the condition is negated.
fn collect_unwrap_info<'tcx>(
    cx: &LateContext<'tcx>,
    expr: &'tcx Expr<'_>,
    branch: &'tcx Expr<'_>,
    invert: bool,
) -> Vec<UnwrapInfo<'tcx>> {
    // True for `Option::{is_some, is_none}` receivers.
    fn is_relevant_option_call(cx: &LateContext<'_>, ty: Ty<'_>, method_name: &str) -> bool {
        is_type_diagnostic_item(cx, ty, sym::option_type) && ["is_some", "is_none"].contains(&method_name)
    }
    // True for `Result::{is_ok, is_err}` receivers.
    fn is_relevant_result_call(cx: &LateContext<'_>, ty: Ty<'_>, method_name: &str) -> bool {
        is_type_diagnostic_item(cx, ty, sym::result_type) && ["is_ok", "is_err"].contains(&method_name)
    }
    if let ExprKind::Binary(op, left, right) = &expr.kind {
        match (invert, op.node) {
            // `a && b` un-negated, or `!(a || b)` (De Morgan): both operands'
            // checks hold inside the branch, so merge info from both sides.
            (false, BinOpKind::And | BinOpKind::BitAnd) | (true, BinOpKind::Or | BinOpKind::BitOr) => {
                let mut unwrap_info = collect_unwrap_info(cx, left, branch, invert);
                unwrap_info.append(&mut collect_unwrap_info(cx, right, branch, invert));
                return unwrap_info;
            },
            _ => (),
        }
    } else if let ExprKind::Unary(UnOp::Not, expr) = &expr.kind {
        // `!cond`: recurse with the polarity flipped.
        return collect_unwrap_info(cx, expr, branch, !invert);
    } else {
        if_chain! {
            // Base case: `receiver.is_some()/is_none()/is_ok()/is_err()`
            // where the receiver is a plain path (a local variable).
            if let ExprKind::MethodCall(method_name, _, args, _) = &expr.kind;
            if let ExprKind::Path(QPath::Resolved(None, path)) = &args[0].kind;
            let ty = cx.typeck_results().expr_ty(&args[0]);
            let name = method_name.ident.as_str();
            if is_relevant_option_call(cx, ty, &name) || is_relevant_result_call(cx, ty, &name);
            then {
                assert!(args.len() == 1);
                let unwrappable = match name.as_ref() {
                    "is_some" | "is_ok" => true,
                    "is_err" | "is_none" => false,
                    _ => unreachable!(),
                };
                // A positive check un-negated (or a negative check negated)
                // makes the unwrap safe within this branch.
                let safe_to_unwrap = unwrappable != invert;
                return vec![UnwrapInfo { ident: path, check: expr, branch, safe_to_unwrap }];
            }
        }
    }
    Vec::new()
}
impl<'a, 'tcx> UnwrappableVariablesVisitor<'a, 'tcx> {
    /// Collects the unwrappability facts implied by `cond` for one branch of
    /// an `if`, walks the branch with those facts in scope, then drops them.
    fn visit_branch(&mut self, cond: &'tcx Expr<'_>, branch: &'tcx Expr<'_>, else_branch: bool) {
        let prev_len = self.unwrappables.len();
        let infos = collect_unwrap_info(self.cx, cond, branch, else_branch);
        for info in infos {
            // A variable that may be mutated in the condition or in the branch
            // body can no longer be assumed to be (un)wrappable.
            let mutated = is_potentially_mutated(info.ident, cond, self.cx)
                || is_potentially_mutated(info.ident, branch, self.cx);
            if !mutated {
                self.unwrappables.push(info);
            }
        }
        walk_expr(self, branch);
        // The facts only hold inside this branch; restore the previous state.
        self.unwrappables.truncate(prev_len);
    }
}
impl<'a, 'tcx> Visitor<'tcx> for UnwrappableVariablesVisitor<'a, 'tcx> {
    type Map = Map<'tcx>;
    fn visit_expr(&mut self, expr: &'tcx Expr<'_>) {
        // Shouldn't lint when `expr` is in macro.
        if in_external_macro(self.cx.tcx.sess, expr.span) {
            return;
        }
        // Conditionals introduce new unwrappability facts, scoped to each branch.
        if let Some(higher::If { cond, then, r#else }) = higher::If::hir(expr) {
            walk_expr(self, cond);
            self.visit_branch(cond, then, false);
            if let Some(else_inner) = r#else {
                // The negation of `cond` holds in the else branch.
                self.visit_branch(cond, else_inner, true);
            }
        } else {
            // find `unwrap[_err]()` calls:
            if_chain! {
                if let ExprKind::MethodCall(method_name, _, args, _) = expr.kind;
                if let ExprKind::Path(QPath::Resolved(None, path)) = args[0].kind;
                if [sym::unwrap, sym!(unwrap_err)].contains(&method_name.ident.name);
                let call_to_unwrap = method_name.ident.name == sym::unwrap;
                // Match the receiver against a variable with a known check.
                if let Some(unwrappable) = self.unwrappables.iter()
                    .find(|u| u.ident.res == path.res);
                // Span contexts should not differ with the conditional branch
                if !differing_macro_contexts(unwrappable.branch.span, expr.span);
                if !differing_macro_contexts(unwrappable.branch.span, unwrappable.check.span);
                then {
                    // `unwrap()` on a positively checked value (or `unwrap_err()`
                    // on a negatively checked one) is redundant; the opposite
                    // combination always panics.
                    if call_to_unwrap == unwrappable.safe_to_unwrap {
                        span_lint_and_then(
                            self.cx,
                            UNNECESSARY_UNWRAP,
                            expr.span,
                            &format!("you checked before that `{}()` cannot fail, \
                            instead of checking and unwrapping, it's better to use `if let` or `match`",
                            method_name.ident.name),
                            |diag| { diag.span_label(unwrappable.check.span, "the check is happening here"); },
                        );
                    } else {
                        span_lint_and_then(
                            self.cx,
                            PANICKING_UNWRAP,
                            expr.span,
                            &format!("this call to `{}()` will always panic",
                            method_name.ident.name),
                            |diag| { diag.span_label(unwrappable.check.span, "because of this check"); },
                        );
                    }
                }
            }
            walk_expr(self, expr);
        }
    }
    fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
        NestedVisitorMap::OnlyBodies(self.cx.tcx.hir())
    }
}
// A single lint pass drives both lints: one walk over each function body
// feeds `PANICKING_UNWRAP` and `UNNECESSARY_UNWRAP`.
declare_lint_pass!(Unwrap => [PANICKING_UNWRAP, UNNECESSARY_UNWRAP]);
impl<'tcx> LateLintPass<'tcx> for Unwrap {
    /// Walks every function body, tracking which variables are known to be
    /// safe or unsafe to unwrap at each point.
    fn check_fn(
        &mut self,
        cx: &LateContext<'tcx>,
        kind: FnKind<'tcx>,
        decl: &'tcx FnDecl<'_>,
        body: &'tcx Body<'_>,
        span: Span,
        fn_id: HirId,
    ) {
        // Never lint functions generated by macro expansion.
        if span.from_expansion() {
            return;
        }
        let mut visitor = UnwrappableVariablesVisitor {
            unwrappables: Vec::new(),
            cx,
        };
        walk_fn(&mut visitor, kind, decl, body.id(), span, fn_id);
    }
}
| 38.289916 | 111 | 0.557336 |
09423b90a4c8e707188f0dfa56b82fc89af728f3
| 14,480 |
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use crate::configuration::{
PciBarRegionType, PciBridgeSubclass, PciClassCode, PciConfiguration, PciHeaderType,
};
use crate::device::{DeviceRelocation, Error as PciDeviceError, PciDevice};
use byteorder::{ByteOrder, LittleEndian};
use devices::BusDevice;
use std::any::Any;
use std::collections::HashMap;
use std::ops::DerefMut;
use std::sync::{Arc, Mutex};
use vm_memory::{Address, GuestAddress, GuestUsize};
// PCI vendor id used by the emulated host bridge.
const VENDOR_ID_INTEL: u16 = 0x8086;
// Device id advertised by the virtual PCIe host bridge.
const DEVICE_ID_INTEL_VIRT_PCIE_HOST: u16 = 0x0d57;
// One id per possible device slot on the (single) emulated bus.
const NUM_DEVICE_IDS: usize = 32;
/// Errors that can occur while managing devices on the PCI root/bus.
#[derive(Debug)]
pub enum PciRootError {
    /// Could not allocate device address space for the device.
    AllocateDeviceAddrs(PciDeviceError),
    /// Could not allocate an IRQ number.
    AllocateIrq,
    /// Could not add a device to the port io bus.
    PioInsert(devices::BusError),
    /// Could not add a device to the mmio bus.
    MmioInsert(devices::BusError),
    /// Could not find an available device slot on the PCI bus.
    NoPciDeviceSlotAvailable,
    /// Invalid PCI device identifier provided.
    InvalidPciDeviceSlot(usize),
    /// Valid PCI device identifier but already used.
    AlreadyInUsePciDeviceSlot(usize),
}
/// Shorthand result type for PCI root/bus operations.
pub type Result<T> = std::result::Result<T, PciRootError>;
/// Emulates the PCI Root bridge device.
pub struct PciRoot {
    /// Configuration space of the root bridge itself (installed as device 0
    /// on the bus).
    config: PciConfiguration,
}
impl PciRoot {
    /// Create a PCI root bridge, using `config` when supplied or a default
    /// Intel virtual PCIe host-bridge configuration otherwise.
    pub fn new(config: Option<PciConfiguration>) -> Self {
        let config = config.unwrap_or_else(|| {
            PciConfiguration::new(
                VENDOR_ID_INTEL,
                DEVICE_ID_INTEL_VIRT_PCIE_HOST,
                0,
                PciClassCode::BridgeDevice,
                &PciBridgeSubclass::HostBridge,
                None,
                PciHeaderType::Device,
                0,
                0,
                None,
            )
        });
        PciRoot { config }
    }
}
// The root bridge needs no custom port/MMIO handlers of its own; config
// accesses are routed through the `PciDevice` implementation below.
impl BusDevice for PciRoot {}
impl PciDevice for PciRoot {
    /// Forward a configuration-space write to the bridge's config space.
    fn write_config_register(&mut self, reg_idx: usize, offset: u64, data: &[u8]) {
        self.config.write_config_register(reg_idx, offset, data);
    }
    /// Read one 32-bit configuration register.
    fn read_config_register(&mut self, reg_idx: usize) -> u32 {
        self.config.read_reg(reg_idx)
    }
    fn as_any(&mut self) -> &mut dyn Any {
        self
    }
}
pub struct PciBus {
    /// Devices attached to this bus.
    /// Device 0 is host bridge.
    devices: HashMap<u32, Arc<Mutex<dyn PciDevice>>>,
    /// Callback used to move a device's BAR when the guest reprograms it.
    device_reloc: Arc<dyn DeviceRelocation>,
    /// Occupancy map for the `NUM_DEVICE_IDS` device slots; `true` = taken.
    device_ids: Vec<bool>,
}
impl PciBus {
    /// Create a bus with `pci_root` installed as device 0 (the host bridge).
    pub fn new(pci_root: PciRoot, device_reloc: Arc<dyn DeviceRelocation>) -> Self {
        let mut devices: HashMap<u32, Arc<Mutex<dyn PciDevice>>> = HashMap::new();
        devices.insert(0, Arc::new(Mutex::new(pci_root)));
        let mut device_ids: Vec<bool> = vec![false; NUM_DEVICE_IDS];
        device_ids[0] = true;
        PciBus {
            devices,
            device_reloc,
            device_ids,
        }
    }
    /// Register a device's BAR regions on the matching PIO or MMIO bus.
    pub fn register_mapping(
        &self,
        dev: Arc<Mutex<dyn BusDevice>>,
        io_bus: &devices::Bus,
        mmio_bus: &devices::Bus,
        bars: Vec<(GuestAddress, GuestUsize, PciBarRegionType)>,
    ) -> Result<()> {
        for (address, size, region_type) in bars {
            let base = address.raw_value();
            match region_type {
                PciBarRegionType::IORegion => {
                    io_bus
                        .insert(dev.clone(), base, size)
                        .map_err(PciRootError::PioInsert)?;
                }
                PciBarRegionType::Memory32BitRegion | PciBarRegionType::Memory64BitRegion => {
                    mmio_bus
                        .insert(dev.clone(), base, size)
                        .map_err(PciRootError::MmioInsert)?;
                }
            }
        }
        Ok(())
    }
    /// Attach `device`; the slot key is the device number from the BDF.
    pub fn add_device(
        &mut self,
        pci_device_bdf: u32,
        device: Arc<Mutex<dyn PciDevice>>,
    ) -> Result<()> {
        // BDF layout: bits [7:3] = device, bits [2:0] = function.
        self.devices.insert(pci_device_bdf >> 3, device);
        Ok(())
    }
    /// Detach every slot holding exactly this device (pointer identity).
    pub fn remove_by_device(&mut self, device: &Arc<Mutex<dyn PciDevice>>) -> Result<()> {
        self.devices.retain(|_, d| !Arc::ptr_eq(d, device));
        Ok(())
    }
    /// Reserve and return the lowest free device id.
    pub fn next_device_id(&mut self) -> Result<u32> {
        match self.device_ids.iter().position(|&used| !used) {
            Some(slot) => {
                self.device_ids[slot] = true;
                Ok(slot as u32)
            }
            None => Err(PciRootError::NoPciDeviceSlotAvailable),
        }
    }
    /// Claim a specific device id, failing if it is out of range or taken.
    pub fn get_device_id(&mut self, id: usize) -> Result<()> {
        if id >= NUM_DEVICE_IDS {
            return Err(PciRootError::InvalidPciDeviceSlot(id));
        }
        if self.device_ids[id] {
            return Err(PciRootError::AlreadyInUsePciDeviceSlot(id));
        }
        self.device_ids[id] = true;
        Ok(())
    }
    /// Release a previously claimed device id.
    pub fn put_device_id(&mut self, id: usize) -> Result<()> {
        if id >= NUM_DEVICE_IDS {
            return Err(PciRootError::InvalidPciDeviceSlot(id));
        }
        self.device_ids[id] = false;
        Ok(())
    }
}
/// Emulates the legacy x86 I/O-port (0xCF8/0xCFC) PCI configuration access
/// mechanism.
pub struct PciConfigIo {
    /// Config space register.
    config_address: u32,
    /// The bus whose devices' configuration space is exposed here.
    pci_bus: Arc<Mutex<PciBus>>,
}
impl PciConfigIo {
    pub fn new(pci_bus: Arc<Mutex<PciBus>>) -> Self {
        PciConfigIo {
            pci_bus,
            config_address: 0,
        }
    }
    /// Read the config-space dword currently selected by CONFIG_ADDRESS.
    ///
    /// Returns all-ones when the enable bit is clear, when the access targets
    /// a bus/function we don't emulate, or when no device sits in the slot.
    pub fn config_space_read(&self) -> u32 {
        let enabled = (self.config_address & 0x8000_0000) != 0;
        if !enabled {
            return 0xffff_ffff;
        }
        let (bus, device, function, register) =
            parse_io_config_address(self.config_address & !0x8000_0000);
        // Only support one bus.
        if bus != 0 {
            return 0xffff_ffff;
        }
        // Don't support multi-function devices.
        if function > 0 {
            return 0xffff_ffff;
        }
        self.pci_bus
            .lock()
            .unwrap()
            .devices
            .get(&(device as u32))
            .map_or(0xffff_ffff, |d| {
                d.lock().unwrap().read_config_register(register)
            })
    }
    /// Write to the config-space dword selected by CONFIG_ADDRESS, handling
    /// BAR reprogramming before the register value is updated.
    pub fn config_space_write(&mut self, offset: u64, data: &[u8]) {
        // Reject writes straddling the 4-byte register boundary.
        if offset as usize + data.len() > 4 {
            return;
        }
        let enabled = (self.config_address & 0x8000_0000) != 0;
        if !enabled {
            return;
        }
        let (bus, device, _function, register) =
            parse_io_config_address(self.config_address & !0x8000_0000);
        // Only support one bus.
        if bus != 0 {
            return;
        }
        let pci_bus = self.pci_bus.lock().unwrap();
        if let Some(d) = pci_bus.devices.get(&(device as u32)) {
            let mut device = d.lock().unwrap();
            // Find out if one of the device's BAR is being reprogrammed, and
            // reprogram it if needed.
            if let Some(params) = device.detect_bar_reprogramming(register, data) {
                if let Err(e) = pci_bus.device_reloc.move_bar(
                    params.old_base,
                    params.new_base,
                    params.len,
                    device.deref_mut(),
                    params.region_type,
                ) {
                    error!(
                        "Failed moving device BAR: {}: 0x{:x}->0x{:x}(0x{:x})",
                        e, params.old_base, params.new_base, params.len
                    );
                }
            }
            // Update the register value
            device.write_config_register(register, offset, data);
        }
    }
    /// Update CONFIG_ADDRESS from a 1/2/4-byte write at byte `offset`.
    fn set_config_address(&mut self, offset: u64, data: &[u8]) {
        if offset as usize + data.len() > 4 {
            return;
        }
        let (mask, value): (u32, u32) = match data.len() {
            1 => (
                0x0000_00ff << (offset * 8),
                u32::from(data[0]) << (offset * 8),
            ),
            2 => (
                // BUGFIX: the shift amount is in bits, so it must be
                // `offset * 8`. The previous `offset * 16` produced a 32-bit
                // shift of a u32 for a 16-bit write at byte offset 2, which
                // panics in debug builds and misplaces the field otherwise.
                0x0000_ffff << (offset * 8),
                (u32::from(data[1]) << 8 | u32::from(data[0])) << (offset * 8),
            ),
            4 => (0xffff_ffff, LittleEndian::read_u32(data)),
            _ => return,
        };
        self.config_address = (self.config_address & !mask) | value;
    }
}
impl BusDevice for PciConfigIo {
    fn read(&mut self, _base: u64, offset: u64, data: &mut [u8]) {
        // `offset` is relative to 0xcf8
        // Bytes 0..=3 read CONFIG_ADDRESS; bytes 4..=7 read CONFIG_DATA.
        let value = match offset {
            0..=3 => self.config_address,
            4..=7 => self.config_space_read(),
            _ => 0xffff_ffff,
        };
        // Only allow reads to the register boundary.
        let start = offset as usize % 4;
        let end = start + data.len();
        if end <= 4 {
            for i in start..end {
                data[i - start] = (value >> (i * 8)) as u8;
            }
        } else {
            // Straddling reads return all-ones.
            for d in data {
                *d = 0xff;
            }
        }
    }
    fn write(&mut self, _base: u64, offset: u64, data: &[u8]) {
        // `offset` is relative to 0xcf8
        match offset {
            o @ 0..=3 => self.set_config_address(o, data),
            o @ 4..=7 => self.config_space_write(o - 4, data),
            _ => (),
        };
    }
}
/// Emulates PCI memory-mapped configuration access mechanism.
pub struct PciConfigMmio {
    /// The bus whose devices are addressed through the ECAM window.
    pci_bus: Arc<Mutex<PciBus>>,
}
impl PciConfigMmio {
    pub fn new(pci_bus: Arc<Mutex<PciBus>>) -> Self {
        PciConfigMmio { pci_bus }
    }
    /// Read a config-space dword addressed by the ECAM offset.
    /// Returns all-ones for unknown buses or empty slots.
    fn config_space_read(&self, config_address: u32) -> u32 {
        let (bus, device, _function, register) = parse_mmio_config_address(config_address);
        // Only support one bus.
        if bus != 0 {
            return 0xffff_ffff;
        }
        self.pci_bus
            .lock()
            .unwrap()
            .devices
            .get(&(device as u32))
            .map_or(0xffff_ffff, |d| {
                d.lock().unwrap().read_config_register(register)
            })
    }
    /// Write to a config-space dword, handling BAR reprogramming before the
    /// register value is updated (mirrors `PciConfigIo::config_space_write`).
    fn config_space_write(&mut self, config_address: u32, offset: u64, data: &[u8]) {
        // Reject writes straddling the 4-byte register boundary.
        if offset as usize + data.len() > 4 {
            return;
        }
        let (bus, device, _function, register) = parse_mmio_config_address(config_address);
        // Only support one bus.
        if bus != 0 {
            return;
        }
        let pci_bus = self.pci_bus.lock().unwrap();
        if let Some(d) = pci_bus.devices.get(&(device as u32)) {
            let mut device = d.lock().unwrap();
            // Find out if one of the device's BAR is being reprogrammed, and
            // reprogram it if needed.
            if let Some(params) = device.detect_bar_reprogramming(register, data) {
                if let Err(e) = pci_bus.device_reloc.move_bar(
                    params.old_base,
                    params.new_base,
                    params.len,
                    device.deref_mut(),
                    params.region_type,
                ) {
                    error!(
                        "Failed moving device BAR: {}: 0x{:x}->0x{:x}(0x{:x})",
                        e, params.old_base, params.new_base, params.len
                    );
                }
            }
            // Update the register value
            device.write_config_register(register, offset, data);
        }
    }
}
impl BusDevice for PciConfigMmio {
    fn read(&mut self, _base: u64, offset: u64, data: &mut [u8]) {
        // Only allow reads to the register boundary.
        let start = offset as usize % 4;
        let end = start + data.len();
        if end > 4 || offset > u64::from(u32::max_value()) {
            // Straddling or out-of-window reads return all-ones.
            for d in data {
                *d = 0xff;
            }
            return;
        }
        let value = self.config_space_read(offset as u32);
        for i in start..end {
            data[i - start] = (value >> (i * 8)) as u8;
        }
    }
    fn write(&mut self, _base: u64, offset: u64, data: &[u8]) {
        if offset > u64::from(u32::max_value()) {
            return;
        }
        // The low two bits select the byte offset within the dword register.
        self.config_space_write(offset as u32, offset % 4, data)
    }
}
/// Extract the field of width `mask` located `offset` bits into `value`.
fn shift_and_mask(value: u32, offset: usize, mask: u32) -> usize {
    let field = (value >> offset) & mask;
    field as usize
}
// Parse the MMIO address offset to a (bus, device, function, register) tuple.
// See section 7.2.2 PCI Express Enhanced Configuration Access Mechanism (ECAM)
// from the Pci Express Base Specification Revision 5.0 Version 1.0.
fn parse_mmio_config_address(config_address: u32) -> (usize, usize, usize, usize) {
    // ECAM field layout: bits [27:20] bus, [19:15] device, [14:12] function,
    // [11:2] dword register index.
    let bus = shift_and_mask(config_address, 20, 0x00ff);
    let device = shift_and_mask(config_address, 15, 0x1f);
    let function = shift_and_mask(config_address, 12, 0x07);
    let register = shift_and_mask(config_address, 2, 0x3ff);
    (bus, device, function, register)
}
// Parse the CONFIG_ADDRESS register to a (bus, device, function, register) tuple.
fn parse_io_config_address(config_address: u32) -> (usize, usize, usize, usize) {
    // CONFIG_ADDRESS field layout: bits [23:16] bus, [15:11] device,
    // [10:8] function, [7:2] dword register index.
    let bus = shift_and_mask(config_address, 16, 0x00ff);
    let device = shift_and_mask(config_address, 11, 0x1f);
    let function = shift_and_mask(config_address, 8, 0x07);
    let register = shift_and_mask(config_address, 2, 0x3f);
    (bus, device, function, register)
}
| 31.615721 | 94 | 0.553177 |
fb960d14ca5d72830c70e1ffa4da88465ddcaf9f
| 43,359 |
//! Array class
use crate::avm2::activation::Activation;
use crate::avm2::array::ArrayStorage;
use crate::avm2::class::Class;
use crate::avm2::method::Method;
use crate::avm2::names::{Multiname, Namespace, QName};
use crate::avm2::object::{ArrayObject, Object, TObject};
use crate::avm2::string::AvmString;
use crate::avm2::traits::Trait;
use crate::avm2::value::Value;
use crate::avm2::Error;
use bitflags::bitflags;
use gc_arena::{GcCell, MutationContext};
use std::cmp::{min, Ordering};
use std::mem::swap;
/// Implements `Array`'s instance initializer.
pub fn instance_init<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        activation.super_init(this, &[])?;
        if let Some(mut array) = this.as_array_storage_mut(activation.context.gc_context) {
            // A single numeric argument sets the length instead of the contents.
            if args.len() == 1 {
                if let Some(expected_len) = args
                    .get(0)
                    .and_then(|v| v.as_number(activation.context.gc_context).ok())
                {
                    if expected_len < 0.0 || expected_len.is_nan() {
                        return Err("Length must be a positive integer".into());
                    }
                    array.set_length(expected_len as usize);
                    return Ok(Value::Undefined);
                }
            }
            // Otherwise every argument becomes an element, in order.
            for (i, arg) in args.iter().enumerate() {
                array.set(i, arg.clone());
            }
        }
    }
    Ok(Value::Undefined)
}
/// Implements `Array`'s class initializer.
///
/// The class body has no static setup to perform, so this is a no-op.
pub fn class_init<'gc>(
    _activation: &mut Activation<'_, 'gc, '_>,
    _this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    Ok(Value::Undefined)
}
/// Implements `Array.length`'s getter
pub fn length<'gc>(
_activation: &mut Activation<'_, 'gc, '_>,
this: Option<Object<'gc>>,
_args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
if let Some(this) = this {
if let Some(array) = this.as_array_storage() {
return Ok(array.length().into());
}
}
Ok(Value::Undefined)
}
/// Implements `Array.length`'s setter
pub fn set_length<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut array) = this.as_array_storage_mut(activation.context.gc_context) {
            // NOTE(review): the coercion below runs while the storage is
            // mutably borrowed; if coercion could ever re-enter this array it
            // would panic on the cell borrow — confirm coercion cannot do that.
            let size = args
                .get(0)
                .unwrap_or(&Value::Undefined)
                .coerce_to_u32(activation)?;
            array.set_length(size as usize);
        }
    }
    Ok(Value::Undefined)
}
/// Bundle an already-constructed `ArrayStorage` in an `Object`.
pub fn build_array<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    array: ArrayStorage<'gc>,
) -> Result<Value<'gc>, Error> {
    // NOTE(review): the `unwrap()` assumes the system prototypes are installed
    // before any array is built — confirm against player initialization order.
    Ok(ArrayObject::from_array(
        array,
        activation
            .context
            .avm2
            .system_prototypes
            .as_ref()
            .map(|sp| sp.array)
            .unwrap(),
        activation.context.gc_context,
    )
    .into())
}
/// Implements `Array.concat`
#[allow(clippy::map_clone)] //You can't clone `Option<Ref<T>>` without it
pub fn concat<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    // Start from a copy of the receiver's storage, or an empty array when the
    // receiver has none.
    let mut base_array = match this {
        Some(this) => this
            .as_array_storage()
            .map(|a| a.clone())
            .unwrap_or_else(|| ArrayStorage::new(0)),
        None => ArrayStorage::new(0),
    };
    // Array arguments are flattened one level deep; anything else is appended
    // as a single element.
    for arg in args {
        let arg_object = arg.coerce_to_object(activation)?;
        match arg_object.as_array_storage() {
            Some(other_array) => base_array.append(&other_array),
            None => base_array.push(arg.clone()),
        }
    }
    build_array(activation, base_array)
}
/// Resolves array holes.
pub fn resolve_array_hole<'gc>(
activation: &mut Activation<'_, 'gc, '_>,
this: Object<'gc>,
i: usize,
item: Option<Value<'gc>>,
) -> Result<Value<'gc>, Error> {
item.map(Ok).unwrap_or_else(|| {
this.proto()
.map(|mut p| {
p.get_property(
p,
&QName::new(
Namespace::public(),
AvmString::new(activation.context.gc_context, i.to_string()),
),
activation,
)
})
.unwrap_or(Ok(Value::Undefined))
})
}
/// Shared implementation of `join`/`toString`/`toLocaleString`.
///
/// `conv` converts each element before it is stringified (identity for
/// `join`/`toString`; a `toLocaleString` call for `toLocaleString`).
pub fn join_inner<'gc, 'a, 'ctxt, C>(
    activation: &mut Activation<'a, 'gc, 'ctxt>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
    mut conv: C,
) -> Result<Value<'gc>, Error>
where
    C: for<'b> FnMut(Value<'gc>, &'b mut Activation<'a, 'gc, 'ctxt>) -> Result<Value<'gc>, Error>,
{
    // An omitted separator defaults to ",".
    let mut separator = args.get(0).cloned().unwrap_or(Value::Undefined);
    if separator == Value::Undefined {
        separator = ",".into();
    }
    if let Some(this) = this {
        if let Some(array) = this.as_array_storage() {
            let string_separator = separator.coerce_to_string(activation)?;
            let mut accum = Vec::with_capacity(array.length());
            for (i, item) in array.iter().enumerate() {
                let item = resolve_array_hole(activation, this, i, item)?;
                // `null`/`undefined` elements stringify to the empty string.
                if matches!(item, Value::Undefined) || matches!(item, Value::Null) {
                    accum.push("".into());
                } else {
                    accum.push(
                        conv(item, activation)?
                            .coerce_to_string(activation)?
                            .to_string(),
                    );
                }
            }
            return Ok(AvmString::new(
                activation.context.gc_context,
                accum.join(&string_separator),
            )
            .into());
        }
    }
    Ok(Value::Undefined)
}
/// Implements `Array.join`
pub fn join<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    // Elements are stringified directly, with the caller-supplied separator.
    join_inner(activation, this, args, |v, _act| Ok(v))
}
/// Implements `Array.toString`
pub fn to_string<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    // Equivalent to `join(",")`.
    join_inner(activation, this, &[",".into()], |v, _act| Ok(v))
}
/// Implements `Array.toLocaleString`
pub fn to_locale_string<'gc>(
    act: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    // Like `toString`, but each element is converted via its own
    // `toLocaleString` method before joining.
    join_inner(act, this, &[",".into()], |v, activation| {
        let mut o = v.coerce_to_object(activation)?;
        let tls = o.get_property(
            o,
            &QName::new(Namespace::public(), "toLocaleString"),
            activation,
        )?;
        tls.coerce_to_object(activation)?
            .call(Some(o), &[], activation, o.proto())
    })
}
/// Implements `Array.valueOf`
pub fn value_of<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    // Same behavior as `toString`.
    join_inner(activation, this, &[",".into()], |v, _act| Ok(v))
}
/// An iterator that allows iterating over the contents of an array whilst also
/// executing user code.
///
/// Note that this does not actually implement `Iterator` as this struct needs
/// to share access to the activation with you. We can't claim your activation
/// and give it back in `next`, so we instead ask for it in `next`, which is
/// incompatible with the trait.
///
/// This technically works with Array-shaped, non-Array objects, since we
/// access arrays in this iterator the same way user code would. If it is
/// necessary to only work with Arrays, you must first check for array storage
/// before creating this iterator.
///
/// The primary purpose of `ArrayIter` is to maintain lock safety in the
/// presence of arbitrary user code. It is legal for, say, a method callback to
/// mutate the array under iteration. Normally, holding an `Iterator` on the
/// array while this happens would cause a panic; this code exists to prevent
/// that.
struct ArrayIter<'gc> {
    /// The array-shaped object being iterated.
    array_object: Object<'gc>,
    /// Next index to yield.
    index: u32,
    /// Length captured at construction time; not re-read during iteration.
    length: u32,
}
impl<'gc> ArrayIter<'gc> {
    /// Construct a new `ArrayIter`.
    pub fn new(
        activation: &mut Activation<'_, 'gc, '_>,
        mut array_object: Object<'gc>,
    ) -> Result<Self, Error> {
        // Read `length` through the property system, as user code would.
        let length = array_object
            .get_property(
                array_object,
                &QName::new(Namespace::public(), "length"),
                activation,
            )?
            .coerce_to_u32(activation)?;
        Ok(Self {
            array_object,
            index: 0,
            length,
        })
    }
    /// Get the next item in the array.
    ///
    /// Since this isn't a real iterator, this comes pre-enumerated; it yields
    /// a pair of the index and then the value.
    fn next(
        &mut self,
        activation: &mut Activation<'_, 'gc, '_>,
    ) -> Option<Result<(u32, Value<'gc>), Error>> {
        if self.index < self.length {
            let i = self.index;
            self.index += 1;
            // Each element is fetched via a property read of its stringified
            // index, so holes and prototype values behave as in user code.
            Some(
                self.array_object
                    .get_property(
                        self.array_object,
                        &QName::new(
                            Namespace::public(),
                            AvmString::new(activation.context.gc_context, i.to_string()),
                        ),
                        activation,
                    )
                    .map(|val| (i, val)),
            )
        } else {
            None
        }
    }
}
/// Implements `Array.forEach`
///
/// Calls `callback(item, index, array)` for each element, with an optional
/// receiver as the second argument. The return values are discarded.
pub fn for_each<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        let callback = args
            .get(0)
            .cloned()
            .unwrap_or(Value::Undefined)
            .coerce_to_object(activation)?;
        let receiver = args
            .get(1)
            .cloned()
            .unwrap_or(Value::Null)
            .coerce_to_object(activation)
            .ok();
        let mut iter = ArrayIter::new(activation, this)?;
        while let Some(r) = iter.next(activation) {
            let (i, item) = r?;
            callback.call(
                receiver,
                &[item, i.into(), this.into()],
                activation,
                receiver.and_then(|r| r.proto()),
            )?;
        }
    }
    Ok(Value::Undefined)
}
/// Implements `Array.map`
///
/// Builds a new array from the callback's return value for each element.
pub fn map<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        let callback = args
            .get(0)
            .cloned()
            .unwrap_or(Value::Undefined)
            .coerce_to_object(activation)?;
        let receiver = args
            .get(1)
            .cloned()
            .unwrap_or(Value::Null)
            .coerce_to_object(activation)
            .ok();
        let mut new_array = ArrayStorage::new(0);
        let mut iter = ArrayIter::new(activation, this)?;
        while let Some(r) = iter.next(activation) {
            let (i, item) = r?;
            let new_item = callback.call(
                receiver,
                &[item, i.into(), this.into()],
                activation,
                receiver.and_then(|r| r.proto()),
            )?;
            new_array.push(new_item);
        }
        return build_array(activation, new_array);
    }
    Ok(Value::Undefined)
}
/// Implements `Array.filter`
///
/// Keeps the elements for which the callback returns a truthy value.
pub fn filter<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        let callback = args
            .get(0)
            .cloned()
            .unwrap_or(Value::Undefined)
            .coerce_to_object(activation)?;
        let receiver = args
            .get(1)
            .cloned()
            .unwrap_or(Value::Null)
            .coerce_to_object(activation)
            .ok();
        let mut new_array = ArrayStorage::new(0);
        let mut iter = ArrayIter::new(activation, this)?;
        while let Some(r) = iter.next(activation) {
            let (i, item) = r?;
            let is_allowed = callback
                .call(
                    receiver,
                    &[item.clone(), i.into(), this.into()],
                    activation,
                    receiver.and_then(|r| r.proto()),
                )?
                .coerce_to_boolean();
            if is_allowed {
                new_array.push(item);
            }
        }
        return build_array(activation, new_array);
    }
    Ok(Value::Undefined)
}
/// Implements `Array.every`
///
/// Returns whether the callback is truthy for every element. Note that the
/// whole array is visited even after the first falsy result.
pub fn every<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        let callback = args
            .get(0)
            .cloned()
            .unwrap_or(Value::Undefined)
            .coerce_to_object(activation)?;
        let receiver = args
            .get(1)
            .cloned()
            .unwrap_or(Value::Null)
            .coerce_to_object(activation)
            .ok();
        let mut is_every = true;
        let mut iter = ArrayIter::new(activation, this)?;
        while let Some(r) = iter.next(activation) {
            let (i, item) = r?;
            is_every &= callback
                .call(
                    receiver,
                    &[item, i.into(), this.into()],
                    activation,
                    receiver.and_then(|r| r.proto()),
                )?
                .coerce_to_boolean();
        }
        return Ok(is_every.into());
    }
    Ok(Value::Undefined)
}
/// Implements `Array.some`
///
/// Returns whether the callback is truthy for at least one element. Note that
/// the whole array is visited even after the first truthy result.
pub fn some<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        let callback = args
            .get(0)
            .cloned()
            .unwrap_or(Value::Undefined)
            .coerce_to_object(activation)?;
        let receiver = args
            .get(1)
            .cloned()
            .unwrap_or(Value::Null)
            .coerce_to_object(activation)
            .ok();
        let mut is_some = false;
        let mut iter = ArrayIter::new(activation, this)?;
        while let Some(r) = iter.next(activation) {
            let (i, item) = r?;
            is_some |= callback
                .call(
                    receiver,
                    &[item, i.into(), this.into()],
                    activation,
                    receiver.and_then(|r| r.proto()),
                )?
                .coerce_to_boolean();
        }
        return Ok(is_some.into());
    }
    Ok(Value::Undefined)
}
/// Implements `Array.indexOf`
pub fn index_of<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(array) = this.as_array_storage() {
            let search_val = args.get(0).cloned().unwrap_or(Value::Undefined);
            // NOTE(review): `fromIndex` is coerced with `coerce_to_u32`, so a
            // negative start (which AS3 counts from the array's end) becomes a
            // huge index and nothing matches — confirm against avmplus.
            let from = args
                .get(1)
                .cloned()
                .unwrap_or_else(|| 0.into())
                .coerce_to_u32(activation)?;
            // Holes are resolved (and their prototype lookups run) even for
            // indices before `from`; only the comparison is skipped.
            for (i, val) in array.iter().enumerate() {
                let val = resolve_array_hole(activation, this, i, val)?;
                if i >= from as usize && val == search_val {
                    return Ok(i.into());
                }
            }
            return Ok((-1).into());
        }
    }
    Ok(Value::Undefined)
}
/// Implements `Array.lastIndexOf`
pub fn last_index_of<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(array) = this.as_array_storage() {
            let search_val = args.get(0).cloned().unwrap_or(Value::Undefined);
            // Default start is the end of the array; see `index_of` for the
            // caveat about negative `fromIndex` values under u32 coercion.
            let from = args
                .get(1)
                .cloned()
                .unwrap_or_else(|| i32::MAX.into())
                .coerce_to_u32(activation)?;
            // Scan from the back; first match at or before `from` wins.
            for (i, val) in array.iter().enumerate().rev() {
                let val = resolve_array_hole(activation, this, i, val)?;
                if i <= from as usize && val == search_val {
                    return Ok(i.into());
                }
            }
            return Ok((-1).into());
        }
    }
    Ok(Value::Undefined)
}
/// Implements `Array.pop`
pub fn pop<'gc>(
activation: &mut Activation<'_, 'gc, '_>,
this: Option<Object<'gc>>,
_args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
if let Some(this) = this {
if let Some(mut array) = this.as_array_storage_mut(activation.context.gc_context) {
return Ok(array.pop());
}
}
Ok(Value::Undefined)
}
/// Implements `Array.push`
pub fn push<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut array) = this.as_array_storage_mut(activation.context.gc_context) {
            // Append every argument, in call order, to the end of the array.
            for arg in args.iter().cloned() {
                array.push(arg);
            }
        }
    }
    Ok(Value::Undefined)
}
/// Implements `Array.reverse`, in place. Returns the receiver.
pub fn reverse<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut array) = this.as_array_storage_mut(activation.context.gc_context) {
            // Find the end of the occupied region so trailing holes are not
            // moved to the front.
            let mut last_non_hole_index = None;
            for (i, val) in array.iter().enumerate() {
                if val.is_some() {
                    last_non_hole_index = Some(i + 1);
                }
            }
            // Rebuild the storage back-to-front, preserving holes as holes.
            let mut new_array = ArrayStorage::new(0);
            for i in
                (0..last_non_hole_index.unwrap_or_else(|| array.length().saturating_sub(1))).rev()
            {
                if let Some(value) = array.get(i) {
                    new_array.push(value)
                } else {
                    new_array.push_hole()
                }
            }
            // Keep the original length even when trailing holes were skipped.
            new_array.set_length(array.length());
            swap(&mut *array, &mut new_array);
            return Ok(this.into());
        }
    }
    Ok(Value::Undefined)
}
/// Implements `Array.shift`
pub fn shift<'gc>(
activation: &mut Activation<'_, 'gc, '_>,
this: Option<Object<'gc>>,
_args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
if let Some(this) = this {
if let Some(mut array) = this.as_array_storage_mut(activation.context.gc_context) {
return Ok(array.shift());
}
}
Ok(Value::Undefined)
}
/// Implements `Array.unshift`
pub fn unshift<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut array) = this.as_array_storage_mut(activation.context.gc_context) {
            // Prepend in reverse so the arguments end up in call order.
            for arg in args.iter().rev().cloned() {
                array.unshift(arg);
            }
        }
    }
    Ok(Value::Undefined)
}
/// Resolve a possibly-negative array index to something guaranteed to be positive.
pub fn resolve_index<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    index: Value<'gc>,
    length: usize,
) -> Result<usize, Error> {
    let index = index.coerce_to_i32(activation)?;
    Ok(if index < 0 {
        // Negative indices count back from the end of the array.
        // NOTE(review): when `length + index` is still negative, the `as
        // usize` cast wraps to a huge value instead of clamping to 0; callers
        // currently treat such out-of-range starts as empty ranges — confirm.
        (length as isize).saturating_add(index as isize) as usize
    } else {
        index as usize
    })
}
/// Implements `Array.slice`
pub fn slice<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        // Capture the length first so the storage borrow is released before
        // the index coercions run user-observable code.
        let array_length = this.as_array_storage().map(|a| a.length());
        if let Some(array_length) = array_length {
            let actual_start = resolve_index(
                activation,
                args.get(0).cloned().unwrap_or_else(|| 0.into()),
                array_length,
            )?;
            // Default end (0xFFFFFF) is effectively "to the end of the array".
            let actual_end = resolve_index(
                activation,
                args.get(1).cloned().unwrap_or_else(|| 0xFFFFFF.into()),
                array_length,
            )?;
            let mut new_array = ArrayStorage::new(0);
            for i in actual_start..actual_end {
                if i >= array_length {
                    break;
                }
                // Holes are resolved through the prototype chain.
                new_array.push(resolve_array_hole(
                    activation,
                    this,
                    i,
                    this.as_array_storage().unwrap().get(i),
                )?);
            }
            return build_array(activation, new_array);
        }
    }
    Ok(Value::Undefined)
}
/// Implements `Array.splice`
pub fn splice<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        let array_length = this.as_array_storage().map(|a| a.length());
        if let Some(array_length) = array_length {
            if let Some(start) = args.get(0).cloned() {
                let actual_start = resolve_index(activation, start, array_length)?;
                let delete_count = args
                    .get(1)
                    .cloned()
                    .unwrap_or_else(|| array_length.into())
                    .coerce_to_i32(activation)?;
                // The removed elements become the return value; with a
                // non-positive delete count nothing is removed or inserted.
                let mut removed_array = ArrayStorage::new(0);
                if delete_count > 0 {
                    let actual_end = min(array_length, actual_start + delete_count as usize);
                    // Replacement elements, if any, start at argument 2.
                    let args_slice = if args.len() > 2 {
                        args[2..].iter().cloned()
                    } else {
                        [].iter().cloned()
                    };
                    // Snapshot the contents so holes can be resolved (which may
                    // run user code) without holding the storage borrow.
                    let contents = this
                        .as_array_storage()
                        .map(|a| a.iter().collect::<Vec<Option<Value<'gc>>>>())
                        .unwrap();
                    let mut resolved = Vec::new();
                    for (i, v) in contents.iter().enumerate() {
                        resolved.push(resolve_array_hole(activation, this, i, v.clone())?);
                    }
                    // Delegate the splice semantics to `Vec::splice`, then
                    // write the result back into the object's storage.
                    let removed = resolved
                        .splice(actual_start..actual_end, args_slice)
                        .collect::<Vec<Value<'gc>>>();
                    removed_array = ArrayStorage::from_args(&removed[..]);
                    let mut resolved_array = ArrayStorage::from_args(&resolved[..]);
                    if let Some(mut array) =
                        this.as_array_storage_mut(activation.context.gc_context)
                    {
                        swap(&mut *array, &mut resolved_array)
                    }
                }
                return build_array(activation, removed_array);
            }
        }
    }
    Ok(Value::Undefined)
}
bitflags! {
    /// The array options that a given sort operation may use.
    ///
    /// These are provided as a number by the VM and converted into bitflags.
    struct SortOptions: u8 {
        /// Request case-insensitive string value sort.
        const CASE_INSENSITIVE = 1 << 0;
        /// Reverse the order of sorting.
        const DESCENDING = 1 << 1;
        /// Reject sorting on arrays with multiple equivalent values.
        const UNIQUE_SORT = 1 << 2;
        /// Yield a list of indices rather than sorting the array in-place.
        const RETURN_INDEXED_ARRAY = 1 << 3;
        /// Request numeric value sort.
        const NUMERIC = 1 << 4;
    }
}
/// Identity closure shim which exists purely to decorate closure types with
/// the HRTB necessary to accept an activation.
///
/// Passing a closure literal through this function pins its argument and
/// return types to exactly the signature `sort_inner` expects, which type
/// inference does not otherwise produce for closure literals.
fn constrain<'a, 'gc, 'ctxt, F>(f: F) -> F
where
    F: FnMut(&mut Activation<'a, 'gc, 'ctxt>, Value<'gc>, Value<'gc>) -> Result<Ordering, Error>,
{
    f
}
/// Sort array storage.
///
/// This function expects its values to have been pre-enumerated and
/// pre-resolved. They will be sorted in-place. It is the caller's
/// responsibility to place the resulting half of the sorted array wherever.
///
/// This function will reverse the sort order if `Descending` sort is requested.
///
/// This function will return `false` in the event that the `UniqueSort`
/// constraint has been violated (`sort_func` returned `Ordering::Equal`). In
/// this case, you should cancel the in-place sorting operation and return 0 to
/// the caller. In the event that this function yields a runtime error, the
/// contents of the `values` array will be sorted in a random order.
fn sort_inner<'a, 'gc, 'ctxt, C>(
    activation: &mut Activation<'a, 'gc, 'ctxt>,
    values: &mut [(usize, Value<'gc>)],
    options: SortOptions,
    mut sort_func: C,
) -> Result<bool, Error>
where
    C: FnMut(&mut Activation<'a, 'gc, 'ctxt>, Value<'gc>, Value<'gc>) -> Result<Ordering, Error>,
{
    // Both flags are mutated from inside the comparator closure below:
    // `sort_unstable_by` cannot report errors, so the first error is stashed
    // in `error_signal` and rethrown after the sort finishes.
    let mut unique_sort_satisfied = true;
    let mut error_signal = Ok(());
    values.sort_unstable_by(|(_a_index, a), (_b_index, b)| {
        let unresolved_a = a.clone();
        let unresolved_b = b.clone();
        // `undefined` always sorts to the end, without consulting
        // `sort_func` and regardless of `DESCENDING`.
        if matches!(unresolved_a, Value::Undefined) && matches!(unresolved_b, Value::Undefined) {
            unique_sort_satisfied = false;
            return Ordering::Equal;
        } else if matches!(unresolved_a, Value::Undefined) {
            return Ordering::Greater;
        } else if matches!(unresolved_b, Value::Undefined) {
            return Ordering::Less;
        }
        match sort_func(activation, a.clone(), b.clone()) {
            Ok(Ordering::Equal) => {
                unique_sort_satisfied = false;
                Ordering::Equal
            }
            // `DESCENDING` is implemented by reversing every non-equal
            // comparison result.
            Ok(v) if options.contains(SortOptions::DESCENDING) => v.reverse(),
            Ok(v) => v,
            Err(e) => {
                // Record the error and return an arbitrary ordering; the
                // slice ends up in an unspecified order (see doc comment).
                error_signal = Err(e);
                Ordering::Less
            }
        }
    });
    error_signal?;
    // The uniqueness flag only matters when `UNIQUE_SORT` was requested.
    Ok(!options.contains(SortOptions::UNIQUE_SORT) || unique_sort_satisfied)
}
/// Order two values by their string coercions, case-sensitively.
fn compare_string_case_sensitive<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    a: Value<'gc>,
    b: Value<'gc>,
) -> Result<Ordering, Error> {
    let lhs = a.coerce_to_string(activation)?;
    let rhs = b.coerce_to_string(activation)?;
    Ok(lhs.cmp(&rhs))
}
/// Order two values by their string coercions, ignoring case (both sides
/// are lowercased before comparison).
fn compare_string_case_insensitive<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    a: Value<'gc>,
    b: Value<'gc>,
) -> Result<Ordering, Error> {
    let lhs = a.coerce_to_string(activation)?.to_lowercase();
    let rhs = b.coerce_to_string(activation)?.to_lowercase();
    Ok(lhs.cmp(&rhs))
}
/// Order two values by their numeric coercions.
///
/// `NaN` compares equal to `NaN` and greater than every other number, so
/// `NaN`s sort to the end of the array.
fn compare_numeric<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    a: Value<'gc>,
    b: Value<'gc>,
) -> Result<Ordering, Error> {
    let lhs = a.coerce_to_number(activation)?;
    let rhs = b.coerce_to_number(activation)?;
    let ord = match (lhs.is_nan(), rhs.is_nan()) {
        (true, true) => Ordering::Equal,
        (true, false) => Ordering::Greater,
        (false, true) => Ordering::Less,
        // Neither side is NaN, so `partial_cmp` cannot return `None`.
        (false, false) => lhs.partial_cmp(&rhs).unwrap(),
    };
    Ok(ord)
}
/// Take a sorted set of values and produce the result requested by the caller.
///
/// `values` pairs each sorted value with its original index in the array.
/// With `RETURN_INDEXED_ARRAY`, a fresh array of those source indices is
/// returned and the original array is left untouched; otherwise the array's
/// storage is rewritten in sorted order and `this` is returned.
///
/// If `unique_satisfied` is `false` (a `UNIQUE_SORT` violation), nothing is
/// modified and `0` is returned to the caller.
fn sort_postprocess<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Object<'gc>,
    options: SortOptions,
    unique_satisfied: bool,
    values: Vec<(usize, Value<'gc>)>,
) -> Result<Value<'gc>, Error> {
    if unique_satisfied {
        if options.contains(SortOptions::RETURN_INDEXED_ARRAY) {
            return build_array(
                activation,
                ArrayStorage::from_storage(
                    values.iter().map(|(i, _v)| Some((*i).into())).collect(),
                ),
            );
        } else {
            if let Some(mut old_array) = this.as_array_storage_mut(activation.context.gc_context) {
                let mut new_vec = Vec::new();
                for (src, v) in values.iter() {
                    // A hole (`None` slot) that was resolved to a concrete,
                    // non-undefined value during enumeration is written back
                    // as that value; otherwise the original slot is copied
                    // verbatim (preserving remaining holes).
                    if old_array.get(*src).is_none() && !matches!(v, Value::Undefined) {
                        new_vec.push(Some(v.clone()));
                    } else {
                        new_vec.push(old_array.get(*src).clone());
                    }
                }
                let mut new_array = ArrayStorage::from_storage(new_vec);
                swap(&mut *old_array, &mut new_array);
            }
            return Ok(this.into());
        }
    }
    Ok(0.into())
}
/// Given a value, extract its array values.
///
/// If the value is not an array, this function yields `None`.
fn extract_array_values<'gc>(
activation: &mut Activation<'_, 'gc, '_>,
value: Value<'gc>,
) -> Result<Option<Vec<Value<'gc>>>, Error> {
let object = value.coerce_to_object(activation).ok();
let holey_vec = if let Some(object) = object {
if let Some(field_array) = object.as_array_storage() {
let mut array = Vec::new();
for v in field_array.iter() {
array.push(v);
}
array
} else {
return Ok(None);
}
} else {
return Ok(None);
};
let mut unholey_vec = Vec::new();
for (i, v) in holey_vec.iter().enumerate() {
unholey_vec.push(resolve_array_hole(
activation,
object.unwrap(),
i,
v.clone(),
)?);
}
Ok(Some(unholey_vec))
}
/// Impl `Array.sort`
///
/// With two or more arguments, the first is a user comparison function and
/// the second is a set of `SortOptions` flags; otherwise the lone argument
/// (if any) is taken as the flags.
///
/// NOTE(review): a single comparison-function argument (`arr.sort(cmp)`)
/// falls into the options-only branch here and is coerced to a `u32`,
/// because only `args.len() > 1` selects the compare-function path — verify
/// against Flash, which accepts a lone comparison function.
pub fn sort<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        let (compare_fnc, options) = if args.len() > 1 {
            (
                Some(
                    args.get(0)
                        .cloned()
                        .unwrap_or(Value::Undefined)
                        .coerce_to_object(activation)?,
                ),
                SortOptions::from_bits_truncate(
                    args.get(1)
                        .cloned()
                        .unwrap_or_else(|| 0.into())
                        .coerce_to_u32(activation)? as u8,
                ),
            )
        } else {
            (
                None,
                SortOptions::from_bits_truncate(
                    args.get(0)
                        .cloned()
                        .unwrap_or_else(|| 0.into())
                        .coerce_to_u32(activation)? as u8,
                ),
            )
        };
        // Enumerate the hole-resolved contents, remembering each value's
        // original index for `sort_postprocess` / `RETURN_INDEXED_ARRAY`.
        let mut values = if let Some(values) = extract_array_values(activation, this.into())? {
            values
                .iter()
                .enumerate()
                .map(|(i, v)| (i, v.clone()))
                .collect::<Vec<(usize, Value<'gc>)>>()
        } else {
            return Ok(0.into());
        };
        let unique_satisfied = if let Some(v) = compare_fnc {
            // User comparison function: the sign of its numeric return
            // value selects the ordering.
            sort_inner(
                activation,
                &mut values,
                options,
                constrain(|activation, a, b| {
                    let order = v
                        .call(None, &[a, b], activation, None)?
                        .coerce_to_number(activation)?;
                    if order > 0.0 {
                        Ok(Ordering::Greater)
                    } else if order < 0.0 {
                        Ok(Ordering::Less)
                    } else {
                        Ok(Ordering::Equal)
                    }
                }),
            )?
        } else if options.contains(SortOptions::NUMERIC) {
            sort_inner(activation, &mut values, options, compare_numeric)?
        } else if options.contains(SortOptions::CASE_INSENSITIVE) {
            sort_inner(
                activation,
                &mut values,
                options,
                compare_string_case_insensitive,
            )?
        } else {
            // Default: case-sensitive string comparison.
            sort_inner(
                activation,
                &mut values,
                options,
                compare_string_case_sensitive,
            )?
        };
        return sort_postprocess(activation, this, options, unique_satisfied, values);
    }
    Ok(0.into())
}
/// Given a value, extract its array values.
///
/// If the value is not an array, it will be returned as if it was present in a
/// one-element array containing itself. This is intended for use with parsing
/// parameters which are optionally arrays.
fn extract_maybe_array_values<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    value: Value<'gc>,
) -> Result<Vec<Value<'gc>>, Error> {
    match extract_array_values(activation, value.clone())? {
        Some(values) => Ok(values),
        None => Ok(vec![value]),
    }
}
/// Given a value, extract its array values and coerce each one to a string.
///
/// A non-array value is treated as a one-element array containing itself,
/// which is convenient for parameters that are optionally arrays; such a
/// value is still coerced to a string.
fn extract_maybe_array_strings<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    value: Value<'gc>,
) -> Result<Vec<AvmString<'gc>>, Error> {
    let values = extract_maybe_array_values(activation, value)?;
    let mut strings = Vec::with_capacity(values.len());
    for item in values {
        strings.push(item.coerce_to_string(activation)?);
    }
    Ok(strings)
}
/// Given a value, extract its array values and coerce each one to
/// `SortOptions`.
///
/// A non-array value is treated as a one-element array containing itself,
/// which is convenient for parameters that are optionally arrays; each value
/// is coerced to a `u32` and truncated into sort-option bitflags.
fn extract_maybe_array_sort_options<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    value: Value<'gc>,
) -> Result<Vec<SortOptions>, Error> {
    let values = extract_maybe_array_values(activation, value)?;
    let mut options = Vec::with_capacity(values.len());
    for item in values {
        let bits = item.coerce_to_u32(activation)? as u8;
        options.push(SortOptions::from_bits_truncate(bits));
    }
    Ok(options)
}
/// Impl `Array.sortOn`
///
/// Sorts the array by comparing one or more named properties of each
/// element. `args[0]` is the field name (or array of field names); `args[1]`
/// is optional sort-option flags (or an array of flag sets, paired
/// positionally with the field names).
pub fn sort_on<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(field_names_value) = args.get(0).cloned() {
            let field_names = extract_maybe_array_strings(activation, field_names_value)?;
            let mut options = extract_maybe_array_sort_options(
                activation,
                args.get(1).cloned().unwrap_or_else(|| 0.into()),
            )?;
            // Only the first field's UNIQUE_SORT / RETURN_INDEXED_ARRAY
            // flags control the overall operation and post-processing.
            let first_option = options.get(0).cloned().unwrap_or_else(SortOptions::empty)
                & (SortOptions::UNIQUE_SORT | SortOptions::RETURN_INDEXED_ARRAY);
            let mut values = if let Some(values) = extract_array_values(activation, this.into())? {
                values
                    .iter()
                    .enumerate()
                    .map(|(i, v)| (i, v.clone()))
                    .collect::<Vec<(usize, Value<'gc>)>>()
            } else {
                return Ok(0.into());
            };
            // Pad the options list so every field name has a flag set,
            // repeating the last explicit entry (or empty flags).
            if options.len() < field_names.len() {
                options.resize(
                    field_names.len(),
                    options.last().cloned().unwrap_or_else(SortOptions::empty),
                );
            }
            let unique_satisfied = sort_inner(
                activation,
                &mut values,
                first_option,
                constrain(|activation, a, b| {
                    // Compare field by field; the first non-equal field
                    // decides the ordering.
                    for (field_name, options) in field_names.iter().zip(options.iter()) {
                        let mut a_object = a.coerce_to_object(activation)?;
                        let a_field = a_object.get_property(
                            a_object,
                            &QName::new(Namespace::public(), *field_name),
                            activation,
                        )?;
                        let mut b_object = b.coerce_to_object(activation)?;
                        let b_field = b_object.get_property(
                            b_object,
                            &QName::new(Namespace::public(), *field_name),
                            activation,
                        )?;
                        let ord = if options.contains(SortOptions::NUMERIC) {
                            compare_numeric(activation, a_field, b_field)?
                        } else if options.contains(SortOptions::CASE_INSENSITIVE) {
                            compare_string_case_insensitive(activation, a_field, b_field)?
                        } else {
                            compare_string_case_sensitive(activation, a_field, b_field)?
                        };
                        if matches!(ord, Ordering::Equal) {
                            continue;
                        }
                        // DESCENDING is honored per field.
                        if options.contains(SortOptions::DESCENDING) {
                            return Ok(ord.reverse());
                        } else {
                            return Ok(ord);
                        }
                    }
                    Ok(Ordering::Equal)
                }),
            )?;
            return sort_postprocess(activation, this, first_option, unique_satisfied, values);
        }
    }
    Ok(0.into())
}
/// Construct `Array`'s class.
///
/// Registers the `length` property, the instance methods (in both the AS3
/// and public namespaces), and the sort-option class constants.
pub fn create_class<'gc>(mc: MutationContext<'gc, '_>) -> GcCell<'gc, Class<'gc>> {
    let class = Class::new(
        QName::new(Namespace::public(), "Array"),
        Some(QName::new(Namespace::public(), "Object").into()),
        Method::from_builtin(instance_init),
        Method::from_builtin(class_init),
        mc,
    );
    // Instance property: `length` (getter + setter).
    class.write(mc).define_instance_trait(Trait::from_getter(
        QName::new(Namespace::public(), "length"),
        Method::from_builtin(length),
    ));
    class.write(mc).define_instance_trait(Trait::from_setter(
        QName::new(Namespace::public(), "length"),
        Method::from_builtin(set_length),
    ));
    // Instance methods in the AS3 namespace.
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "concat"),
        Method::from_builtin(concat),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "join"),
        Method::from_builtin(join),
    ));
    // `Object`-protocol methods in the public namespace.
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::public(), "toString"),
        Method::from_builtin(to_string),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::public(), "toLocaleString"),
        Method::from_builtin(to_locale_string),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::public(), "valueOf"),
        Method::from_builtin(value_of),
    ));
    // Remaining AS3-namespace instance methods.
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "forEach"),
        Method::from_builtin(for_each),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "map"),
        Method::from_builtin(map),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "filter"),
        Method::from_builtin(filter),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "every"),
        Method::from_builtin(every),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "some"),
        Method::from_builtin(some),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "indexOf"),
        Method::from_builtin(index_of),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "lastIndexOf"),
        Method::from_builtin(last_index_of),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "pop"),
        Method::from_builtin(pop),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "push"),
        Method::from_builtin(push),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "reverse"),
        Method::from_builtin(reverse),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "shift"),
        Method::from_builtin(shift),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "unshift"),
        Method::from_builtin(unshift),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "slice"),
        Method::from_builtin(slice),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "splice"),
        Method::from_builtin(splice),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "sort"),
        Method::from_builtin(sort),
    ));
    class.write(mc).define_instance_trait(Trait::from_method(
        QName::new(Namespace::as3_namespace(), "sortOn"),
        Method::from_builtin(sort_on),
    ));
    // Sort-option constants exposed as class traits; the numeric values
    // mirror the `SortOptions` bitflags above.
    class.write(mc).define_class_trait(Trait::from_const(
        QName::new(Namespace::public(), "CASEINSENSITIVE"),
        Multiname::from(QName::new(Namespace::public(), "uint")),
        Some(SortOptions::CASE_INSENSITIVE.bits().into()),
    ));
    class.write(mc).define_class_trait(Trait::from_const(
        QName::new(Namespace::public(), "DESCENDING"),
        Multiname::from(QName::new(Namespace::public(), "uint")),
        Some(SortOptions::DESCENDING.bits().into()),
    ));
    class.write(mc).define_class_trait(Trait::from_const(
        QName::new(Namespace::public(), "NUMERIC"),
        Multiname::from(QName::new(Namespace::public(), "uint")),
        Some(SortOptions::NUMERIC.bits().into()),
    ));
    class.write(mc).define_class_trait(Trait::from_const(
        QName::new(Namespace::public(), "RETURNINDEXEDARRAY"),
        Multiname::from(QName::new(Namespace::public(), "uint")),
        Some(SortOptions::RETURN_INDEXED_ARRAY.bits().into()),
    ));
    class.write(mc).define_class_trait(Trait::from_const(
        QName::new(Namespace::public(), "UNIQUESORT"),
        Multiname::from(QName::new(Namespace::public(), "uint")),
        Some(SortOptions::UNIQUE_SORT.bits().into()),
    ));
    class
}
| 31.396814 | 99 | 0.532923 |
f96162d9255d66522885d2c655a78c8766772e57
| 11,436 |
use crate::builtins::dict::PyDictRef;
use crate::builtins::pystr::PyStrRef;
use crate::builtins::pytype::PyTypeRef;
use crate::builtins::tuple::PyTupleRef;
/// Implementation of the _thread module
use crate::exceptions::{self, IntoPyException};
use crate::function::{FuncArgs, KwArgs, OptionalArg};
use crate::py_io;
use crate::slots::{SlotGetattro, SlotSetattro};
use crate::utils::Either;
use crate::VirtualMachine;
use crate::{
IdProtocol, ItemProtocol, PyCallable, PyClassImpl, PyObjectRef, PyRef, PyResult, PyValue,
StaticType, TypeProtocol,
};
use parking_lot::{
lock_api::{RawMutex as RawMutexT, RawMutexTimed, RawReentrantMutex},
RawMutex, RawThreadId,
};
use thread_local::ThreadLocal;
use std::cell::RefCell;
use std::io::Write;
use std::time::Duration;
use std::{fmt, thread};
// PY_TIMEOUT_MAX is a value in microseconds
#[cfg(not(target_os = "windows"))]
const PY_TIMEOUT_MAX: i64 = i64::MAX / 1_000;
#[cfg(target_os = "windows")]
const PY_TIMEOUT_MAX: i64 = 0xffffffff * 1_000;
// this is a value in seconds
const TIMEOUT_MAX: f64 = (PY_TIMEOUT_MAX / 1_000_000) as f64;
/// Arguments shared by `lock.acquire` and `RLock.acquire`.
#[derive(FromArgs)]
struct AcquireArgs {
    /// Whether to wait for the lock at all; default is to block.
    #[pyarg(any, default = "true")]
    blocking: bool,
    /// Seconds to wait; `-1` (the default) means "wait forever".
    #[pyarg(any, default = "Either::A(-1.0)")]
    timeout: Either<f64, i64>,
}
// Shared body of `lock.acquire` and `RLock.acquire`. A macro (not a
// function) because the two lock types wrap different raw mutex types.
macro_rules! acquire_lock_impl {
    ($mu:expr, $args:expr, $vm:expr) => {{
        let (mu, args, vm) = ($mu, $args, $vm);
        let timeout = match args.timeout {
            Either::A(f) => f,
            Either::B(i) => i as f64,
        };
        match args.blocking {
            // Blocking with the sentinel timeout of -1: wait forever.
            true if timeout == -1.0 => {
                mu.lock();
                Ok(true)
            }
            true if timeout < 0.0 => {
                Err(vm.new_value_error("timeout value must be positive".to_owned()))
            }
            true => {
                // modified from std::time::Duration::from_secs_f64 to avoid a panic.
                // TODO: put this in the Duration::try_from_object impl, maybe?
                let micros = timeout * 1_000_000.0;
                let nanos = timeout * 1_000_000_000.0;
                if micros > PY_TIMEOUT_MAX as f64 || nanos < 0.0 || !nanos.is_finite() {
                    return Err(vm.new_overflow_error(
                        "timestamp too large to convert to Rust Duration".to_owned(),
                    ));
                }
                Ok(mu.try_lock_for(Duration::from_secs_f64(timeout)))
            }
            false if timeout != -1.0 => {
                Err(vm
                    .new_value_error("can't specify a timeout for a non-blocking call".to_owned()))
            }
            // Non-blocking: a single attempt; success is the returned bool.
            false => Ok(mu.try_lock()),
        }
    }};
}
// Shared `__repr__` body for both lock types, rendering e.g.
// `<locked thread.lock object at 0x...>`.
macro_rules! repr_lock_impl {
    ($zelf:expr) => {{
        let status = if $zelf.mu.is_locked() {
            "locked"
        } else {
            "unlocked"
        };
        format!(
            "<{} {} object at {:#x}>",
            status,
            $zelf.class().name(),
            $zelf.get_id()
        )
    }};
}
#[pyclass(module = "thread", name = "lock")]
struct PyLock {
mu: RawMutex,
}
type PyLockRef = PyRef<PyLock>;
impl PyValue for PyLock {
fn class(_vm: &VirtualMachine) -> &PyTypeRef {
Self::static_type()
}
}
impl fmt::Debug for PyLock {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("PyLock")
}
}
#[pyimpl]
impl PyLock {
#[pymethod]
#[pymethod(name = "acquire_lock")]
#[pymethod(name = "__enter__")]
#[allow(clippy::float_cmp, clippy::match_bool)]
fn acquire(&self, args: AcquireArgs, vm: &VirtualMachine) -> PyResult<bool> {
acquire_lock_impl!(&self.mu, args, vm)
}
#[pymethod]
#[pymethod(name = "release_lock")]
fn release(&self, vm: &VirtualMachine) -> PyResult<()> {
if !self.mu.is_locked() {
return Err(vm.new_runtime_error("release unlocked lock".to_owned()));
}
unsafe { self.mu.unlock() };
Ok(())
}
#[pymethod(magic)]
fn exit(&self, _args: FuncArgs, vm: &VirtualMachine) -> PyResult<()> {
self.release(vm)
}
#[pymethod]
fn locked(&self) -> bool {
self.mu.is_locked()
}
#[pymethod(magic)]
fn repr(zelf: PyRef<Self>) -> String {
repr_lock_impl!(zelf)
}
}
/// Raw reentrant mutex: a `RawMutex` plus thread-id-based ownership tracking.
pub type RawRMutex = RawReentrantMutex<RawMutex, RawThreadId>;
/// Python-level `thread.RLock`: a reentrant lock that the owning thread may
/// acquire multiple times.
#[pyclass(module = "thread", name = "RLock")]
struct PyRLock {
    mu: RawRMutex,
}
impl PyValue for PyRLock {
    fn class(_vm: &VirtualMachine) -> &PyTypeRef {
        Self::static_type()
    }
}
impl fmt::Debug for PyRLock {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.pad("PyRLock")
    }
}
#[pyimpl]
impl PyRLock {
    /// `RLock()` constructor: a fresh, unheld reentrant lock.
    #[pyslot]
    fn tp_new(cls: PyTypeRef, _args: FuncArgs, vm: &VirtualMachine) -> PyResult {
        PyRLock {
            mu: RawRMutex::INIT,
        }
        .into_pyresult_with_type(vm, cls)
    }
    /// Acquire the lock; also serves as `__enter__` for `with` blocks.
    #[pymethod]
    #[pymethod(name = "acquire_lock")]
    #[pymethod(name = "__enter__")]
    #[allow(clippy::float_cmp, clippy::match_bool)]
    fn acquire(&self, args: AcquireArgs, vm: &VirtualMachine) -> PyResult<bool> {
        acquire_lock_impl!(&self.mu, args, vm)
    }
    /// Release one level of the lock; raises `RuntimeError` if unheld.
    #[pymethod]
    #[pymethod(name = "release_lock")]
    fn release(&self, vm: &VirtualMachine) -> PyResult<()> {
        if !self.mu.is_locked() {
            return Err(vm.new_runtime_error("release unlocked lock".to_owned()));
        }
        // SAFETY: the `is_locked` check above guards against releasing an
        // unheld raw mutex.
        unsafe { self.mu.unlock() };
        Ok(())
    }
    /// Whether the calling thread currently owns this lock.
    #[pymethod]
    fn _is_owned(&self) -> bool {
        self.mu.is_owned_by_current_thread()
    }
    /// `__exit__`: releases one level of the lock.
    #[pymethod(magic)]
    fn exit(&self, _args: FuncArgs, vm: &VirtualMachine) -> PyResult<()> {
        self.release(vm)
    }
    #[pymethod(magic)]
    fn repr(zelf: PyRef<Self>) -> String {
        repr_lock_impl!(zelf)
    }
}
/// `_thread.get_ident()`: the identifier of the calling thread.
fn _thread_get_ident() -> u64 {
    thread_to_id(&thread::current())
}
/// Convert a Rust `Thread` handle into the integer id exposed to Python.
fn thread_to_id(t: &thread::Thread) -> u64 {
    // TODO: use id.as_u64() once it's stable, until then, ThreadId is just a wrapper
    // around NonZeroU64, so this is safe
    unsafe { std::mem::transmute(t.id()) }
}
/// `_thread.allocate_lock()`: a fresh, unheld `thread.lock`.
fn _thread_allocate_lock() -> PyLock {
    PyLock { mu: RawMutex::INIT }
}
/// `_thread.start_new_thread(func, args[, kwargs])`: spawn an OS thread that
/// runs `func(*args, **kwargs)` and return the new thread's identifier.
fn _thread_start_new_thread(
    func: PyCallable,
    args: PyTupleRef,
    kwargs: OptionalArg<PyDictRef>,
    vm: &VirtualMachine,
) -> PyResult<u64> {
    let args = FuncArgs::new(
        args.as_slice().to_owned(),
        kwargs
            .map_or_else(Default::default, |k| k.to_attributes())
            .into_iter()
            .collect::<KwArgs>(),
    );
    let mut thread_builder = thread::Builder::new();
    // Honor a stack size previously configured via `_thread.stack_size()`;
    // zero means "platform default".
    let stacksize = vm.state.stacksize.load();
    if stacksize != 0 {
        thread_builder = thread_builder.stack_size(stacksize);
    }
    thread_builder
        .spawn(
            vm.new_thread()
                .make_spawn_func(move |vm| run_thread(func, args, vm)),
        )
        .map(|handle| {
            vm.state.thread_count.fetch_add(1);
            thread_to_id(handle.thread())
        })
        .map_err(|err| err.into_pyexception(vm))
}
/// Thread entry point: invoke `func`, report uncaught exceptions to stderr,
/// release this thread's sentinel locks, and decrement the live-thread count.
fn run_thread(func: PyCallable, args: FuncArgs, vm: &VirtualMachine) {
    match func.invoke(args, vm) {
        Ok(_obj) => {}
        // SystemExit terminates just this thread, silently.
        Err(e) if e.isinstance(&vm.ctx.exceptions.system_exit) => {}
        Err(exc) => {
            // TODO: sys.unraisablehook
            let stderr = std::io::stderr();
            let mut stderr = py_io::IoWriter(stderr.lock());
            let repr = vm.to_repr(&func.into_object()).ok();
            let repr = repr
                .as_ref()
                .map_or("<object repr() failed>", |s| s.as_str());
            writeln!(*stderr, "Exception ignored in thread started by: {}", repr)
                .and_then(|()| exceptions::write_exception(&mut stderr, vm, &exc))
                .ok();
        }
    }
    // Unlock any sentinel locks registered by this thread so that waiters
    // (e.g. `threading.Thread.join`) can observe the thread's exit.
    SENTINELS.with(|sents| {
        for lock in sents.replace(Default::default()) {
            if lock.mu.is_locked() {
                // SAFETY: `is_locked` was checked immediately above.
                unsafe { lock.mu.unlock() };
            }
        }
    });
    vm.state.thread_count.fetch_sub(1);
}
/// `_thread.exit()`: raise `SystemExit` to end the calling thread.
fn _thread_exit(vm: &VirtualMachine) -> PyResult {
    Err(vm.new_exception_empty(vm.ctx.exceptions.system_exit.clone()))
}
// Per-thread list of sentinel locks; drained and released in `run_thread`
// when the owning thread finishes.
thread_local!(static SENTINELS: RefCell<Vec<PyLockRef>> = RefCell::default());
/// `_thread._set_sentinel()`: create a lock that is automatically released
/// when the calling thread exits.
fn _thread_set_sentinel(vm: &VirtualMachine) -> PyLockRef {
    let lock = PyLock { mu: RawMutex::INIT }.into_ref(vm);
    SENTINELS.with(|sents| sents.borrow_mut().push(lock.clone()));
    lock
}
/// `_thread.stack_size([size])`: set the stack size used for new threads and
/// return the previous value; `0` selects the platform default.
fn _thread_stack_size(size: OptionalArg<usize>, vm: &VirtualMachine) -> usize {
    let size = size.unwrap_or(0);
    // TODO: do validation on this to make sure it's not too small
    vm.state.stacksize.swap(size)
}
/// `_thread._count()`: number of threads started by this module that are
/// still running.
fn _thread_count(vm: &VirtualMachine) -> usize {
    vm.state.thread_count.load()
}
#[pyclass(module = "thread", name = "_local")]
#[derive(Debug)]
struct PyLocal {
data: ThreadLocal<PyDictRef>,
}
impl PyValue for PyLocal {
fn class(_vm: &VirtualMachine) -> &PyTypeRef {
Self::static_type()
}
}
#[pyimpl(with(SlotGetattro, SlotSetattro), flags(BASETYPE))]
impl PyLocal {
fn ldict(&self, vm: &VirtualMachine) -> PyDictRef {
self.data.get_or(|| vm.ctx.new_dict()).clone()
}
#[pyslot]
fn tp_new(cls: PyTypeRef, _args: FuncArgs, vm: &VirtualMachine) -> PyResult {
PyLocal {
data: ThreadLocal::new(),
}
.into_pyresult_with_type(vm, cls)
}
}
impl SlotGetattro for PyLocal {
fn getattro(zelf: PyRef<Self>, attr: PyStrRef, vm: &VirtualMachine) -> PyResult {
let ldict = zelf.ldict(vm);
if attr.as_str() == "__dict__" {
Ok(ldict.into_object())
} else {
let zelf = zelf.into_object();
vm.generic_getattribute_opt(zelf.clone(), attr.clone(), Some(ldict))?
.ok_or_else(|| {
vm.new_attribute_error(format!("{} has no attribute '{}'", zelf, attr))
})
}
}
}
impl SlotSetattro for PyLocal {
fn setattro(
zelf: &PyRef<Self>,
attr: PyStrRef,
value: Option<PyObjectRef>,
vm: &VirtualMachine,
) -> PyResult<()> {
if attr.as_str() == "__dict__" {
Err(vm.new_attribute_error(format!(
"{} attribute '__dict__' is read-only",
zelf.as_object()
)))
} else {
let dict = zelf.ldict(vm);
if let Some(value) = value {
dict.set_item(attr, value, vm)?;
} else {
dict.del_item(attr, vm)?;
}
Ok(())
}
}
}
pub fn make_module(vm: &VirtualMachine) -> PyObjectRef {
let ctx = &vm.ctx;
py_module!(vm, "_thread", {
"RLock" => PyRLock::make_class(ctx),
"LockType" => PyLock::make_class(ctx),
"_local" => PyLocal::make_class(ctx),
"get_ident" => named_function!(ctx, _thread, get_ident),
"allocate_lock" => named_function!(ctx, _thread, allocate_lock),
"start_new_thread" => named_function!(ctx, _thread, start_new_thread),
"exit" => named_function!(ctx, _thread, exit),
"_set_sentinel" => named_function!(ctx, _thread, set_sentinel),
"stack_size" => named_function!(ctx, _thread, stack_size),
"_count" => named_function!(ctx, _thread, count),
"error" => ctx.exceptions.runtime_error.clone(),
"TIMEOUT_MAX" => ctx.new_float(TIMEOUT_MAX),
})
}
| 29.398458 | 99 | 0.572141 |
d95637c3b986828638e8ea34b1b90cc56d2b2b0f
| 22,992 |
//! For each definition, we track the following data. A definition
//! here is defined somewhat circularly as "something with a `DefId`",
//! but it generally corresponds to things like structs, enums, etc.
//! There are also some rather random cases (like const initializer
//! expressions) that are mostly just leftovers.
use crate::hir;
use crate::hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE, CRATE_DEF_INDEX};
use crate::ich::Fingerprint;
use crate::session::CrateDisambiguator;
use crate::util::nodemap::NodeMap;
use rustc_data_structures::fx::FxHashMap;
use rustc_index::vec::{IndexVec};
use rustc_data_structures::stable_hasher::StableHasher;
use std::borrow::Borrow;
use std::fmt::Write;
use std::hash::Hash;
use syntax::ast;
use syntax_expand::hygiene::ExpnId;
use syntax::symbol::{Symbol, sym, InternedString};
use syntax_pos::{Span, DUMMY_SP};
/// The `DefPathTable` maps `DefIndex`es to `DefKey`s and vice versa.
/// Internally the `DefPathTable` holds a tree of `DefKey`s, where each `DefKey`
/// stores the `DefIndex` of its parent.
/// There is one `DefPathTable` for each crate.
#[derive(Clone, Default, RustcDecodable, RustcEncodable)]
pub struct DefPathTable {
    /// `DefKey` for each allocated definition; the position in this vec *is*
    /// the `DefIndex`.
    index_to_key: Vec<DefKey>,
    /// Stable hash for each definition, kept in lockstep with `index_to_key`.
    def_path_hashes: Vec<DefPathHash>,
}
impl DefPathTable {
    /// Append a new definition, returning its freshly allocated `DefIndex`.
    fn allocate(&mut self,
                key: DefKey,
                def_path_hash: DefPathHash)
                -> DefIndex {
        let index = {
            let index = DefIndex::from(self.index_to_key.len());
            debug!("DefPathTable::insert() - {:?} <-> {:?}", key, index);
            self.index_to_key.push(key);
            index
        };
        self.def_path_hashes.push(def_path_hash);
        // The two vectors must stay in lockstep.
        debug_assert!(self.def_path_hashes.len() == self.index_to_key.len());
        index
    }
    /// The `DefIndex` that the next `allocate` call would return.
    pub fn next_id(&self) -> DefIndex {
        DefIndex::from(self.index_to_key.len())
    }
    #[inline(always)]
    pub fn def_key(&self, index: DefIndex) -> DefKey {
        self.index_to_key[index.index()].clone()
    }
    #[inline(always)]
    pub fn def_path_hash(&self, index: DefIndex) -> DefPathHash {
        let ret = self.def_path_hashes[index.index()];
        debug!("def_path_hash({:?}) = {:?}", index, ret);
        return ret
    }
    /// Insert a `hash -> DefId` entry for every definition in this table
    /// into `out`, tagging each `DefId` with crate number `cnum`.
    pub fn add_def_path_hashes_to(&self,
                                  cnum: CrateNum,
                                  out: &mut FxHashMap<DefPathHash, DefId>) {
        out.extend(
            self.def_path_hashes
                .iter()
                .enumerate()
                .map(|(index, &hash)| {
                    let def_id = DefId {
                        krate: cnum,
                        index: DefIndex::from(index),
                    };
                    (hash, def_id)
                })
        );
    }
    /// Number of definitions currently in the table.
    pub fn size(&self) -> usize {
        self.index_to_key.len()
    }
}
/// The definition table containing node definitions.
/// It holds the `DefPathTable` for local `DefId`s/`DefPath`s and it also stores a
/// mapping from `NodeId`s to local `DefId`s.
#[derive(Clone, Default)]
pub struct Definitions {
    /// The `DefKey` / `DefPathHash` table for the local crate.
    table: DefPathTable,
    /// Maps each AST `NodeId` to the `DefIndex` allocated for it, if any.
    node_to_def_index: NodeMap<DefIndex>,
    /// Inverse of `node_to_def_index`, indexed by `DefIndex`.
    def_index_to_node: Vec<ast::NodeId>,
    /// Maps AST `NodeId`s to HIR `HirId`s.
    pub(super) node_to_hir_id: IndexVec<ast::NodeId, hir::HirId>,
    /// If `ExpnId` is an ID of some macro expansion,
    /// then `DefId` is the normal module (`mod`) in which the expanded macro was defined.
    parent_modules_of_macro_defs: FxHashMap<ExpnId, DefId>,
    /// Item with a given `DefIndex` was defined during macro expansion with ID `ExpnId`.
    expansions_that_defined: FxHashMap<DefIndex, ExpnId>,
    /// Next disambiguator value for each `(parent, data)` pair that has been
    /// seen at least once already.
    next_disambiguator: FxHashMap<(DefIndex, DefPathData), u32>,
    /// Span recorded for a definition, when one was recorded.
    def_index_to_span: FxHashMap<DefIndex, Span>,
    /// When collecting definitions from an AST fragment produced by a macro invocation `ExpnId`
    /// we know what parent node that fragment should be attached to thanks to this table.
    invocation_parents: FxHashMap<ExpnId, DefIndex>,
    /// Indices of unnamed struct or variant fields with unresolved attributes.
    pub(super) placeholder_field_indices: NodeMap<usize>,
}
/// A unique identifier that we can use to lookup a definition
/// precisely. It combines the index of the definition's parent (if
/// any) with a `DisambiguatedDefPathData`.
#[derive(Clone, PartialEq, Debug, RustcEncodable, RustcDecodable)]
pub struct DefKey {
    /// The parent path.
    pub parent: Option<DefIndex>,
    /// The identifier of this node.
    pub disambiguated_data: DisambiguatedDefPathData,
}
impl DefKey {
    /// Compute this key's stable hash by folding its data into the parent's
    /// already-computed `DefPathHash`.
    fn compute_stable_hash(&self, parent_hash: DefPathHash) -> DefPathHash {
        let mut hasher = StableHasher::new();
        // We hash a `0u8` here to disambiguate between regular `DefPath` hashes,
        // and the special "root_parent" below.
        0u8.hash(&mut hasher);
        parent_hash.hash(&mut hasher);
        let DisambiguatedDefPathData {
            ref data,
            disambiguator,
        } = self.disambiguated_data;
        // Hash the `DefPathData` discriminant plus its name (when present)
        // rather than the whole value.
        ::std::mem::discriminant(data).hash(&mut hasher);
        if let Some(name) = data.get_opt_name() {
            name.hash(&mut hasher);
        }
        disambiguator.hash(&mut hasher);
        DefPathHash(hasher.finish())
    }
    /// Stable hash for the virtual parent of a crate root, derived from the
    /// crate's name and disambiguator instead of a parent hash.
    fn root_parent_stable_hash(crate_name: &str,
                               crate_disambiguator: CrateDisambiguator)
                               -> DefPathHash {
        let mut hasher = StableHasher::new();
        // Disambiguate this from a regular `DefPath` hash; see `compute_stable_hash()` above.
        1u8.hash(&mut hasher);
        crate_name.hash(&mut hasher);
        crate_disambiguator.hash(&mut hasher);
        DefPathHash(hasher.finish())
    }
}
/// A pair of `DefPathData` and an integer disambiguator. The integer is
/// normally `0`, but in the event that there are multiple defs with the
/// same `parent` and `data`, we use this field to disambiguate
/// between them. This introduces some artificial ordering dependency
/// but means that if you have, e.g., two impls for the same type in
/// the same module, they do get distinct `DefId`s.
#[derive(Clone, PartialEq, Debug, RustcEncodable, RustcDecodable)]
pub struct DisambiguatedDefPathData {
    pub data: DefPathData,
    pub disambiguator: u32
}
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct DefPath {
    /// The path leading from the crate root to the item.
    pub data: Vec<DisambiguatedDefPathData>,
    /// The crate root this path is relative to.
    pub krate: CrateNum,
}
impl DefPath {
    /// Whether this path belongs to the crate currently being compiled.
    pub fn is_local(&self) -> bool {
        self.krate == LOCAL_CRATE
    }
    /// Build a `DefPath` by walking parent links from `start_index` up to
    /// the crate root, using `get_key` to look up each `DefKey`.
    pub fn make<FN>(krate: CrateNum,
                    start_index: DefIndex,
                    mut get_key: FN) -> DefPath
        where FN: FnMut(DefIndex) -> DefKey
    {
        let mut data = vec![];
        let mut index = Some(start_index);
        loop {
            debug!("DefPath::make: krate={:?} index={:?}", krate, index);
            let p = index.unwrap();
            let key = get_key(p);
            debug!("DefPath::make: key={:?}", key);
            match key.disambiguated_data.data {
                DefPathData::CrateRoot => {
                    // The crate root terminates the walk and is not itself
                    // included in `data`.
                    assert!(key.parent.is_none());
                    break;
                }
                _ => {
                    data.push(key.disambiguated_data);
                    index = key.parent;
                }
            }
        }
        // Components were collected leaf-to-root; callers expect
        // root-to-leaf order.
        data.reverse();
        DefPath { data: data, krate: krate }
    }
    /// Returns a string representation of the `DefPath` without
    /// the crate-prefix. This method is useful if you don't have
    /// a `TyCtxt` available.
    pub fn to_string_no_crate(&self) -> String {
        let mut s = String::with_capacity(self.data.len() * 16);
        for component in &self.data {
            write!(s,
                   "::{}[{}]",
                   component.data.as_interned_str(),
                   component.disambiguator)
                .unwrap();
        }
        s
    }
    /// Returns a filename-friendly string for the `DefPath`, with the
    /// crate-prefix.
    ///
    /// NOTE(review): unlike `to_string_no_crate`, the disambiguated branch
    /// below omits the `::` separator before the component — confirm whether
    /// this is intentional or a missing `::`.
    pub fn to_string_friendly<F>(&self, crate_imported_name: F) -> String
        where F: FnOnce(CrateNum) -> Symbol
    {
        let crate_name_str = crate_imported_name(self.krate).as_str();
        let mut s = String::with_capacity(crate_name_str.len() + self.data.len() * 16);
        write!(s, "::{}", crate_name_str).unwrap();
        for component in &self.data {
            if component.disambiguator == 0 {
                write!(s, "::{}", component.data.as_interned_str()).unwrap();
            } else {
                write!(s,
                       "{}[{}]",
                       component.data.as_interned_str(),
                       component.disambiguator)
                    .unwrap();
            }
        }
        s
    }
    /// Returns a filename-friendly string of the `DefPath`, without
    /// the crate-prefix. This method is useful if you don't have
    /// a `TyCtxt` available.
    pub fn to_filename_friendly_no_crate(&self) -> String {
        let mut s = String::with_capacity(self.data.len() * 16);
        // `opt_delimiter` starts as `None` so no delimiter is emitted before
        // the first component; every later component is preceded by `-`.
        let mut opt_delimiter = None;
        for component in &self.data {
            opt_delimiter.map(|d| s.push(d));
            opt_delimiter = Some('-');
            if component.disambiguator == 0 {
                write!(s, "{}", component.data.as_interned_str()).unwrap();
            } else {
                write!(s,
                       "{}[{}]",
                       component.data.as_interned_str(),
                       component.disambiguator)
                    .unwrap();
            }
        }
        s
    }
}
/// The kinds of path components a `DefPath` may consist of.
#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub enum DefPathData {
// Root: these should only be used for the root nodes, because
// they are treated specially by the `def_path` function.
/// The crate root (marker).
CrateRoot,
// Catch-all for random `DefId` things like `DUMMY_NODE_ID`.
Misc,
// Different kinds of items and item-like things:
/// An impl.
Impl,
/// Something in the type namespace.
TypeNs(InternedString),
/// Something in the value namespace.
ValueNs(InternedString),
/// Something in the macro namespace.
MacroNs(InternedString),
/// Something in the lifetime namespace.
LifetimeNs(InternedString),
/// A closure expression.
ClosureExpr,
// Subportions of items:
/// Implicit constructor for a unit or tuple-like struct or enum variant.
Ctor,
/// A constant expression (see `{ast,hir}::AnonConst`).
AnonConst,
/// An `impl Trait` type node.
ImplTrait,
/// Identifies a piece of crate metadata that is global to a whole crate
/// (as opposed to just one item). `GlobalMetaData` components are only
/// supposed to show up right below the crate root.
GlobalMetaData(InternedString),
}
/// Stable fingerprint of a `DefPath`, usable across crates and sessions.
#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Debug,
RustcEncodable, RustcDecodable)]
pub struct DefPathHash(pub Fingerprint);
impl_stable_hash_for!(tuple_struct DefPathHash { fingerprint });
// Lets maps keyed by `DefPathHash` be queried directly with a `Fingerprint`.
impl Borrow<Fingerprint> for DefPathHash {
#[inline]
fn borrow(&self) -> &Fingerprint {
&self.0
}
}
impl Definitions {
/// Read-only access to the underlying `DefKey`/`DefPathHash` table.
pub fn def_path_table(&self) -> &DefPathTable {
&self.table
}
/// Gets the number of definitions.
pub fn def_index_count(&self) -> usize {
self.table.index_to_key.len()
}
/// The `DefKey` (parent link plus disambiguated data) for `index`.
pub fn def_key(&self, index: DefIndex) -> DefKey {
self.table.def_key(index)
}
#[inline(always)]
/// The stable path hash for `index`.
pub fn def_path_hash(&self, index: DefIndex) -> DefPathHash {
self.table.def_path_hash(index)
}
/// Returns the path from the crate root to `index`. The root
/// nodes are not included in the path (i.e., this will be an
/// empty vector for the crate root). For an inlined item, this
/// will be the path of the item in the external crate (but the
/// path will begin with the path to the external crate).
pub fn def_path(&self, index: DefIndex) -> DefPath {
DefPath::make(LOCAL_CRATE, index, |p| self.def_key(p))
}
#[inline]
/// The `DefIndex` created for `node`, if one exists.
pub fn opt_def_index(&self, node: ast::NodeId) -> Option<DefIndex> {
self.node_to_def_index.get(&node).cloned()
}
#[inline]
/// Like `opt_def_index`, wrapped into a local `DefId`.
pub fn opt_local_def_id(&self, node: ast::NodeId) -> Option<DefId> {
self.opt_def_index(node).map(DefId::local)
}
#[inline]
/// Panicking variant of `opt_local_def_id`; `node` must have a def.
pub fn local_def_id(&self, node: ast::NodeId) -> DefId {
self.opt_local_def_id(node).unwrap()
}
#[inline]
/// Maps a `DefId` back to its `NodeId`; `None` for foreign crates or
/// for defs that have no AST node (dummy id).
pub fn as_local_node_id(&self, def_id: DefId) -> Option<ast::NodeId> {
if def_id.krate == LOCAL_CRATE {
let node_id = self.def_index_to_node[def_id.index.index()];
if node_id != ast::DUMMY_NODE_ID {
return Some(node_id);
}
}
None
}
#[inline]
/// Maps a `DefId` to its `HirId`; `None` for foreign crates or defs
/// without a HIR node.
pub fn as_local_hir_id(&self, def_id: DefId) -> Option<hir::HirId> {
if def_id.krate == LOCAL_CRATE {
let hir_id = self.def_index_to_hir_id(def_id.index);
if hir_id != hir::DUMMY_HIR_ID {
Some(hir_id)
} else {
None
}
} else {
None
}
}
#[inline]
/// Direct `NodeId` -> `HirId` lookup; panics if the mapping was never
/// initialized (see `init_node_id_to_hir_id_mapping`).
pub fn node_to_hir_id(&self, node_id: ast::NodeId) -> hir::HirId {
self.node_to_hir_id[node_id]
}
#[inline]
/// `DefIndex` -> `HirId`, going through the def's `NodeId`.
pub fn def_index_to_hir_id(&self, def_index: DefIndex) -> hir::HirId {
let node_id = self.def_index_to_node[def_index.index()];
self.node_to_hir_id[node_id]
}
/// Retrieves the span of the given `DefId` if `DefId` is in the local crate, the span exists
/// and it's not `DUMMY_SP`.
#[inline]
pub fn opt_span(&self, def_id: DefId) -> Option<Span> {
if def_id.krate == LOCAL_CRATE {
self.def_index_to_span.get(&def_id.index).cloned()
} else {
None
}
}
/// Adds a root definition (no parent) and a few other reserved definitions.
pub fn create_root_def(&mut self,
crate_name: &str,
crate_disambiguator: CrateDisambiguator)
-> DefIndex {
let key = DefKey {
parent: None,
disambiguated_data: DisambiguatedDefPathData {
data: DefPathData::CrateRoot,
disambiguator: 0
}
};
// The root has no parent key to hash, so the "parent hash" is derived
// from the crate name and disambiguator instead.
let parent_hash = DefKey::root_parent_stable_hash(crate_name,
crate_disambiguator);
let def_path_hash = key.compute_stable_hash(parent_hash);
// Create the definition.
let root_index = self.table.allocate(key, def_path_hash);
assert_eq!(root_index, CRATE_DEF_INDEX);
assert!(self.def_index_to_node.is_empty());
self.def_index_to_node.push(ast::CRATE_NODE_ID);
self.node_to_def_index.insert(ast::CRATE_NODE_ID, root_index);
self.set_invocation_parent(ExpnId::root(), root_index);
// Allocate some other `DefIndex`es that always must exist.
GlobalMetaDataKind::allocate_def_indices(self);
root_index
}
/// Adds a definition with a parent definition, picking the next free
/// disambiguator for the `(parent, data)` pair and wiring up the
/// `NodeId`/expansion/span side tables.
pub fn create_def_with_parent(&mut self,
                              parent: DefIndex,
                              node_id: ast::NodeId,
                              data: DefPathData,
                              expn_id: ExpnId,
                              span: Span)
                              -> DefIndex {
    debug!("create_def_with_parent(parent={:?}, node_id={:?}, data={:?})",
           parent, node_id, data);
    assert!(!self.node_to_def_index.contains_key(&node_id),
            "adding a def'n for node-id {:?} and data {:?} but a previous def'n exists: {:?}",
            node_id,
            data,
            self.table.def_key(self.node_to_def_index[&node_id]));
    // The root node must be created with `create_root_def()`.
    assert!(data != DefPathData::CrateRoot);
    // Find the next free disambiguator for this key.
    let disambiguator = {
        let next_disamb = self.next_disambiguator.entry((parent, data.clone())).or_insert(0);
        let disambiguator = *next_disamb;
        *next_disamb = next_disamb.checked_add(1).expect("disambiguator overflow");
        disambiguator
    };
    let key = DefKey {
        parent: Some(parent),
        disambiguated_data: DisambiguatedDefPathData {
            data, disambiguator
        }
    };
    let parent_hash = self.table.def_path_hash(parent);
    let def_path_hash = key.compute_stable_hash(parent_hash);
    debug!("create_def_with_parent: after disambiguation, key = {:?}", key);
    // Create the definition.
    let index = self.table.allocate(key, def_path_hash);
    assert_eq!(index.index(), self.def_index_to_node.len());
    self.def_index_to_node.push(node_id);
    // Some things for which we allocate `DefIndex`es don't correspond to
    // anything in the AST, so they don't have a `NodeId`. For these cases
    // we don't need a mapping from `NodeId` to `DefIndex`.
    if node_id != ast::DUMMY_NODE_ID {
        // Bug fix: the trace message had an unbalanced bracket
        // ("def_index_to_node[{:?} <-> {:?}"); close it so the log reads
        // `def_index_to_node[index] <-> node_id`.
        debug!("create_def_with_parent: def_index_to_node[{:?}] <-> {:?}", index, node_id);
        self.node_to_def_index.insert(node_id, index);
    }
    if expn_id != ExpnId::root() {
        self.expansions_that_defined.insert(index, expn_id);
    }
    // The span is added if it isn't dummy.
    if !span.is_dummy() {
        self.def_index_to_span.insert(index, span);
    }
    index
}
/// Initializes the `ast::NodeId` to `HirId` mapping once it has been generated during
/// AST to HIR lowering.
pub fn init_node_id_to_hir_id_mapping(&mut self,
mapping: IndexVec<ast::NodeId, hir::HirId>) {
assert!(self.node_to_hir_id.is_empty(),
"trying to initialize `NodeId` -> `HirId` mapping twice");
self.node_to_hir_id = mapping;
}
/// The expansion that introduced `index`, or the root expansion for
/// defs written directly in the source.
pub fn expansion_that_defined(&self, index: DefIndex) -> ExpnId {
self.expansions_that_defined.get(&index).cloned().unwrap_or(ExpnId::root())
}
/// The module a macro definition lives in, keyed by its expansion.
pub fn parent_module_of_macro_def(&self, expn_id: ExpnId) -> DefId {
self.parent_modules_of_macro_defs[&expn_id]
}
/// Records the defining module for a macro's expansion.
pub fn add_parent_module_of_macro_def(&mut self, expn_id: ExpnId, module: DefId) {
self.parent_modules_of_macro_defs.insert(expn_id, module);
}
/// The definition a macro invocation expands inside of.
pub fn invocation_parent(&self, invoc_id: ExpnId) -> DefIndex {
self.invocation_parents[&invoc_id]
}
/// Records the parent definition of an invocation; may only be set once.
pub fn set_invocation_parent(&mut self, invoc_id: ExpnId, parent: DefIndex) {
let old_parent = self.invocation_parents.insert(invoc_id, parent);
assert!(old_parent.is_none(), "parent `DefIndex` is reset for an invocation");
}
}
impl DefPathData {
/// The identifier carried by this component, if it has one
/// (namespaced entries do; markers like `Impl` or `CrateRoot` do not).
pub fn get_opt_name(&self) -> Option<InternedString> {
use self::DefPathData::*;
match *self {
TypeNs(name) |
ValueNs(name) |
MacroNs(name) |
LifetimeNs(name) |
GlobalMetaData(name) => Some(name),
Impl |
CrateRoot |
Misc |
ClosureExpr |
Ctor |
AnonConst |
ImplTrait => None
}
}
/// A printable name for this component: the carried identifier when
/// there is one, otherwise a `{{...}}` placeholder symbol.
pub fn as_interned_str(&self) -> InternedString {
use self::DefPathData::*;
let s = match *self {
TypeNs(name) |
ValueNs(name) |
MacroNs(name) |
LifetimeNs(name) |
GlobalMetaData(name) => {
return name
}
// Note that this does not show up in user print-outs.
CrateRoot => sym::double_braced_crate,
Impl => sym::double_braced_impl,
Misc => sym::double_braced_misc,
ClosureExpr => sym::double_braced_closure,
Ctor => sym::double_braced_constructor,
AnonConst => sym::double_braced_constant,
ImplTrait => sym::double_braced_opaque,
};
s.as_interned_str()
}
/// Convenience: `as_interned_str` rendered to an owned `String`.
pub fn to_string(&self) -> String {
self.as_interned_str().to_string()
}
}
// We define the `GlobalMetaDataKind` enum with this macro because we want to
// make sure that we exhaustively iterate over all variants when registering
// the corresponding `DefIndex`es in the `DefTable`.
macro_rules! define_global_metadata_kind {
(pub enum GlobalMetaDataKind {
$($variant:ident),*
}) => (
pub enum GlobalMetaDataKind {
$($variant),*
}
impl GlobalMetaDataKind {
// Reserves one `DefIndex` directly under the crate root per variant.
fn allocate_def_indices(definitions: &mut Definitions) {
$({
let instance = GlobalMetaDataKind::$variant;
definitions.create_def_with_parent(
CRATE_DEF_INDEX,
ast::DUMMY_NODE_ID,
DefPathData::GlobalMetaData(instance.name().as_interned_str()),
ExpnId::root(),
DUMMY_SP
);
// Make sure calling `def_index` does not crash.
instance.def_index(&definitions.table);
})*
}
// Recovers the `DefIndex` reserved for this variant by rebuilding its
// key and searching the table.
pub fn def_index(&self, def_path_table: &DefPathTable) -> DefIndex {
let def_key = DefKey {
parent: Some(CRATE_DEF_INDEX),
disambiguated_data: DisambiguatedDefPathData {
data: DefPathData::GlobalMetaData(self.name().as_interned_str()),
disambiguator: 0,
}
};
// These `DefKey`s are all right after the root,
// so a linear search is fine.
let index = def_path_table.index_to_key
.iter()
.position(|k| *k == def_key)
.unwrap();
DefIndex::from(index)
}
// The `{{GlobalMetaData::Variant}}` marker name for this variant.
fn name(&self) -> Symbol {
let string = match *self {
$(
GlobalMetaDataKind::$variant => {
concat!("{{GlobalMetaData::", stringify!($variant), "}}")
}
)*
};
Symbol::intern(string)
}
}
)
}
// Each variant below gets a reserved `DefIndex` right under the crate root;
// see `define_global_metadata_kind!` for the generated impl.
define_global_metadata_kind!(pub enum GlobalMetaDataKind {
Krate,
CrateDeps,
DylibDependencyFormats,
LangItems,
LangItemsMissing,
NativeLibraries,
SourceMap,
Impls,
ExportedSymbols
});
| 34.367713 | 98 | 0.576505 |
d6d1be767566d82bd1625a6f8cce8f0a61f642ba
| 1,605 |
use crate::cmd::stream::*;
/// Binds vertex buffers and issues draw call
///
/// Plain draw-call parameters — which vertices to draw and how many
/// instances — mirroring the arguments of `vkCmdDraw`.
#[derive(Debug, Clone, Copy)]
pub struct DrawVertices {
    pub vertex_count: u32,
    pub instance_count: u32,
    pub first_vertex: u32,
    pub first_instance: u32,
}

impl Default for DrawVertices {
    /// Zero vertices, a single instance, both ranges starting at 0.
    fn default() -> Self {
        Self { vertex_count: 0, instance_count: 1, first_vertex: 0, first_instance: 0 }
    }
}

impl DrawVertices {
    /// Convenience constructor: the default draw with `vertex_count` vertices.
    pub fn with_vertices(vertex_count: u32) -> Self {
        Self { vertex_count, ..Self::default() }
    }
    /// Sets the index of the first vertex to draw.
    pub fn first_vertex(self, first: u32) -> Self {
        Self { first_vertex: first, ..self }
    }
    /// Sets the number of vertices to draw.
    pub fn vertex_count(self, count: u32) -> Self {
        Self { vertex_count: count, ..self }
    }
    /// Sets the first vertex and the vertex count in one call.
    pub fn vertices(self, first: u32, count: u32) -> Self {
        self.first_vertex(first).vertex_count(count)
    }
    /// Sets the index of the first instance to draw.
    pub fn first_instance(self, first: u32) -> Self {
        Self { first_instance: first, ..self }
    }
    /// Sets the number of instances to draw.
    pub fn instance_count(self, count: u32) -> Self {
        Self { instance_count: count, ..self }
    }
    /// Sets the first instance and the instance count in one call.
    pub fn instances(self, first: u32, count: u32) -> Self {
        self.first_instance(first).instance_count(count)
    }
}
impl StreamPush for DrawVertices {
    /// Records the draw into the command buffer; a draw with zero vertices
    /// or zero instances is a no-op and is skipped entirely.
    fn enqueue(&self, cs: CmdBuffer) -> CmdBuffer {
        let nonempty = self.instance_count > 0 && self.vertex_count > 0;
        if nonempty {
            vk::CmdDraw(
                cs.buffer,
                self.vertex_count,
                self.instance_count,
                self.first_vertex,
                self.first_instance,
            );
        }
        cs
    }
}
| 21.118421 | 62 | 0.620561 |
bf587b973ba7581262698d0b30d98a777148dee4
| 887 |
use std::sync::{Arc, Mutex};
use std::thread;
#[allow(dead_code)]
fn sample_mutex() {
    // A mutex (short for "mutual exclusion", also known as a spinlock) is
    // the simplest synchronization tool for protecting a critical region
    // and thereby preventing race conditions.
    let m = Mutex::new(5);
    let mut guard = m.lock().unwrap();
    *guard = 6;
    // Release the lock explicitly so printing the mutex below can lock it
    // again for its Debug output.
    drop(guard);
    println!("m = {:?}", m);
}
fn main() {
    // Shared counter: `Arc` shares ownership across threads, `Mutex`
    // serializes the increments.
    let counter = Arc::new(Mutex::new(0));
    let handles: Vec<_> = (0..10)
        .map(|_| {
            let counter = Arc::clone(&counter);
            thread::spawn(move || {
                *counter.lock().unwrap() += 1;
            })
        })
        .collect();
    // Wait for every worker before reading the final value.
    for handle in handles {
        handle.join().unwrap();
    }
    println!("result: {}", *counter.lock().unwrap());
}
| 24.638889 | 68 | 0.54566 |
e9d7a4134c4d9553519f50f26e250ae08559a97b
| 3,326 |
use super::super::collision::{self, ContactManifold, AABB};
use super::super::collision::{Ray, Raycast};
use super::body_set::BodyHandle;
use glam::Vec2;
/// Describes a collider in the shape of `Shape`. Attached to a body.
#[derive(Clone, Debug)]
pub struct Collider<T> {
/// Currently the only shape is `AABB`
pub shape: AABB,
/// Offset from the body's position, 0 for centered
pub offset: Vec2,
/// Whether to treat the body as physical or not
pub state: ColliderState,
/// Ideally only one bit should be set
pub category_bits: u32,
/// Bodies only collide if both of their masks match
pub mask_bits: u32,
/// User supplied tag for identification
pub user_tag: T,
/// Body who owns the collider
pub owner: BodyHandle,
}
impl<T> Collider<T> {
    /// Builds a collider from its parts; arguments mirror the struct fields.
    pub fn new(
        shape: AABB,
        offset: Vec2,
        state: ColliderState,
        category_bits: u32,
        mask_bits: u32,
        user_tag: T,
        owner: BodyHandle,
    ) -> Self {
        Self { shape, offset, state, category_bits, mask_bits, user_tag, owner }
    }

    /// Tests this collider (with its local offset applied) against an
    /// axis-aligned box given by center `position` and `half_exts`.
    pub fn overlaps_aabb(&self, own_position: Vec2, position: Vec2, half_exts: Vec2) -> bool {
        collision::intersection_aabb_aabb(
            own_position + self.offset,
            self.shape.half_exts,
            position,
            half_exts,
        )
    }

    /// Casts `ray` against this collider (with its local offset applied).
    pub fn ray_contact(&self, own_position: Vec2, ray: &Ray) -> Option<Raycast> {
        collision::contact_ray_aabb(ray, own_position + self.offset, self.shape.half_exts)
    }
}
/// Boolean test whether two `Colliders` collided.
pub fn is_colliding<T>(
    collider1: &Collider<T>,
    position1: Vec2,
    collider2: &Collider<T>,
    position2: Vec2,
) -> bool {
    // Each body position is shifted by its collider's local offset.
    collision::intersection_aabb_aabb(
        position1 + collider1.offset,
        collider1.shape.half_exts,
        position2 + collider2.offset,
        collider2.shape.half_exts,
    )
}
/// Like `is_colliding`, but shrinks the first collider by `tolerance`, so
/// bodies that are merely touching do not count as penetrating.
/// NOTE(review): the tolerance is subtracted from `collider1` only —
/// presumably callers pass the moving body first; confirm against call sites.
pub fn is_penetrating<T>(
    collider1: &Collider<T>,
    position1: Vec2,
    collider2: &Collider<T>,
    position2: Vec2,
    tolerance: f32,
) -> bool {
    let center1 = position1 + collider1.offset;
    let center2 = position2 + collider2.offset;
    let shrunk_half_exts = collider1.shape.half_exts - Vec2::splat(tolerance);
    collision::intersection_aabb_aabb(center1, shrunk_half_exts, center2, collider2.shape.half_exts)
}
/// Generates a ContactManifold if two `Colliders` collided.
pub fn collision_manifold<T>(
    collider1: &Collider<T>,
    position1: Vec2,
    collider2: &Collider<T>,
    position2: Vec2,
) -> Option<ContactManifold> {
    // Each body position is shifted by its collider's local offset.
    collision::contact_aabb_aabb(
        position1 + collider1.offset,
        collider1.shape.half_exts,
        position2 + collider2.offset,
        collider2.shape.half_exts,
    )
}
/// State of the collider, determines default collision resolution and types of events sent.
#[derive(Copy, Clone, Debug)]
pub enum ColliderState {
/// Solid body resolves collision.
Solid,
/// Sensor sends events about possible overlap.
Sensor,
}
| 28.672414 | 98 | 0.64071 |
ac3ff7fff95c5efe06e2723e9b3f61e5ce86701d
| 5,287 |
use serde::{Deserialize, Serialize};
/// FIX repeating group of instrument events, counted by NoEvents (tag 864).
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct EvntGrp {
/// NoEvents
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "864")]
pub events: Option<fix_common::RepeatingValues<Event>>,
}
/// One entry of the EvntGrp repeating group; field numbers are FIX tags.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct Event {
/// Required if NoEvents(864) > 0.
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "865")]
pub event_type: Option<EventType>,
/// Conditionally required when EventTime(1145) is specified.
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "866")]
pub event_date: Option<fix_common::LocalMktDate>,
/// <p></p>
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "1145")]
pub event_time: Option<fix_common::UTCTimestamp>,
/// EventPx
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(deserialize_with = "fix_common::workarounds::from_opt_str")]// https://github.com/serde-rs/serde/issues/1183
#[serde(default)]
#[serde(rename = "867")]
pub event_px: Option<f64>,
/// EventText
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "868")]
pub event_text: Option<String>,
/// Conditionally required when EventTimePeriod(1826) is specified.
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "1827")]
pub event_time_unit: Option<EventTimeUnit>,
/// Conditionally required when EventTimeUnit(1827) is specified.
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(deserialize_with = "fix_common::workarounds::from_opt_str")]// https://github.com/serde-rs/serde/issues/1183
#[serde(default)]
#[serde(rename = "1826")]
pub event_time_period: Option<i32>,
/// EventMonthYear
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "2340")]
pub event_month_year: Option<fix_common::MonthYear>,
// NOTE(review): the next field carries both rename = "1578" (the length tag,
// EncodedEventTextLen) and alias = "1579" (the data tag) — presumably
// `fix_common::EncodedText<1579>` folds the tag pair into one field; confirm
// against the `fix_common` codegen conventions.
/// Must be set if EncodedEventText(1579) field is specified and must immediately precede it.
#[serde(rename = "1578")]
/// Encoded (non-ASCII characters) representation of the EventText(868) field in the encoded format specified via the MessageEncoding(347)
/// field.
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(alias = "1579")]
pub encoded_event_text: Option<fix_common::EncodedText<1579>>,
}
/// FIX tag 865 (EventType): the numeric code values for the kind of event.
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq)]
pub enum EventType {
/// Put
#[serde(rename = "1")]
Put,
/// Call
#[serde(rename = "2")]
Call,
/// Tender
#[serde(rename = "3")]
Tender,
/// Sinking Fund Call
#[serde(rename = "4")]
SinkingFundCall,
/// Activation
#[serde(rename = "5")]
Activation,
/// Inactiviation
#[serde(rename = "6")]
Inactiviation,
/// Last Eligible Trade Date
#[serde(rename = "7")]
LastEligibleTradeDate,
/// Swap start date
#[serde(rename = "8")]
SwapStartDate,
/// Swap end date
#[serde(rename = "9")]
SwapEndDate,
/// Swap roll date
#[serde(rename = "10")]
SwapRollDate,
/// Swap next start date
#[serde(rename = "11")]
SwapNextStartDate,
/// Swap next roll date
#[serde(rename = "12")]
SwapNextRollDate,
/// First delivery date
#[serde(rename = "13")]
FirstDeliveryDate,
/// Last delivery date
#[serde(rename = "14")]
LastDeliveryDate,
/// Initiatl inventory due date
#[serde(rename = "15")]
InitiatlInventoryDueDate,
/// Final inventory due date
#[serde(rename = "16")]
FinalInventoryDueDate,
/// First intent date
#[serde(rename = "17")]
FirstIntentDate,
/// Last intent date
#[serde(rename = "18")]
LastIntentDate,
/// Position removal date
#[serde(rename = "19")]
PositionRemovalDate,
/// Other
#[serde(rename = "99")]
Other,
/// Minimum notice
#[serde(rename = "20")]
MinimumNotice,
/// Delivery start time
#[serde(rename = "21")]
DeliveryStartTime,
/// Delivery end time
#[serde(rename = "22")]
DeliveryEndTime,
/// First notice date (The first day that a notice of intent to deliver a commodity can be made by a clearing house to a buyer
/// in fulfillment of a given month's futures contract)
#[serde(rename = "23")]
FirstNoticeDate,
/// Last notice date (The last day on which a clearing house may inform an investor that a seller intends to make delivery of
/// a commodity that the investor previously bought in a futures contract. The date is governed by the rules of different exchanges
/// and clearing houses, but may also be stated in the futures contract itself)
#[serde(rename = "24")]
LastNoticeDate,
/// First exercise date
#[serde(rename = "25")]
FirstExerciseDate,
/// Redemption date
#[serde(rename = "26")]
RedemptionDate,
/// Trade continuation effective date
#[serde(rename = "27")]
TradeContinuationEffectiveDate,
}
impl Default for EventType {
fn default() -> Self {
EventType::Put
}
}
/// FIX tag 1827 (EventTimeUnit): unit codes qualifying EventTimePeriod(1826).
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq)]
pub enum EventTimeUnit {
/// Hour
#[serde(rename = "H")]
Hour,
/// Minute
#[serde(rename = "Min")]
Minute,
/// Second
#[serde(rename = "S")]
Second,
/// Day
#[serde(rename = "D")]
Day,
/// Week
#[serde(rename = "Wk")]
Week,
/// Month
#[serde(rename = "Mo")]
Month,
/// Year
#[serde(rename = "Yr")]
Year,
/// Quarter
#[serde(rename = "Q")]
Quarter,
}
impl Default for EventTimeUnit {
fn default() -> Self {
EventTimeUnit::Hour
}
}
| 27.973545 | 139 | 0.686779 |
64b6df3567a4a07cd70f69d5956902726d67b08c
| 147,349 |
// ignore-tidy-filelength
use crate::common::{expected_output_path, UI_EXTENSIONS, UI_FIXED, UI_STDERR, UI_STDOUT};
use crate::common::{incremental_dir, output_base_dir, output_base_name, output_testname_unique};
use crate::common::{Assembly, Incremental, JsDocTest, MirOpt, RunMake, RustdocJson, Ui};
use crate::common::{Codegen, CodegenUnits, DebugInfo, Debugger, Rustdoc};
use crate::common::{CompareMode, FailMode, PassMode};
use crate::common::{Config, TestPaths};
use crate::common::{Pretty, RunPassValgrind};
use crate::common::{UI_RUN_STDERR, UI_RUN_STDOUT};
use crate::compute_diff::{write_diff, write_filtered_diff};
use crate::errors::{self, Error, ErrorKind};
use crate::header::TestProps;
use crate::json;
use crate::read2::read2_abbreviated;
use crate::util::get_pointer_width;
use crate::util::{logv, PathBufExt};
use crate::ColorConfig;
use regex::{Captures, Regex};
use rustfix::{apply_suggestions, get_suggestions_from_json, Filter};
use std::collections::hash_map::DefaultHasher;
use std::collections::{HashMap, HashSet};
use std::env;
use std::ffi::{OsStr, OsString};
use std::fs::{self, create_dir_all, File, OpenOptions};
use std::hash::{Hash, Hasher};
use std::io::prelude::*;
use std::io::{self, BufReader};
use std::path::{Path, PathBuf};
use std::process::{Command, ExitStatus, Output, Stdio};
use std::str;
use glob::glob;
use lazy_static::lazy_static;
use tracing::*;
use crate::extract_gdb_version;
use crate::is_android_gdb_target;
mod debugger;
use debugger::{check_debugger_output, DebuggerCommands};
#[cfg(test)]
mod tests;
/// Runs `f` with Windows error-reporting dialogs suppressed, so tests that
/// terminate abnormally do not hang the run waiting on a GUI popup.
#[cfg(windows)]
fn disable_error_reporting<F: FnOnce() -> R, R>(f: F) -> R {
use std::sync::Mutex;
use winapi::um::errhandlingapi::SetErrorMode;
use winapi::um::winbase::SEM_NOGPFAULTERRORBOX;
lazy_static! {
static ref LOCK: Mutex<()> = Mutex::new(());
}
// Error mode is a global variable, so lock it so only one thread will change it
let _lock = LOCK.lock().unwrap();
// Tell Windows to not show any UI on errors (such as terminating abnormally).
// This is important for running tests, since some of them use abnormal
// termination by design. This mode is inherited by all child processes.
unsafe {
let old_mode = SetErrorMode(SEM_NOGPFAULTERRORBOX); // read inherited flags
SetErrorMode(old_mode | SEM_NOGPFAULTERRORBOX);
let r = f();
SetErrorMode(old_mode);
r
}
}
/// Non-Windows platforms have no error-reporting UI to suppress, so the
/// closure is simply executed.
#[cfg(not(windows))]
fn disable_error_reporting<F, R>(f: F) -> R
where
    F: FnOnce() -> R,
{
    f()
}
/// The name of the environment variable that holds dynamic library locations.
pub fn dylib_env_var() -> &'static str {
    // Most-specific platforms first; everything else falls through to the
    // conventional ELF loader variable.
    if cfg!(windows) {
        return "PATH";
    }
    if cfg!(target_os = "macos") {
        return "DYLD_LIBRARY_PATH";
    }
    if cfg!(target_os = "haiku") {
        return "LIBRARY_PATH";
    }
    "LD_LIBRARY_PATH"
}
/// The platform-specific file name for library `lib`.
///
/// When `dylib` is false we are linking a static rlib (e.g. on MUSL), so
/// the only artifact we can pass is the `.rlib` file — on every platform.
pub fn get_lib_name(lib: &str, dylib: bool) -> String {
    if !dylib {
        format!("lib{}.rlib", lib)
    } else if cfg!(windows) {
        format!("{}.dll", lib)
    } else if cfg!(target_os = "macos") {
        format!("lib{}.dylib", lib)
    } else {
        format!("lib{}.so", lib)
    }
}
/// Runs one test file (optionally one revision): validates the target
/// environment, parses header properties, executes the per-mode runner(s),
/// and writes the stamp file on completion.
pub fn run(config: Config, testpaths: &TestPaths, revision: Option<&str>) {
match &*config.target {
// Android targets need a live device; gdb is driven separately there.
"arm-linux-androideabi"
| "armv7-linux-androideabi"
| "thumbv7neon-linux-androideabi"
| "aarch64-linux-android" => {
if !config.adb_device_status {
panic!("android device not available");
}
}
_ => {
// android has its own gdb handling
if config.debugger == Some(Debugger::Gdb) && config.gdb.is_none() {
panic!("gdb not available but debuginfo gdb debuginfo test requested");
}
}
}
if config.verbose {
// We're going to be dumping a lot of info. Start on a new line.
print!("\n\n");
}
debug!("running {:?}", testpaths.file.display());
let mut props = TestProps::from_file(&testpaths.file, revision, &config);
if props.incremental {
props.incremental_dir = Some(incremental_dir(&config, testpaths));
}
let cx = TestCx { config: &config, props: &props, testpaths, revision };
create_dir_all(&cx.output_base_dir()).unwrap();
if props.incremental {
cx.init_incremental_test();
}
if config.mode == Incremental {
// Incremental tests are special because they cannot be run in
// parallel.
assert!(!props.revisions.is_empty(), "Incremental tests require revisions.");
for revision in &props.revisions {
// Each revision gets its own props (re-parsed) but shares the
// incremental directory so later revisions see earlier artifacts.
let mut revision_props = TestProps::from_file(&testpaths.file, Some(revision), &config);
revision_props.incremental_dir = props.incremental_dir.clone();
let rev_cx = TestCx {
config: &config,
props: &revision_props,
testpaths,
revision: Some(revision),
};
rev_cx.run_revision();
}
} else {
cx.run_revision();
}
cx.create_stamp();
}
/// Computes the fingerprint stored in a test's stamp file; the test is
/// rerun whenever any hashed input changes.
/// NOTE: the result depends on the exact order of the `hash` calls below,
/// so reordering them invalidates all existing stamps.
pub fn compute_stamp_hash(config: &Config) -> String {
let mut hash = DefaultHasher::new();
config.stage_id.hash(&mut hash);
config.run.hash(&mut hash);
// Debugger tests additionally depend on the debugger binaries and the
// environment the debugger scripts run under.
match config.debugger {
Some(Debugger::Cdb) => {
config.cdb.hash(&mut hash);
}
Some(Debugger::Gdb) => {
config.gdb.hash(&mut hash);
env::var_os("PATH").hash(&mut hash);
env::var_os("PYTHONPATH").hash(&mut hash);
}
Some(Debugger::Lldb) => {
config.python.hash(&mut hash);
config.lldb_python_dir.hash(&mut hash);
env::var_os("PATH").hash(&mut hash);
env::var_os("PYTHONPATH").hash(&mut hash);
}
None => {}
}
// UI tests also change outcome with a forced pass mode.
if let Ui = config.mode {
config.force_pass_mode.hash(&mut hash);
}
format!("{:x}", hash.finish())
}
/// Per-test execution context: global config, parsed header properties,
/// the test's paths, and the active revision (if any).
#[derive(Copy, Clone)]
struct TestCx<'test> {
config: &'test Config,
props: &'test TestProps,
testpaths: &'test TestPaths,
revision: Option<&'test str>,
}
/// Where pretty-printer input comes from: the file on disk, or a string
/// piped on stdin (the previous round's output).
enum ReadFrom {
Path,
Stdin(String),
}
/// Which phase produced the output being checked.
enum TestOutput {
Compile,
Run,
}
/// Will this test be executed? Should we use `make_exe_name`?
#[derive(Copy, Clone, PartialEq)]
enum WillExecute {
Yes,
No,
// Running was requested by the test but disabled for this config run.
Disabled,
}
/// Should `--emit metadata` be used?
#[derive(Copy, Clone)]
enum EmitMetadata {
Yes,
No,
}
impl<'test> TestCx<'test> {
/// Code executed for each revision in turn (or, if there are no
/// revisions, exactly once, with revision == None).
fn run_revision(&self) {
// `should-ice` only makes sense for incremental (cfail-style) tests.
if self.props.should_ice && self.config.mode != Incremental {
self.fatal("cannot use should-ice in a test that is not cfail");
}
// Dispatch to the runner for this test suite.
match self.config.mode {
RunPassValgrind => self.run_valgrind_test(),
Pretty => self.run_pretty_test(),
DebugInfo => self.run_debuginfo_test(),
Codegen => self.run_codegen_test(),
Rustdoc => self.run_rustdoc_test(),
RustdocJson => self.run_rustdoc_json_test(),
CodegenUnits => self.run_codegen_units_test(),
Incremental => self.run_incremental_test(),
RunMake => self.run_rmake_test(),
Ui => self.run_ui_test(),
MirOpt => self.run_mir_opt_test(),
Assembly => self.run_assembly_test(),
JsDocTest => self.run_js_doc_test(),
}
}
/// The pass mode requested by the test header, if any.
fn pass_mode(&self) -> Option<PassMode> {
self.props.pass_mode(self.config)
}
/// Decides whether the compiled artifact should be executed, and whether
/// execution is enabled for this run.
fn should_run(&self, pm: Option<PassMode>) -> WillExecute {
let test_should_run = match self.config.mode {
Ui if pm == Some(PassMode::Run) || self.props.fail_mode == Some(FailMode::Run) => true,
MirOpt if pm == Some(PassMode::Run) => true,
Ui | MirOpt => false,
mode => panic!("unimplemented for mode {:?}", mode),
};
if test_should_run { self.run_if_enabled() } else { WillExecute::No }
}
/// `Yes` unless running was globally disabled on the command line.
fn run_if_enabled(&self) -> WillExecute {
if self.config.run_enabled() { WillExecute::Yes } else { WillExecute::Disabled }
}
/// Whether execution is expected to exit successfully.
fn should_run_successfully(&self, pm: Option<PassMode>) -> bool {
match self.config.mode {
Ui | MirOpt => pm == Some(PassMode::Run),
mode => panic!("unimplemented for mode {:?}", mode),
}
}
/// Whether compilation is expected to succeed for this test/revision.
fn should_compile_successfully(&self, pm: Option<PassMode>) -> bool {
match self.config.mode {
JsDocTest => true,
Ui => pm.is_some() || self.props.fail_mode > Some(FailMode::Build),
Incremental => {
// Incremental expectations are encoded in the revision name prefix.
let revision =
self.revision.expect("incremental tests require a list of revisions");
if revision.starts_with("rpass") || revision.starts_with("rfail") {
true
} else if revision.starts_with("cfail") {
// FIXME: would be nice if incremental revs could start with "cpass"
pm.is_some()
} else {
panic!("revision name must begin with rpass, rfail, or cfail");
}
}
mode => panic!("unimplemented for mode {:?}", mode),
}
}
/// Asserts that compilation succeeded or failed as this test expects,
/// including the expected failure status when it should fail.
fn check_if_test_should_compile(&self, proc_res: &ProcRes, pm: Option<PassMode>) {
if self.should_compile_successfully(pm) {
if !proc_res.status.success() {
self.fatal_proc_rec("test compilation failed although it shouldn't!", proc_res);
}
} else {
if proc_res.status.success() {
self.fatal_proc_rec(
&format!("{} test compiled successfully!", self.config.mode)[..],
proc_res,
);
}
self.check_correct_failure_status(proc_res);
}
}
/// Compile-fail test: compilation must fail (or ICE when requested) and
/// the diagnostics must match expected errors / error patterns.
fn run_cfail_test(&self) {
let pm = self.pass_mode();
let proc_res = self.compile_test(WillExecute::No, self.should_emit_metadata(pm));
self.check_if_test_should_compile(&proc_res, pm);
self.check_no_compiler_crash(&proc_res, self.props.should_ice);
let output_to_check = self.get_output(&proc_res);
let expected_errors = errors::load_errors(&self.testpaths.file, self.revision);
if !expected_errors.is_empty() {
// Annotated errors and free-form error patterns are mutually exclusive.
if !self.props.error_patterns.is_empty() {
self.fatal("both error pattern and expected errors specified");
}
self.check_expected_errors(expected_errors, &proc_res);
} else {
self.check_error_patterns(&output_to_check, &proc_res, pm);
}
if self.props.should_ice {
match proc_res.status.code() {
// The compiler exits with code 101 on an ICE (panic).
Some(101) => (),
_ => self.fatal("expected ICE"),
}
}
self.check_forbid_output(&output_to_check, &proc_res);
}
/// Run-fail test: must compile, then the produced binary must exit with
/// the expected failure status and match the error patterns.
fn run_rfail_test(&self) {
let pm = self.pass_mode();
let should_run = self.run_if_enabled();
let proc_res = self.compile_test(should_run, self.should_emit_metadata(pm));
if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}
if let WillExecute::Disabled = should_run {
return;
}
let proc_res = self.exec_compiled_test();
// The value our Makefile configures valgrind to return on failure
const VALGRIND_ERR: i32 = 100;
if proc_res.status.code() == Some(VALGRIND_ERR) {
self.fatal_proc_rec("run-fail test isn't valgrind-clean!", &proc_res);
}
let output_to_check = self.get_output(&proc_res);
self.check_correct_failure_status(&proc_res);
self.check_error_patterns(&output_to_check, &proc_res, pm);
}
/// The process output that diagnostics checks look at: stderr, plus
/// stdout when the header requests `check-stdout`.
fn get_output(&self, proc_res: &ProcRes) -> String {
if self.props.check_stdout {
format!("{}{}", proc_res.stdout, proc_res.stderr)
} else {
proc_res.stderr.clone()
}
}
/// Asserts the process exited with the failure status the header declared.
fn check_correct_failure_status(&self, proc_res: &ProcRes) {
let expected_status = Some(self.props.failure_status);
let received_status = proc_res.status.code();
if expected_status != received_status {
self.fatal_proc_rec(
&format!(
"Error: expected failure status ({:?}) but received status {:?}.",
expected_status, received_status
),
proc_res,
);
}
}
/// Runs a run-pass test: the test must compile without expected-error
/// annotations, and the produced binary must exit successfully.
fn run_rpass_test(&self) {
    let emit_metadata = self.should_emit_metadata(self.pass_mode());
    let should_run = self.run_if_enabled();
    let proc_res = self.compile_test(should_run, emit_metadata);
    if !proc_res.status.success() {
        self.fatal_proc_rec("compilation failed!", &proc_res);
    }
    // If execution is disabled for this target, compiling was all we needed.
    if let WillExecute::Disabled = should_run {
        return;
    }
    // FIXME(#41968): Move this check to tidy?
    let expected_errors = errors::load_errors(&self.testpaths.file, self.revision);
    assert!(
        expected_errors.is_empty(),
        "run-pass tests with expected warnings should be moved to ui/"
    );
    let proc_res = self.exec_compiled_test();
    if !proc_res.status.success() {
        self.fatal_proc_rec("test run failed!", &proc_res);
    }
}
/// Runs a test under valgrind. Falls back to a plain run-pass test when
/// no valgrind path is configured (unless `--force-valgrind` was given,
/// which is asserted against).
fn run_valgrind_test(&self) {
    assert!(self.revision.is_none(), "revisions not relevant here");
    if self.config.valgrind_path.is_none() {
        assert!(!self.config.force_valgrind);
        return self.run_rpass_test();
    }
    let should_run = self.run_if_enabled();
    let mut proc_res = self.compile_test(should_run, EmitMetadata::No);
    if !proc_res.status.success() {
        self.fatal_proc_rec("compilation failed!", &proc_res);
    }
    if let WillExecute::Disabled = should_run {
        return;
    }
    // Re-run the executable with valgrind installed as the runtool, via a
    // cloned config.
    let mut new_config = self.config.clone();
    new_config.runtool = new_config.valgrind_path.clone();
    let new_cx = TestCx { config: &new_config, ..*self };
    proc_res = new_cx.exec_compiled_test();
    if !proc_res.status.success() {
        self.fatal_proc_rec("test run failed!", &proc_res);
    }
}
/// Runs a pretty-printing test: pretty-print the source (one round for
/// `pp-exact` tests, two rounds for convergence tests), compare against
/// the expected output (or bless it), and optionally re-typecheck the
/// printed source to prove it is still valid Rust.
fn run_pretty_test(&self) {
    if self.props.pp_exact.is_some() {
        logv(self.config, "testing for exact pretty-printing".to_owned());
    } else {
        logv(self.config, "testing for converging pretty-printing".to_owned());
    }
    // Exact tests need one round; convergence tests print twice and expect
    // the second round to reproduce the first.
    let rounds = match self.props.pp_exact {
        Some(_) => 1,
        None => 2,
    };
    let src = fs::read_to_string(&self.testpaths.file).unwrap();
    let mut srcs = vec![src];
    let mut round = 0;
    while round < rounds {
        logv(
            self.config,
            format!("pretty-printing round {} revision {:?}", round, self.revision),
        );
        // Round 0 reads the file from disk; later rounds feed the previous
        // round's output back in on stdin.
        let read_from =
            if round == 0 { ReadFrom::Path } else { ReadFrom::Stdin(srcs[round].to_owned()) };
        let proc_res = self.print_source(read_from, &self.props.pretty_mode);
        if !proc_res.status.success() {
            self.fatal_proc_rec(
                &format!(
                    "pretty-printing failed in round {} revision {:?}",
                    round, self.revision
                ),
                &proc_res,
            );
        }
        let ProcRes { stdout, .. } = proc_res;
        srcs.push(stdout);
        round += 1;
    }
    // Expected output: the `pp-exact` file if given, otherwise the
    // next-to-last round (which the final round must reproduce).
    let mut expected = match self.props.pp_exact {
        Some(ref file) => {
            let filepath = self.testpaths.file.parent().unwrap().join(file);
            fs::read_to_string(&filepath).unwrap()
        }
        None => srcs[srcs.len() - 2].clone(),
    };
    let mut actual = srcs[srcs.len() - 1].clone();
    if self.props.pp_exact.is_some() {
        // Now we have to care about line endings
        let cr = "\r".to_owned();
        actual = actual.replace(&cr, "");
        expected = expected.replace(&cr, "");
    }
    if !self.config.bless {
        self.compare_source(&expected, &actual);
    } else if expected != actual {
        // With `--bless`, overwrite the expected output instead of failing.
        let filepath_buf;
        let filepath = match &self.props.pp_exact {
            Some(file) => {
                filepath_buf = self.testpaths.file.parent().unwrap().join(file);
                &filepath_buf
            }
            None => &self.testpaths.file,
        };
        fs::write(filepath, &actual).unwrap();
    }
    // If we're only making sure that the output matches then just stop here
    if self.props.pretty_compare_only {
        return;
    }
    // Finally, let's make sure it actually appears to remain valid code
    let proc_res = self.typecheck_source(actual);
    if !proc_res.status.success() {
        self.fatal_proc_rec("pretty-printed source does not typecheck", &proc_res);
    }
    if !self.props.pretty_expanded {
        return;
    }
    // additionally, run `-Zunpretty=expanded` and try to build it.
    let proc_res = self.print_source(ReadFrom::Path, "expanded");
    if !proc_res.status.success() {
        self.fatal_proc_rec("pretty-printing (expanded) failed", &proc_res);
    }
    let ProcRes { stdout: expanded_src, .. } = proc_res;
    let proc_res = self.typecheck_source(expanded_src);
    if !proc_res.status.success() {
        self.fatal_proc_rec("pretty-printed source (expanded) does not typecheck", &proc_res);
    }
}
/// Pretty-prints the test source by invoking rustc with
/// `-Zunpretty=<pretty_type>`, reading either the file on disk or a
/// source string piped in on stdin. Returns the rustc invocation result.
fn print_source(&self, read_from: ReadFrom, pretty_type: &str) -> ProcRes {
    let aux_dir = self.aux_output_dir_name();
    // "-" tells rustc to read the source from stdin.
    let input: &str = match read_from {
        ReadFrom::Stdin(_) => "-",
        ReadFrom::Path => self.testpaths.file.to_str().unwrap(),
    };
    let mut rustc = Command::new(&self.config.rustc_path);
    rustc
        .arg(input)
        .args(&["-Z", &format!("unpretty={}", pretty_type)])
        .args(&["--target", &self.config.target])
        .arg("-L")
        .arg(&aux_dir)
        .args(&self.props.compile_flags)
        .envs(self.props.rustc_env.clone());
    self.maybe_add_external_args(
        &mut rustc,
        self.split_maybe_args(&self.config.target_rustcflags),
    );
    // The stdin payload, if any.
    let src = match read_from {
        ReadFrom::Stdin(src) => Some(src),
        ReadFrom::Path => None,
    };
    self.compose_and_run(
        rustc,
        self.config.compile_lib_path.to_str().unwrap(),
        Some(aux_dir.to_str().unwrap()),
        src,
    )
}
/// Fails the test (printing expected, actual, and a unified diff) if the
/// pretty-printed source does not match the expected source.
fn compare_source(&self, expected: &str, actual: &str) {
    if expected != actual {
        self.fatal(&format!(
            "pretty-printed source does not match expected source\n\
             expected:\n\
             ------------------------------------------\n\
             {}\n\
             ------------------------------------------\n\
             actual:\n\
             ------------------------------------------\n\
             {}\n\
             ------------------------------------------\n\
             diff:\n\
             ------------------------------------------\n\
             {}\n",
            expected,
            actual,
            write_diff(expected, actual, 3),
        ));
    }
}
/// Passes `--cfg <revision>` for the active test revision, if any.
/// Revision names are lowercased and `-` becomes `_` so they form a valid
/// cfg identifier.
fn set_revision_flags(&self, cmd: &mut Command) {
    if let Some(revision) = self.revision {
        let cfg_name = revision.to_lowercase().replace("-", "_");
        cmd.arg("--cfg").arg(&cfg_name);
    }
}
/// Typechecks the given source string (fed on stdin) with `-Zno-codegen`,
/// used to verify that pretty-printed output is still valid Rust.
fn typecheck_source(&self, src: String) -> ProcRes {
    let mut rustc = Command::new(&self.config.rustc_path);
    // Use a dedicated, freshly-recreated out dir so stale artifacts don't
    // interfere.
    let out_dir = self.output_base_name().with_extension("pretty-out");
    let _ = fs::remove_dir_all(&out_dir);
    create_dir_all(&out_dir).unwrap();
    let target = if self.props.force_host { &*self.config.host } else { &*self.config.target };
    let aux_dir = self.aux_output_dir_name();
    rustc
        .arg("-")
        .arg("-Zno-codegen")
        .arg("--out-dir")
        .arg(&out_dir)
        .arg(&format!("--target={}", target))
        .arg("-L")
        .arg(&self.config.build_base)
        .arg("-L")
        .arg(aux_dir);
    self.set_revision_flags(&mut rustc);
    self.maybe_add_external_args(
        &mut rustc,
        self.split_maybe_args(&self.config.target_rustcflags),
    );
    rustc.args(&self.props.compile_flags);
    self.compose_and_run_compiler(rustc, Some(src))
}
/// Dispatches a debuginfo test to the runner for the configured debugger.
fn run_debuginfo_test(&self) {
    match self.config.debugger.unwrap() {
        Debugger::Cdb => self.run_debuginfo_cdb_test(),
        Debugger::Gdb => self.run_debuginfo_gdb_test(),
        Debugger::Lldb => self.run_debuginfo_lldb_test(),
    }
}
/// Entry point for CDB debuginfo tests: strips optimization/debuginfo
/// flags from the configured rustcflags, then runs the unoptimized test.
fn run_debuginfo_cdb_test(&self) {
    assert!(self.revision.is_none(), "revisions not relevant here");
    let config = Config {
        target_rustcflags: self.cleanup_debug_info_options(&self.config.target_rustcflags),
        host_rustcflags: self.cleanup_debug_info_options(&self.config.host_rustcflags),
        ..self.config.clone()
    };
    let test_cx = TestCx { config: &config, ..*self };
    test_cx.run_debuginfo_cdb_test_no_opt();
}
/// Runs a CDB debuginfo test (without optimizations): compiles the test,
/// builds a CDB command script from `cdb-command:` annotations and
/// `#break` markers, runs CDB, and checks the output against the
/// `cdb-check:` lines.
fn run_debuginfo_cdb_test_no_opt(&self) {
    let exe_file = self.make_exe_name();
    // Existing PDB files are update in-place. When changing the debuginfo
    // the compiler generates for something, this can lead to the situation
    // where both the old and the new version of the debuginfo for the same
    // type is present in the PDB, which is very confusing.
    // Therefore we delete any existing PDB file before compiling the test
    // case.
    // FIXME: If can reliably detect that MSVC's link.exe is used, then
    // passing `/INCREMENTAL:NO` might be a cleaner way to do this.
    //
    // Note: `Path::with_extension` takes the extension *without* its
    // leading dot. Passing ".pdb" here would build a `foo..pdb` path that
    // never matches the real PDB file, so the stale-debuginfo cleanup
    // would silently never happen.
    let pdb_file = exe_file.with_extension("pdb");
    if pdb_file.exists() {
        std::fs::remove_file(pdb_file).unwrap();
    }
    // compile test file (it should have 'compile-flags:-g' in the header)
    let should_run = self.run_if_enabled();
    let compile_result = self.compile_test(should_run, EmitMetadata::No);
    if !compile_result.status.success() {
        self.fatal_proc_rec("compilation failed!", &compile_result);
    }
    if let WillExecute::Disabled = should_run {
        return;
    }
    let prefixes = {
        static PREFIXES: &[&str] = &["cdb", "cdbg"];
        // No "native rust support" variation for CDB yet.
        PREFIXES
    };
    // Parse debugger commands etc from test files
    let DebuggerCommands { commands, check_lines, breakpoint_lines, .. } =
        match DebuggerCommands::parse_from(&self.testpaths.file, self.config, prefixes) {
            Ok(cmds) => cmds,
            Err(e) => self.fatal(&e),
        };
    // https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/debugger-commands
    let mut script_str = String::with_capacity(2048);
    script_str.push_str("version\n"); // List CDB (and more) version info in test output
    script_str.push_str(".nvlist\n"); // List loaded `*.natvis` files, bulk of custom MSVC debug
    // If a .js file exists next to the source file being tested, then this is a JavaScript
    // debugging extension that needs to be loaded.
    let mut js_extension = self.testpaths.file.clone();
    js_extension.set_extension("cdb.js");
    if js_extension.exists() {
        script_str.push_str(&format!(".scriptload \"{}\"\n", js_extension.to_string_lossy()));
    }
    // Set breakpoints on every line that contains the string "#break"
    let source_file_name = self.testpaths.file.file_name().unwrap().to_string_lossy();
    for line in &breakpoint_lines {
        script_str.push_str(&format!("bp `{}:{}`\n", source_file_name, line));
    }
    // Append the other `cdb-command:`s
    for line in &commands {
        script_str.push_str(line);
        script_str.push_str("\n");
    }
    script_str.push_str("\nqq\n"); // Quit the debugger (including remote debugger, if any)
    // Write the script into a file
    debug!("script_str = {}", script_str);
    self.dump_output_file(&script_str, "debugger.script");
    let debugger_script = self.make_out_name("debugger.script");
    let cdb_path = &self.config.cdb.as_ref().unwrap();
    let mut cdb = Command::new(cdb_path);
    cdb.arg("-lines") // Enable source line debugging.
        .arg("-cf")
        .arg(&debugger_script)
        .arg(&exe_file);
    let debugger_run_result = self.compose_and_run(
        cdb,
        self.config.run_lib_path.to_str().unwrap(),
        None, // aux_path
        None, // input
    );
    if !debugger_run_result.status.success() {
        self.fatal_proc_rec("Error while running CDB", &debugger_run_result);
    }
    if let Err(e) = check_debugger_output(&debugger_run_result, &check_lines) {
        self.fatal_proc_rec(&e, &debugger_run_result);
    }
}
/// Entry point for GDB debuginfo tests: strips optimization/debuginfo
/// flags from the configured rustcflags, then runs the unoptimized test.
fn run_debuginfo_gdb_test(&self) {
    assert!(self.revision.is_none(), "revisions not relevant here");
    let config = Config {
        target_rustcflags: self.cleanup_debug_info_options(&self.config.target_rustcflags),
        host_rustcflags: self.cleanup_debug_info_options(&self.config.host_rustcflags),
        ..self.config.clone()
    };
    let test_cx = TestCx { config: &config, ..*self };
    test_cx.run_debuginfo_gdb_test_no_opt();
}
/// Runs a GDB debuginfo test (without optimizations): compiles the test,
/// writes a GDB command script from the test's `gdb-command:` annotations
/// and `#break` markers, runs GDB (remotely via adb/gdbserver for Android
/// targets), and checks the debugger output against the check lines.
fn run_debuginfo_gdb_test_no_opt(&self) {
    // Which comment prefixes apply depends on whether this GDB has native
    // Rust language support.
    let prefixes = if self.config.gdb_native_rust {
        // GDB with Rust
        static PREFIXES: &[&str] = &["gdb", "gdbr"];
        println!("NOTE: compiletest thinks it is using GDB with native rust support");
        PREFIXES
    } else {
        // Generic GDB
        static PREFIXES: &[&str] = &["gdb", "gdbg"];
        println!("NOTE: compiletest thinks it is using GDB without native rust support");
        PREFIXES
    };
    let DebuggerCommands { commands, check_lines, breakpoint_lines } =
        match DebuggerCommands::parse_from(&self.testpaths.file, self.config, prefixes) {
            Ok(cmds) => cmds,
            Err(e) => self.fatal(&e),
        };
    let mut cmds = commands.join("\n");
    // compile test file (it should have 'compile-flags:-g' in the header)
    let should_run = self.run_if_enabled();
    let compiler_run_result = self.compile_test(should_run, EmitMetadata::No);
    if !compiler_run_result.status.success() {
        self.fatal_proc_rec("compilation failed!", &compiler_run_result);
    }
    if let WillExecute::Disabled = should_run {
        return;
    }
    let exe_file = self.make_exe_name();
    let debugger_run_result;
    if is_android_gdb_target(&self.config.target) {
        // Remote debugging: run gdbserver on the device via adb and attach
        // the host GDB to it.
        cmds = cmds.replace("run", "continue");
        let tool_path = match self.config.android_cross_path.to_str() {
            Some(x) => x.to_owned(),
            None => self.fatal("cannot find android cross path"),
        };
        // write debugger script
        let mut script_str = String::with_capacity(2048);
        script_str.push_str(&format!("set charset {}\n", Self::charset()));
        script_str.push_str(&format!("set sysroot {}\n", tool_path));
        script_str.push_str(&format!("file {}\n", exe_file.to_str().unwrap()));
        script_str.push_str("target remote :5039\n");
        script_str.push_str(&format!(
            "set solib-search-path \
             ./{}/stage2/lib/rustlib/{}/lib/\n",
            self.config.host, self.config.target
        ));
        for line in &breakpoint_lines {
            script_str.push_str(
                &format!(
                    "break {:?}:{}\n",
                    self.testpaths.file.file_name().unwrap().to_string_lossy(),
                    *line
                )[..],
            );
        }
        script_str.push_str(&cmds);
        script_str.push_str("\nquit\n");
        debug!("script_str = {}", script_str);
        self.dump_output_file(&script_str, "debugger.script");
        let adb_path = &self.config.adb_path;
        // Push the test binary to the device and forward the gdbserver port.
        Command::new(adb_path)
            .arg("push")
            .arg(&exe_file)
            .arg(&self.config.adb_test_dir)
            .status()
            .unwrap_or_else(|_| panic!("failed to exec `{:?}`", adb_path));
        Command::new(adb_path)
            .args(&["forward", "tcp:5039", "tcp:5039"])
            .status()
            .unwrap_or_else(|_| panic!("failed to exec `{:?}`", adb_path));
        let adb_arg = format!(
            "export LD_LIBRARY_PATH={}; \
             gdbserver{} :5039 {}/{}",
            self.config.adb_test_dir.clone(),
            if self.config.target.contains("aarch64") { "64" } else { "" },
            self.config.adb_test_dir.clone(),
            exe_file.file_name().unwrap().to_str().unwrap()
        );
        debug!("adb arg: {}", adb_arg);
        let mut adb = Command::new(adb_path)
            .args(&["shell", &adb_arg])
            .stdout(Stdio::piped())
            .stderr(Stdio::inherit())
            .spawn()
            .unwrap_or_else(|_| panic!("failed to exec `{:?}`", adb_path));
        // Wait for the gdbserver to print out "Listening on port ..."
        // at which point we know that it's started and then we can
        // execute the debugger below.
        let mut stdout = BufReader::new(adb.stdout.take().unwrap());
        let mut line = String::new();
        loop {
            line.truncate(0);
            stdout.read_line(&mut line).unwrap();
            if line.starts_with("Listening on port 5039") {
                break;
            }
        }
        drop(stdout);
        let mut debugger_script = OsString::from("-command=");
        debugger_script.push(self.make_out_name("debugger.script"));
        let debugger_opts: &[&OsStr] =
            &["-quiet".as_ref(), "-batch".as_ref(), "-nx".as_ref(), &debugger_script];
        let gdb_path = self.config.gdb.as_ref().unwrap();
        let Output { status, stdout, stderr } = Command::new(&gdb_path)
            .args(debugger_opts)
            .output()
            .unwrap_or_else(|_| panic!("failed to exec `{:?}`", gdb_path));
        // Reconstruct a command line for logging/diagnostics only; the real
        // GDB invocation already ran above.
        let cmdline = {
            let mut gdb = Command::new(&format!("{}-gdb", self.config.target));
            gdb.args(debugger_opts);
            let cmdline = self.make_cmdline(&gdb, "");
            logv(self.config, format!("executing {}", cmdline));
            cmdline
        };
        debugger_run_result = ProcRes {
            status,
            stdout: String::from_utf8(stdout).unwrap(),
            stderr: String::from_utf8(stderr).unwrap(),
            cmdline,
        };
        if adb.kill().is_err() {
            println!("Adb process is already finished.");
        }
    } else {
        // Local debugging with the host GDB plus the Rust pretty-printers
        // from the source tree.
        let rust_src_root =
            self.config.find_rust_src_root().expect("Could not find Rust source root");
        let rust_pp_module_rel_path = Path::new("./src/etc");
        let rust_pp_module_abs_path =
            rust_src_root.join(rust_pp_module_rel_path).to_str().unwrap().to_owned();
        // write debugger script
        let mut script_str = String::with_capacity(2048);
        script_str.push_str(&format!("set charset {}\n", Self::charset()));
        script_str.push_str("show version\n");
        match self.config.gdb_version {
            Some(version) => {
                println!("NOTE: compiletest thinks it is using GDB version {}", version);
                if version > extract_gdb_version("7.4").unwrap() {
                    // Add the directory containing the pretty printers to
                    // GDB's script auto loading safe path
                    script_str.push_str(&format!(
                        "add-auto-load-safe-path {}\n",
                        rust_pp_module_abs_path.replace(r"\", r"\\")
                    ));
                }
            }
            _ => {
                println!(
                    "NOTE: compiletest does not know which version of \
                     GDB it is using"
                );
            }
        }
        // The following line actually doesn't have to do anything with
        // pretty printing, it just tells GDB to print values on one line:
        script_str.push_str("set print pretty off\n");
        // Add the pretty printer directory to GDB's source-file search path
        script_str
            .push_str(&format!("directory {}\n", rust_pp_module_abs_path.replace(r"\", r"\\")));
        // Load the target executable
        script_str
            .push_str(&format!("file {}\n", exe_file.to_str().unwrap().replace(r"\", r"\\")));
        // Force GDB to print values in the Rust format.
        if self.config.gdb_native_rust {
            script_str.push_str("set language rust\n");
        }
        // Add line breakpoints
        for line in &breakpoint_lines {
            script_str.push_str(&format!(
                "break '{}':{}\n",
                self.testpaths.file.file_name().unwrap().to_string_lossy(),
                *line
            ));
        }
        script_str.push_str(&cmds);
        script_str.push_str("\nquit\n");
        debug!("script_str = {}", script_str);
        self.dump_output_file(&script_str, "debugger.script");
        let mut debugger_script = OsString::from("-command=");
        debugger_script.push(self.make_out_name("debugger.script"));
        let debugger_opts: &[&OsStr] =
            &["-quiet".as_ref(), "-batch".as_ref(), "-nx".as_ref(), &debugger_script];
        let mut gdb = Command::new(self.config.gdb.as_ref().unwrap());
        gdb.args(debugger_opts).env("PYTHONPATH", rust_pp_module_abs_path);
        debugger_run_result =
            self.compose_and_run(gdb, self.config.run_lib_path.to_str().unwrap(), None, None);
    }
    if !debugger_run_result.status.success() {
        self.fatal_proc_rec("gdb failed to execute", &debugger_run_result);
    }
    if let Err(e) = check_debugger_output(&debugger_run_result, &check_lines) {
        self.fatal_proc_rec(&e, &debugger_run_result);
    }
}
/// Entry point for LLDB debuginfo tests: requires LLDB's python dir to be
/// configured, strips optimization/debuginfo flags from the configured
/// rustcflags, then runs the unoptimized test.
fn run_debuginfo_lldb_test(&self) {
    assert!(self.revision.is_none(), "revisions not relevant here");
    if self.config.lldb_python_dir.is_none() {
        self.fatal("Can't run LLDB test because LLDB's python path is not set.");
    }
    let config = Config {
        target_rustcflags: self.cleanup_debug_info_options(&self.config.target_rustcflags),
        host_rustcflags: self.cleanup_debug_info_options(&self.config.host_rustcflags),
        ..self.config.clone()
    };
    let test_cx = TestCx { config: &config, ..*self };
    test_cx.run_debuginfo_lldb_test_no_opt();
}
/// Runs an LLDB debuginfo test (without optimizations): compiles the
/// test, generates an LLDB script that installs the Rust pretty-printers,
/// sets breakpoints at `#break` markers, runs the test's debugger
/// commands, and checks the debugger output against the check lines.
fn run_debuginfo_lldb_test_no_opt(&self) {
    // compile test file (it should have 'compile-flags:-g' in the header)
    let should_run = self.run_if_enabled();
    let compile_result = self.compile_test(should_run, EmitMetadata::No);
    if !compile_result.status.success() {
        self.fatal_proc_rec("compilation failed!", &compile_result);
    }
    if let WillExecute::Disabled = should_run {
        return;
    }
    let exe_file = self.make_exe_name();
    match self.config.lldb_version {
        Some(ref version) => {
            println!("NOTE: compiletest thinks it is using LLDB version {}", version);
        }
        _ => {
            println!(
                "NOTE: compiletest does not know which version of \
                 LLDB it is using"
            );
        }
    }
    // Which comment prefixes apply depends on whether this LLDB has native
    // Rust support.
    let prefixes = if self.config.lldb_native_rust {
        static PREFIXES: &[&str] = &["lldb", "lldbr"];
        println!("NOTE: compiletest thinks it is using LLDB with native rust support");
        PREFIXES
    } else {
        static PREFIXES: &[&str] = &["lldb", "lldbg"];
        println!("NOTE: compiletest thinks it is using LLDB without native rust support");
        PREFIXES
    };
    // Parse debugger commands etc from test files
    let DebuggerCommands { commands, check_lines, breakpoint_lines, .. } =
        match DebuggerCommands::parse_from(&self.testpaths.file, self.config, prefixes) {
            Ok(cmds) => cmds,
            Err(e) => self.fatal(&e),
        };
    // Write debugger script:
    // We don't want to hang when calling `quit` while the process is still running
    let mut script_str = String::from("settings set auto-confirm true\n");
    // Make LLDB emit its version, so we have it documented in the test output
    script_str.push_str("version\n");
    // Switch LLDB into "Rust mode"
    let rust_src_root =
        self.config.find_rust_src_root().expect("Could not find Rust source root");
    let rust_pp_module_rel_path = Path::new("./src/etc/lldb_lookup.py");
    let rust_pp_module_abs_path =
        rust_src_root.join(rust_pp_module_rel_path).to_str().unwrap().to_owned();
    // Standard-library types that get the Rust summary provider attached.
    let rust_type_regexes = vec![
        "^(alloc::([a-z_]+::)+)String$",
        "^&(mut )?str$",
        "^&(mut )?\\[.+\\]$",
        "^(std::ffi::([a-z_]+::)+)OsString$",
        "^(alloc::([a-z_]+::)+)Vec<.+>$",
        "^(alloc::([a-z_]+::)+)VecDeque<.+>$",
        "^(alloc::([a-z_]+::)+)BTreeSet<.+>$",
        "^(alloc::([a-z_]+::)+)BTreeMap<.+>$",
        "^(std::collections::([a-z_]+::)+)HashMap<.+>$",
        "^(std::collections::([a-z_]+::)+)HashSet<.+>$",
        "^(alloc::([a-z_]+::)+)Rc<.+>$",
        "^(alloc::([a-z_]+::)+)Arc<.+>$",
        "^(core::([a-z_]+::)+)Cell<.+>$",
        "^(core::([a-z_]+::)+)Ref<.+>$",
        "^(core::([a-z_]+::)+)RefMut<.+>$",
        "^(core::([a-z_]+::)+)RefCell<.+>$",
    ];
    script_str
        .push_str(&format!("command script import {}\n", &rust_pp_module_abs_path[..])[..]);
    script_str.push_str("type synthetic add -l lldb_lookup.synthetic_lookup -x '.*' ");
    script_str.push_str("--category Rust\n");
    for type_regex in rust_type_regexes {
        script_str.push_str("type summary add -F lldb_lookup.summary_lookup -e -x -h ");
        script_str.push_str(&format!("'{}' ", type_regex));
        script_str.push_str("--category Rust\n");
    }
    script_str.push_str("type category enable Rust\n");
    // Set breakpoints on every line that contains the string "#break"
    let source_file_name = self.testpaths.file.file_name().unwrap().to_string_lossy();
    for line in &breakpoint_lines {
        script_str.push_str(&format!(
            "breakpoint set --file '{}' --line {}\n",
            source_file_name, line
        ));
    }
    // Append the other commands
    for line in &commands {
        script_str.push_str(line);
        script_str.push_str("\n");
    }
    // Finally, quit the debugger
    script_str.push_str("\nquit\n");
    // Write the script into a file
    debug!("script_str = {}", script_str);
    self.dump_output_file(&script_str, "debugger.script");
    let debugger_script = self.make_out_name("debugger.script");
    // Let LLDB execute the script via lldb_batchmode.py
    let debugger_run_result = self.run_lldb(&exe_file, &debugger_script, &rust_src_root);
    if !debugger_run_result.status.success() {
        self.fatal_proc_rec("Error while running LLDB", &debugger_run_result);
    }
    if let Err(e) = check_debugger_output(&debugger_run_result, &check_lines) {
        self.fatal_proc_rec(&e, &debugger_run_result);
    }
}
/// Executes the compiled test under LLDB by driving it through the
/// `lldb_batchmode.py` helper script from the Rust source tree.
fn run_lldb(
    &self,
    test_executable: &Path,
    debugger_script: &Path,
    rust_src_root: &Path,
) -> ProcRes {
    // Prepare the lldb_batchmode which executes the debugger script
    let lldb_script_path = rust_src_root.join("src/etc/lldb_batchmode.py");
    let mut python = Command::new(&self.config.python);
    python
        .arg(&lldb_script_path)
        .arg(test_executable)
        .arg(debugger_script)
        // Help debugging #78665
        .env("PYTHONUNBUFFERED", "1")
        .env("PYTHONPATH", self.config.lldb_python_dir.as_ref().unwrap());
    self.cmd2procres(&mut python)
}
/// Runs `cmd` to completion and wraps its captured output in a `ProcRes`,
/// dumping stdout/stderr to the test's output files along the way.
fn cmd2procres(&self, cmd: &mut Command) -> ProcRes {
    let Output { status, stdout, stderr } = cmd.output().unwrap_or_else(|e| {
        self.fatal(&format!("Failed to setup Python process for LLDB script: {}", e))
    });
    let stdout = String::from_utf8(stdout).unwrap();
    let stderr = String::from_utf8(stderr).unwrap();
    self.dump_output(&stdout, &stderr);
    ProcRes { status, stdout, stderr, cmdline: format!("{:?}", cmd) }
}
/// Strips options from the given flag string that are either unwanted for
/// debuginfo tests (`-O`) or may lead to duplicates due to RUSTFLAGS
/// (`-g`, `--debuginfo`). Returns `None` when no options were given.
fn cleanup_debug_info_options(&self, options: &Option<String>) -> Option<String> {
    // Early-out on `None` via `?` instead of an `is_none()` check.
    options.as_ref()?;
    // Compare against static &str literals rather than allocating three
    // `String`s on every call.
    const OPTIONS_TO_REMOVE: &[&str] = &["-O", "-g", "--debuginfo"];
    let new_options = self
        .split_maybe_args(options)
        .into_iter()
        .filter(|x| !OPTIONS_TO_REMOVE.contains(&x.as_str()))
        .collect::<Vec<String>>();
    Some(new_options.join(" "))
}
/// Adds the external (config-provided) rustc flags in `args` to `cmd`,
/// skipping optimization and debuginfo flags whenever the test itself
/// already sets one via `compile-flags` — the test's own choice wins.
fn maybe_add_external_args(&self, cmd: &mut Command, args: Vec<String>) {
    // Flag prefixes controlling optimisation / debug-info levels; the bare
    // `opt-level=` / `debuginfo=` forms cover the `-C <space> …` spelling.
    const OPT_FLAGS: &[&str] = &["-O", "-Copt-level=", "opt-level="];
    const DEBUG_FLAGS: &[&str] = &["-g", "-Cdebuginfo=", "debuginfo="];
    let matches_any = |prefixes: &[&str], arg: &str| prefixes.iter().any(|p| arg.starts_with(p));
    // FIXME: ideally we would "just" check the `cmd` itself, but it does not
    // allow inspecting its arguments, so the test's own compile flags are
    // consulted instead.
    let have_opt_flag =
        self.props.compile_flags.iter().any(|arg| matches_any(OPT_FLAGS, arg.as_str()));
    let have_debug_flag =
        self.props.compile_flags.iter().any(|arg| matches_any(DEBUG_FLAGS, arg.as_str()));
    for arg in args {
        let overridden = (have_opt_flag && matches_any(OPT_FLAGS, arg.as_str()))
            || (have_debug_flag && matches_any(DEBUG_FLAGS, arg.as_str()));
        if !overridden {
            cmd.arg(arg);
        }
    }
}
/// Checks that every `error-pattern` from the test header appears in the
/// given output; reports missing patterns as fatal errors. Having no
/// patterns at all is fatal unless a pass mode is in effect.
fn check_error_patterns(
    &self,
    output_to_check: &str,
    proc_res: &ProcRes,
    pm: Option<PassMode>,
) {
    debug!("check_error_patterns");
    if self.props.error_patterns.is_empty() {
        if pm.is_some() {
            // FIXME(#65865)
            return;
        } else {
            self.fatal(&format!(
                "no error pattern specified in {:?}",
                self.testpaths.file.display()
            ));
        }
    }
    let mut missing_patterns: Vec<String> = Vec::new();
    for pattern in &self.props.error_patterns {
        if output_to_check.contains(pattern.trim()) {
            debug!("found error pattern {}", pattern);
        } else {
            missing_patterns.push(pattern.to_string());
        }
    }
    if missing_patterns.is_empty() {
        return;
    }
    // A single missing pattern is reported directly; multiple missing
    // patterns are each listed before the fatal summary.
    if missing_patterns.len() == 1 {
        self.fatal_proc_rec(
            &format!("error pattern '{}' not found!", missing_patterns[0]),
            proc_res,
        );
    } else {
        for pattern in missing_patterns {
            self.error(&format!("error pattern '{}' not found!", pattern));
        }
        self.fatal_proc_rec("multiple error patterns not found", proc_res);
    }
}
/// Fails the test if the compiler crashed: exit code 101 (an ICE, unless
/// the test expects one) or termination by signal (no exit code at all).
fn check_no_compiler_crash(&self, proc_res: &ProcRes, should_ice: bool) {
    if proc_res.status.code().is_none() {
        self.fatal_proc_rec("compiler terminated by signal", proc_res);
    }
    if proc_res.status.code() == Some(101) && !should_ice {
        self.fatal_proc_rec("compiler encountered internal error", proc_res);
    }
}
/// Fails the test if any `forbid-output` pattern from the test header
/// appears in the compiler output.
fn check_forbid_output(&self, output_to_check: &str, proc_res: &ProcRes) {
    let found_forbidden =
        self.props.forbid_output.iter().any(|pat| output_to_check.contains(pat.as_str()));
    if found_forbidden {
        self.fatal_proc_rec("forbidden pattern found in compiler output", proc_res);
    }
}
/// Compares the compiler's JSON diagnostics against the error annotations
/// in the test file: every annotated diagnostic must be emitted, and
/// every emitted error/warning (plus help/note, when any are annotated)
/// must itself be annotated. Mismatches in either direction fail the test.
fn check_expected_errors(&self, expected_errors: Vec<errors::Error>, proc_res: &ProcRes) {
    debug!(
        "check_expected_errors: expected_errors={:?} proc_res.status={:?}",
        expected_errors, proc_res.status
    );
    if proc_res.status.success()
        && expected_errors.iter().any(|x| x.kind == Some(ErrorKind::Error))
    {
        self.fatal_proc_rec("process did not return an error status", proc_res);
    }
    if self.props.known_bug {
        if !expected_errors.is_empty() {
            self.fatal_proc_rec(
                // (message grammar fixed: was "should not have an expected errors")
                "`known_bug` tests should not have expected errors",
                proc_res,
            );
        }
        return;
    }
    // On Windows, keep all '\' path separators to match the paths reported in the JSON output
    // from the compiler
    let os_file_name = self.testpaths.file.display().to_string();
    // on windows, translate all '\' path separators to '/'
    let file_name = format!("{}", self.testpaths.file.display()).replace(r"\", "/");
    // If the testcase being checked contains at least one expected "help"
    // message, then we'll ensure that all "help" messages are expected.
    // Otherwise, all "help" messages reported by the compiler will be ignored.
    // This logic also applies to "note" messages.
    let expect_help = expected_errors.iter().any(|ee| ee.kind == Some(ErrorKind::Help));
    let expect_note = expected_errors.iter().any(|ee| ee.kind == Some(ErrorKind::Note));
    // Parse the JSON output from the compiler and extract out the messages.
    let actual_errors = json::parse_output(&os_file_name, &proc_res.stderr, proc_res);
    let mut unexpected = Vec::new();
    let mut found = vec![false; expected_errors.len()];
    for actual_error in &actual_errors {
        // Match each emitted diagnostic against the first not-yet-matched
        // annotation on the same line with a compatible kind and message.
        let opt_index =
            expected_errors.iter().enumerate().position(|(index, expected_error)| {
                !found[index]
                    && actual_error.line_num == expected_error.line_num
                    && (expected_error.kind.is_none()
                        || actual_error.kind == expected_error.kind)
                    && actual_error.msg.contains(&expected_error.msg)
            });
        match opt_index {
            Some(index) => {
                // found a match, everybody is happy
                assert!(!found[index]);
                found[index] = true;
            }
            None => {
                // If the test is a known bug, don't require that the error is annotated
                if self.is_unexpected_compiler_message(actual_error, expect_help, expect_note) {
                    self.error(&format!(
                        "{}:{}: unexpected {}: '{}'",
                        file_name,
                        actual_error.line_num,
                        actual_error
                            .kind
                            .as_ref()
                            .map_or(String::from("message"), |k| k.to_string()),
                        actual_error.msg
                    ));
                    unexpected.push(actual_error);
                }
            }
        }
    }
    let mut not_found = Vec::new();
    // anything not yet found is a problem
    for (index, expected_error) in expected_errors.iter().enumerate() {
        if !found[index] {
            self.error(&format!(
                "{}:{}: expected {} not found: {}",
                file_name,
                expected_error.line_num,
                expected_error.kind.as_ref().map_or("message".into(), |k| k.to_string()),
                expected_error.msg
            ));
            not_found.push(expected_error);
        }
    }
    if !unexpected.is_empty() || !not_found.is_empty() {
        self.error(&format!(
            "{} unexpected errors found, {} expected errors not found",
            unexpected.len(),
            not_found.len()
        ));
        println!("status: {}\ncommand: {}", proc_res.status, proc_res.cmdline);
        if !unexpected.is_empty() {
            println!("unexpected errors (from JSON output): {:#?}\n", unexpected);
        }
        if !not_found.is_empty() {
            println!("not found errors (from test file): {:#?}\n", not_found);
        }
        panic!();
    }
}
/// Returns `true` if `actual_error` — a diagnostic that matched none of
/// the expected annotations — should be reported. Errors and warnings
/// must always be explicitly annotated; helps/notes only when the test
/// annotates at least one help/note; suggestions and unclassified
/// messages never.
fn is_unexpected_compiler_message(
    &self,
    actual_error: &Error,
    expect_help: bool,
    expect_note: bool,
) -> bool {
    match actual_error.kind {
        Some(ErrorKind::Error) | Some(ErrorKind::Warning) => true,
        Some(ErrorKind::Help) => expect_help,
        Some(ErrorKind::Note) => expect_note,
        Some(ErrorKind::Suggestion) | None => false,
    }
}
fn should_emit_metadata(&self, pm: Option<PassMode>) -> EmitMetadata {
match (pm, self.props.fail_mode, self.config.mode) {
(Some(PassMode::Check), ..) | (_, Some(FailMode::Check), Ui) => EmitMetadata::Yes,
_ => EmitMetadata::No,
}
}
/// Compiles the test file using the test's own (local) pass mode; see
/// `compile_test_general` for the details.
fn compile_test(&self, will_execute: WillExecute, emit_metadata: EmitMetadata) -> ProcRes {
    self.compile_test_general(will_execute, emit_metadata, self.props.local_pass_mode())
}
/// Compiles the test file, choosing the output location based on whether
/// the result will be executed and deciding whether unused-code lints
/// should be allowed (UI tests, unless they are run-pass).
fn compile_test_general(
    &self,
    will_execute: WillExecute,
    emit_metadata: EmitMetadata,
    local_pm: Option<PassMode>,
) -> ProcRes {
    // Only use `make_exe_name` when the test ends up being executed.
    let output_file = match will_execute {
        WillExecute::Yes => TargetLocation::ThisFile(self.make_exe_name()),
        WillExecute::No | WillExecute::Disabled => {
            TargetLocation::ThisDirectory(self.output_base_dir())
        }
    };
    let allow_unused = match self.config.mode {
        Ui => {
            // UI tests tend to have tons of unused code as
            // it's just testing various pieces of the compile, but we don't
            // want to actually assert warnings about all this code. Instead
            // let's just ignore unused code warnings by defaults and tests
            // can turn it back on if needed.
            if !self.is_rustdoc()
                // Note that we use the local pass mode here as we don't want
                // to set unused to allow if we've overridden the pass mode
                // via command line flags.
                && local_pm != Some(PassMode::Run)
            {
                AllowUnused::Yes
            } else {
                AllowUnused::No
            }
        }
        _ => AllowUnused::No,
    };
    let mut rustc =
        self.make_compile_args(&self.testpaths.file, output_file, emit_metadata, allow_unused);
    rustc.arg("-L").arg(&self.aux_output_dir_name());
    self.compose_and_run_compiler(rustc, None)
}
/// Runs rustdoc on the test file, writing the generated docs into
/// `out_dir`. When `build-aux-docs` is set, documents every aux build
/// first (recursively), bailing out on the first failure.
fn document(&self, out_dir: &Path) -> ProcRes {
    if self.props.build_aux_docs {
        for rel_ab in &self.props.aux_builds {
            let aux_testpaths = self.compute_aux_test_paths(rel_ab);
            let aux_props =
                self.props.from_aux_file(&aux_testpaths.file, self.revision, self.config);
            let aux_cx = TestCx {
                config: self.config,
                props: &aux_props,
                testpaths: &aux_testpaths,
                revision: self.revision,
            };
            // Create the directory for the stdout/stderr files.
            create_dir_all(aux_cx.output_base_dir()).unwrap();
            let auxres = aux_cx.document(out_dir);
            if !auxres.status.success() {
                return auxres;
            }
        }
    }
    let aux_dir = self.aux_output_dir_name();
    let rustdoc_path = self.config.rustdoc_path.as_ref().expect("--rustdoc-path not passed");
    let mut rustdoc = Command::new(rustdoc_path);
    rustdoc
        .arg("-L")
        .arg(self.config.run_lib_path.to_str().unwrap())
        .arg("-L")
        .arg(aux_dir)
        .arg("-o")
        .arg(out_dir)
        .arg("--deny")
        .arg("warnings")
        .arg(&self.testpaths.file)
        .args(&self.props.compile_flags);
    // Rustdoc-JSON tests additionally need the (unstable) JSON backend.
    if self.config.mode == RustdocJson {
        rustdoc.arg("--output-format").arg("json").arg("-Zunstable-options");
    }
    if let Some(ref linker) = self.config.linker {
        rustdoc.arg(format!("-Clinker={}", linker));
    }
    self.compose_and_run_compiler(rustdoc, None)
}
    /// Executes the compiled test binary and captures its output.
    ///
    /// Depending on configuration the program is run through
    /// `remote-test-client` (uploading the binary plus any support dylibs to a
    /// remote/emulated device), through VxWorks' `wr-run`, or directly on the
    /// host. On success the executable is deleted to save disk space.
    fn exec_compiled_test(&self) -> ProcRes {
        let env = &self.props.exec_env;
        let proc_res = match &*self.config.target {
            // This is pretty similar to below, we're transforming:
            //
            // program arg1 arg2
            //
            // into
            //
            // remote-test-client run program 2 support-lib.so support-lib2.so arg1 arg2
            //
            // The test-client program will upload `program` to the emulator
            // along with all other support libraries listed (in this case
            // `support-lib.so` and `support-lib2.so`. It will then execute
            // the program on the emulator with the arguments specified
            // (in the environment we give the process) and then report back
            // the same result.
            _ if self.config.remote_test_client.is_some() => {
                let aux_dir = self.aux_output_dir_name();
                let ProcArgs { prog, args } = self.make_run_args();
                // Every file in the aux dir is treated as a support library
                // to be uploaded alongside the test program.
                let mut support_libs = Vec::new();
                if let Ok(entries) = aux_dir.read_dir() {
                    for entry in entries {
                        let entry = entry.unwrap();
                        if !entry.path().is_file() {
                            continue;
                        }
                        support_libs.push(entry.path());
                    }
                }
                let mut test_client =
                    Command::new(self.config.remote_test_client.as_ref().unwrap());
                test_client
                    .args(&["run", &support_libs.len().to_string(), &prog])
                    .args(support_libs)
                    .args(args)
                    .envs(env.clone());
                self.compose_and_run(
                    test_client,
                    self.config.run_lib_path.to_str().unwrap(),
                    Some(aux_dir.to_str().unwrap()),
                    None,
                )
            }
            _ if self.config.target.contains("vxworks") => {
                let aux_dir = self.aux_output_dir_name();
                let ProcArgs { prog, args } = self.make_run_args();
                let mut wr_run = Command::new("wr-run");
                wr_run.args(&[&prog]).args(args).envs(env.clone());
                self.compose_and_run(
                    wr_run,
                    self.config.run_lib_path.to_str().unwrap(),
                    Some(aux_dir.to_str().unwrap()),
                    None,
                )
            }
            _ => {
                // Plain local execution, from the test's output directory.
                let aux_dir = self.aux_output_dir_name();
                let ProcArgs { prog, args } = self.make_run_args();
                let mut program = Command::new(&prog);
                program.args(args).current_dir(&self.output_base_dir()).envs(env.clone());
                self.compose_and_run(
                    program,
                    self.config.run_lib_path.to_str().unwrap(),
                    Some(aux_dir.to_str().unwrap()),
                    None,
                )
            }
        };
        if proc_res.status.success() {
            // delete the executable after running it to save space.
            // it is ok if the deletion failed.
            let _ = fs::remove_file(self.make_exe_name());
        }
        proc_res
    }
/// For each `aux-build: foo/bar` annotation, we check to find the
/// file in an `auxiliary` directory relative to the test itself.
fn compute_aux_test_paths(&self, rel_ab: &str) -> TestPaths {
let test_ab = self
.testpaths
.file
.parent()
.expect("test file path has no parent")
.join("auxiliary")
.join(rel_ab);
if !test_ab.exists() {
self.fatal(&format!("aux-build `{}` source not found", test_ab.display()))
}
TestPaths {
file: test_ab,
relative_dir: self
.testpaths
.relative_dir
.join(self.output_testname_unique())
.join("auxiliary")
.join(rel_ab)
.parent()
.expect("aux-build path has no parent")
.to_path_buf(),
}
}
fn is_vxworks_pure_static(&self) -> bool {
if self.config.target.contains("vxworks") {
match env::var("RUST_VXWORKS_TEST_DYLINK") {
Ok(s) => s != "1",
_ => true,
}
} else {
false
}
}
fn is_vxworks_pure_dynamic(&self) -> bool {
self.config.target.contains("vxworks") && !self.is_vxworks_pure_static()
}
    /// Builds every `aux-build`/`aux-crate` dependency, adding `--extern`
    /// flags for named aux crates to `rustc`. Returns the aux output
    /// directory so callers can put it on the dylib search path.
    fn build_all_auxiliary(&self, rustc: &mut Command) -> PathBuf {
        let aux_dir = self.aux_output_dir_name();
        if !self.props.aux_builds.is_empty() {
            // Start from a clean aux dir so stale artifacts can't leak in.
            let _ = fs::remove_dir_all(&aux_dir);
            create_dir_all(&aux_dir).unwrap();
        }
        for rel_ab in &self.props.aux_builds {
            self.build_auxiliary(rel_ab, &aux_dir);
        }
        for (aux_name, aux_path) in &self.props.aux_crates {
            let is_dylib = self.build_auxiliary(&aux_path, &aux_dir);
            // Library file name: crate-ified path stem, dylib vs rlib chosen
            // by what `build_auxiliary` actually produced.
            let lib_name =
                get_lib_name(&aux_path.trim_end_matches(".rs").replace('-', "_"), is_dylib);
            rustc.arg("--extern").arg(format!("{}={}/{}", aux_name, aux_dir.display(), lib_name));
        }
        aux_dir
    }
fn compose_and_run_compiler(&self, mut rustc: Command, input: Option<String>) -> ProcRes {
let aux_dir = self.build_all_auxiliary(&mut rustc);
self.props.unset_rustc_env.clone().iter().fold(&mut rustc, |rustc, v| rustc.env_remove(v));
rustc.envs(self.props.rustc_env.clone());
self.compose_and_run(
rustc,
self.config.compile_lib_path.to_str().unwrap(),
Some(aux_dir.to_str().unwrap()),
input,
)
}
    /// Builds an aux dependency.
    ///
    /// Returns whether or not it is a dylib.
    fn build_auxiliary(&self, source_path: &str, aux_dir: &Path) -> bool {
        let aux_testpaths = self.compute_aux_test_paths(source_path);
        let aux_props = self.props.from_aux_file(&aux_testpaths.file, self.revision, self.config);
        let aux_output = TargetLocation::ThisDirectory(self.aux_output_dir_name());
        let aux_cx = TestCx {
            config: self.config,
            props: &aux_props,
            testpaths: &aux_testpaths,
            revision: self.revision,
        };
        // Create the directory for the stdout/stderr files.
        create_dir_all(aux_cx.output_base_dir()).unwrap();
        let input_file = &aux_testpaths.file;
        let mut aux_rustc =
            aux_cx.make_compile_args(input_file, aux_output, EmitMetadata::No, AllowUnused::No);
        // The aux crate honors its own env directives, not the parent test's.
        for key in &aux_props.unset_rustc_env {
            aux_rustc.env_remove(key);
        }
        aux_rustc.envs(aux_props.rustc_env.clone());
        // Decide (is_dylib, --crate-type): `no_prefer_dynamic` means "don't
        // pass a crate type and report dylib"; otherwise targets without
        // dynamic-linking support get a plain rlib, everything else a dylib.
        let (dylib, crate_type) = if aux_props.no_prefer_dynamic {
            (true, None)
        } else if self.config.target.contains("emscripten")
            || (self.config.target.contains("musl")
                && !aux_props.force_host
                && !self.config.host.contains("musl"))
            || self.config.target.contains("wasm32")
            || self.config.target.contains("nvptx")
            || self.is_vxworks_pure_static()
            || self.config.target.contains("sgx")
            || self.config.target.contains("bpf")
        {
            // We primarily compile all auxiliary libraries as dynamic libraries
            // to avoid code size bloat and large binaries as much as possible
            // for the test suite (otherwise including libstd statically in all
            // executables takes up quite a bit of space).
            //
            // For targets like MUSL or Emscripten, however, there is no support for
            // dynamic libraries so we just go back to building a normal library. Note,
            // however, that for MUSL if the library is built with `force_host` then
            // it's ok to be a dylib as the host should always support dylibs.
            (false, Some("lib"))
        } else {
            (true, Some("dylib"))
        };
        if let Some(crate_type) = crate_type {
            aux_rustc.args(&["--crate-type", crate_type]);
        }
        aux_rustc.arg("-L").arg(&aux_dir);
        let auxres = aux_cx.compose_and_run(
            aux_rustc,
            aux_cx.config.compile_lib_path.to_str().unwrap(),
            Some(aux_dir.to_str().unwrap()),
            None,
        );
        if !auxres.status.success() {
            self.fatal_proc_rec(
                &format!(
                    "auxiliary build of {:?} failed to compile: ",
                    aux_testpaths.file.display()
                ),
                &auxres,
            );
        }
        dylib
    }
    /// Spawns `command` with the dylib search path extended by `lib_path` and
    /// (optionally) `aux_path`, feeds `input` to stdin if given, and captures
    /// stdout/stderr into a `ProcRes` (output is abbreviated by
    /// `read2_abbreviated`).
    fn compose_and_run(
        &self,
        mut command: Command,
        lib_path: &str,
        aux_path: Option<&str>,
        input: Option<String>,
    ) -> ProcRes {
        // Render the command line up front so it's available for diagnostics
        // even if spawning fails later.
        let cmdline = {
            let cmdline = self.make_cmdline(&command, lib_path);
            logv(self.config, format!("executing {}", cmdline));
            cmdline
        };
        command.stdout(Stdio::piped()).stderr(Stdio::piped()).stdin(Stdio::piped());
        // Need to be sure to put both the lib_path and the aux path in the dylib
        // search path for the child.
        let mut path =
            env::split_paths(&env::var_os(dylib_env_var()).unwrap_or_default()).collect::<Vec<_>>();
        if let Some(p) = aux_path {
            path.insert(0, PathBuf::from(p))
        }
        path.insert(0, PathBuf::from(lib_path));
        // Add the new dylib search path var
        let newpath = env::join_paths(&path).unwrap();
        command.env(dylib_env_var(), newpath);
        let mut child = disable_error_reporting(|| command.spawn())
            .unwrap_or_else(|_| panic!("failed to exec `{:?}`", &command));
        if let Some(input) = input {
            child.stdin.as_mut().unwrap().write_all(input.as_bytes()).unwrap();
        }
        let Output { status, stdout, stderr } =
            read2_abbreviated(child).expect("failed to read output");
        let result = ProcRes {
            status,
            stdout: String::from_utf8_lossy(&stdout).into_owned(),
            stderr: String::from_utf8_lossy(&stderr).into_owned(),
            cmdline,
        };
        self.dump_output(&result.stdout, &result.stderr);
        result
    }
fn is_rustdoc(&self) -> bool {
self.config.src_base.ends_with("rustdoc-ui")
|| self.config.src_base.ends_with("rustdoc-js")
|| self.config.src_base.ends_with("rustdoc-json")
}
    /// Constructs the base rustc (or rustdoc, for rustdoc suites) invocation
    /// for `input_file`: target selection, mode-specific flags, incremental
    /// and compare-mode options, output location, and finally the test's own
    /// `compile-flags` (added last so they can override everything else).
    fn make_compile_args(
        &self,
        input_file: &Path,
        output_file: TargetLocation,
        emit_metadata: EmitMetadata,
        allow_unused: AllowUnused,
    ) -> Command {
        // Aux files are compiled with rustc even inside rustdoc suites.
        let is_aux = input_file.components().map(|c| c.as_os_str()).any(|c| c == "auxiliary");
        let is_rustdoc = self.is_rustdoc() && !is_aux;
        let mut rustc = if !is_rustdoc {
            Command::new(&self.config.rustc_path)
        } else {
            Command::new(&self.config.rustdoc_path.clone().expect("no rustdoc built yet"))
        };
        rustc.arg(input_file);
        // Use a single thread for efficiency and a deterministic error message order
        rustc.arg("-Zthreads=1");
        // Optionally prevent default --target if specified in test compile-flags.
        let custom_target = self.props.compile_flags.iter().any(|x| x.starts_with("--target"));
        if !custom_target {
            let target =
                if self.props.force_host { &*self.config.host } else { &*self.config.target };
            rustc.arg(&format!("--target={}", target));
        }
        self.set_revision_flags(&mut rustc);
        if !is_rustdoc {
            if let Some(ref incremental_dir) = self.props.incremental_dir {
                rustc.args(&["-C", &format!("incremental={}", incremental_dir.display())]);
                rustc.args(&["-Z", "incremental-verify-ich"]);
            }
            if self.config.mode == CodegenUnits {
                rustc.args(&["-Z", "human_readable_cgu_names"]);
            }
        }
        match self.config.mode {
            Incremental => {
                // If we are extracting and matching errors in the new
                // fashion, then you want JSON mode. Old-skool error
                // patterns still match the raw compiler output.
                if self.props.error_patterns.is_empty() {
                    rustc.args(&["--error-format", "json"]);
                    rustc.args(&["--json", "future-incompat"]);
                }
                rustc.arg("-Zui-testing");
                rustc.arg("-Zdeduplicate-diagnostics=no");
            }
            Ui => {
                // JSON diagnostics unless the test sets its own --error-format.
                if !self.props.compile_flags.iter().any(|s| s.starts_with("--error-format")) {
                    rustc.args(&["--error-format", "json"]);
                    rustc.args(&["--json", "future-incompat"]);
                }
                rustc.arg("-Ccodegen-units=1");
                rustc.arg("-Zui-testing");
                rustc.arg("-Zdeduplicate-diagnostics=no");
            }
            MirOpt => {
                rustc.args(&[
                    "-Copt-level=1",
                    "-Zdump-mir=all",
                    "-Zvalidate-mir",
                    "-Zdump-mir-exclude-pass-number",
                ]);
                if let Some(pass) = &self.props.mir_unit_test {
                    rustc.args(&["-Zmir-opt-level=0", &format!("-Zmir-enable-passes=+{}", pass)]);
                } else {
                    rustc.arg("-Zmir-opt-level=4");
                }
                // MIR dumps go to a per-test dir, recreated from scratch.
                let mir_dump_dir = self.get_mir_dump_dir();
                let _ = fs::remove_dir_all(&mir_dump_dir);
                create_dir_all(mir_dump_dir.as_path()).unwrap();
                let mut dir_opt = "-Zdump-mir-dir=".to_string();
                dir_opt.push_str(mir_dump_dir.to_str().unwrap());
                debug!("dir_opt: {:?}", dir_opt);
                rustc.arg(dir_opt);
            }
            RunPassValgrind | Pretty | DebugInfo | Codegen | Rustdoc | RustdocJson | RunMake
            | CodegenUnits | JsDocTest | Assembly => {
                // do not use JSON output
            }
        }
        if let (false, EmitMetadata::Yes) = (is_rustdoc, emit_metadata) {
            rustc.args(&["--emit", "metadata"]);
        }
        if !is_rustdoc {
            if self.config.target == "wasm32-unknown-unknown" || self.is_vxworks_pure_static() {
                // rustc.arg("-g"); // get any backtrace at all on errors
            } else if !self.props.no_prefer_dynamic {
                rustc.args(&["-C", "prefer-dynamic"]);
            }
        }
        match output_file {
            TargetLocation::ThisFile(path) => {
                rustc.arg("-o").arg(path);
            }
            TargetLocation::ThisDirectory(path) => {
                if is_rustdoc {
                    // `rustdoc` uses `-o` for the output directory.
                    rustc.arg("-o").arg(path);
                } else {
                    rustc.arg("--out-dir").arg(path);
                }
            }
        }
        match self.config.compare_mode {
            Some(CompareMode::Nll) => {
                rustc.args(&["-Zborrowck=mir"]);
            }
            Some(CompareMode::Polonius) => {
                rustc.args(&["-Zpolonius", "-Zborrowck=mir"]);
            }
            Some(CompareMode::Chalk) => {
                rustc.args(&["-Zchalk"]);
            }
            Some(CompareMode::SplitDwarf) => {
                rustc.args(&["-Csplit-debuginfo=unpacked", "-Zunstable-options"]);
            }
            Some(CompareMode::SplitDwarfSingle) => {
                rustc.args(&["-Csplit-debuginfo=packed", "-Zunstable-options"]);
            }
            None => {}
        }
        // Add `-A unused` before `config` flags and in-test (`props`) flags, so that they can
        // overwrite this.
        if let AllowUnused::Yes = allow_unused {
            rustc.args(&["-A", "unused"]);
        }
        if self.props.force_host {
            self.maybe_add_external_args(
                &mut rustc,
                self.split_maybe_args(&self.config.host_rustcflags),
            );
        } else {
            self.maybe_add_external_args(
                &mut rustc,
                self.split_maybe_args(&self.config.target_rustcflags),
            );
            if !is_rustdoc {
                if let Some(ref linker) = self.config.linker {
                    rustc.arg(format!("-Clinker={}", linker));
                }
            }
        }
        // Use dynamic musl for tests because static doesn't allow creating dylibs
        if self.config.host.contains("musl") || self.is_vxworks_pure_dynamic() {
            rustc.arg("-Ctarget-feature=-crt-static");
        }
        rustc.args(&self.props.compile_flags);
        rustc
    }
fn make_exe_name(&self) -> PathBuf {
// Using a single letter here to keep the path length down for
// Windows. Some test names get very long. rustc creates `rcgu`
// files with the module name appended to it which can more than
// double the length.
let mut f = self.output_base_dir().join("a");
// FIXME: This is using the host architecture exe suffix, not target!
if self.config.target.contains("emscripten") {
f = f.with_extra_extension("js");
} else if self.config.target.contains("wasm32") {
f = f.with_extra_extension("wasm");
} else if self.config.target.contains("spirv") {
f = f.with_extra_extension("spv");
} else if !env::consts::EXE_SUFFIX.is_empty() {
f = f.with_extra_extension(env::consts::EXE_SUFFIX);
}
f
}
    /// Assembles the program + argument list used to execute the compiled
    /// test: any runtool (e.g. valgrind) comes first, then — for
    /// emscripten/wasm32 targets — the NodeJS interpreter (plus the wasm shim
    /// for bare wasm32), then the executable itself and the test's
    /// `run-flags`. The first element becomes the program to invoke.
    fn make_run_args(&self) -> ProcArgs {
        // If we've got another tool to run under (valgrind),
        // then split apart its command
        let mut args = self.split_maybe_args(&self.config.runtool);
        // If this is emscripten, then run tests under nodejs
        if self.config.target.contains("emscripten") {
            if let Some(ref p) = self.config.nodejs {
                args.push(p.clone());
            } else {
                self.fatal("no NodeJS binary found (--nodejs)");
            }
        // If this is otherwise wasm, then run tests under nodejs with our
        // shim
        } else if self.config.target.contains("wasm32") {
            if let Some(ref p) = self.config.nodejs {
                args.push(p.clone());
            } else {
                self.fatal("no NodeJS binary found (--nodejs)");
            }
            let src = self
                .config
                .src_base
                .parent()
                .unwrap() // chop off `ui`
                .parent()
                .unwrap() // chop off `test`
                .parent()
                .unwrap(); // chop off `src`
            args.push(src.join("src/etc/wasm32-shim.js").display().to_string());
        }
        let exe_file = self.make_exe_name();
        // FIXME (#9639): This needs to handle non-utf8 paths
        args.push(exe_file.to_str().unwrap().to_owned());
        // Add the arguments in the run_flags directive
        args.extend(self.split_maybe_args(&self.props.run_flags));
        // Whatever ended up first (runtool, node, or the exe) is the program.
        let prog = args.remove(0);
        ProcArgs { prog, args }
    }
fn split_maybe_args(&self, argstr: &Option<String>) -> Vec<String> {
match *argstr {
Some(ref s) => s
.split(' ')
.filter_map(|s| {
if s.chars().all(|c| c.is_whitespace()) { None } else { Some(s.to_owned()) }
})
.collect(),
None => Vec::new(),
}
}
fn make_cmdline(&self, command: &Command, libpath: &str) -> String {
use crate::util;
// Linux and mac don't require adjusting the library search path
if cfg!(unix) {
format!("{:?}", command)
} else {
// Build the LD_LIBRARY_PATH variable as it would be seen on the command line
// for diagnostic purposes
fn lib_path_cmd_prefix(path: &str) -> String {
format!("{}=\"{}\"", util::lib_path_env_var(), util::make_new_path(path))
}
format!("{} {:?}", lib_path_cmd_prefix(libpath), command)
}
}
fn dump_output(&self, out: &str, err: &str) {
let revision = if let Some(r) = self.revision { format!("{}.", r) } else { String::new() };
self.dump_output_file(out, &format!("{}out", revision));
self.dump_output_file(err, &format!("{}err", revision));
self.maybe_dump_to_stdout(out, err);
}
fn dump_output_file(&self, out: &str, extension: &str) {
let outfile = self.make_out_name(extension);
fs::write(&outfile, out).unwrap();
}
    /// Creates a filename for output with the given extension.
    /// E.g., `/.../testname.revision.mode/testname.extension`.
    ///
    /// Used by `dump_output_file` for the captured stdout/stderr dumps.
    fn make_out_name(&self, extension: &str) -> PathBuf {
        self.output_base_name().with_extension(extension)
    }
    /// Gets the directory where auxiliary files are written.
    /// E.g., `/.../testname.revision.mode/auxiliary/`.
    ///
    /// The mode's disambiguator is appended as an extra extension so that
    /// aux outputs are kept distinct per test mode.
    fn aux_output_dir_name(&self) -> PathBuf {
        self.output_base_dir()
            .join("auxiliary")
            .with_extra_extension(self.config.mode.disambiguator())
    }
    /// Generates a unique name for the test, such as `testname.revision.mode`.
    ///
    /// Uses `safe_revision`, so the revision is omitted in incremental mode.
    fn output_testname_unique(&self) -> PathBuf {
        output_testname_unique(self.config, self.testpaths, self.safe_revision())
    }
/// The revision, ignored for incremental compilation since it wants all revisions in
/// the same directory.
fn safe_revision(&self) -> Option<&str> {
if self.config.mode == Incremental { None } else { self.revision }
}
    /// Gets the absolute path to the directory where all output for the given
    /// test/revision should reside.
    /// E.g., `/path/to/build/host-triple/test/ui/relative/testname.revision.mode/`.
    ///
    /// Uses `safe_revision`, so incremental tests share one directory.
    fn output_base_dir(&self) -> PathBuf {
        output_base_dir(self.config, self.testpaths, self.safe_revision())
    }
    /// Gets the absolute path to the base filename used as output for the given
    /// test/revision.
    /// E.g., `/.../relative/testname.revision.mode/testname`.
    ///
    /// Extensions are added to this base by `make_out_name`/`make_exe_name`.
    fn output_base_name(&self) -> PathBuf {
        output_base_name(self.config, self.testpaths, self.safe_revision())
    }
fn maybe_dump_to_stdout(&self, out: &str, err: &str) {
if self.config.verbose {
println!("------stdout------------------------------");
println!("{}", out);
println!("------stderr------------------------------");
println!("{}", err);
println!("------------------------------------------");
}
}
fn error(&self, err: &str) {
match self.revision {
Some(rev) => println!("\nerror in revision `{}`: {}", rev, err),
None => println!("\nerror: {}", err),
}
}
    /// Reports `err` via [`Self::error`], logs it, and panics to abort the
    /// current test.
    fn fatal(&self, err: &str) -> ! {
        self.error(err);
        error!("fatal error, panic: {:?}", err);
        panic!("fatal error");
    }
    /// Reports `err`, then lets `proc_res` print its details and panic.
    fn fatal_proc_rec(&self, err: &str, proc_res: &ProcRes) -> ! {
        self.error(err);
        proc_res.fatal(None, || ());
    }
    /// Like [`Self::fatal_proc_rec`], but invokes `on_failure` with a copy of
    /// this context before panicking (note `*self` — this relies on `Self`
    /// being `Copy`).
    fn fatal_proc_rec_with_ctx(
        &self,
        err: &str,
        proc_res: &ProcRes,
        on_failure: impl FnOnce(Self),
    ) -> ! {
        self.error(err);
        proc_res.fatal(None, || on_failure(*self));
    }
    // codegen tests (using FileCheck)
    /// Compiles the test with `--emit=llvm-ir`, leaving the `.ll` output in
    /// the test's output directory for later FileCheck verification.
    fn compile_test_and_save_ir(&self) -> ProcRes {
        let aux_dir = self.aux_output_dir_name();
        let output_file = TargetLocation::ThisDirectory(self.output_base_dir());
        let input_file = &self.testpaths.file;
        let mut rustc =
            self.make_compile_args(input_file, output_file, EmitMetadata::No, AllowUnused::No);
        rustc.arg("-L").arg(aux_dir).arg("--emit=llvm-ir");
        self.compose_and_run_compiler(rustc, None)
    }
    /// Compiles the test to assembly (via `--emit=asm` or the ptx-linker,
    /// chosen by the `assembly-output` header) and returns the compile result
    /// together with the path of the `.s` file.
    fn compile_test_and_save_assembly(&self) -> (ProcRes, PathBuf) {
        // This works with both `--emit asm` (as default output name for the assembly)
        // and `ptx-linker` because the latter can write output at requested location.
        let output_path = self.output_base_name().with_extension("s");
        let output_file = TargetLocation::ThisFile(output_path.clone());
        let input_file = &self.testpaths.file;
        let mut rustc =
            self.make_compile_args(input_file, output_file, EmitMetadata::No, AllowUnused::No);
        rustc.arg("-L").arg(self.aux_output_dir_name());
        match self.props.assembly_output.as_ref().map(AsRef::as_ref) {
            Some("emit-asm") => {
                rustc.arg("--emit=asm");
            }
            Some("ptx-linker") => {
                // No extra flags needed.
            }
            Some(_) => self.fatal("unknown 'assembly-output' header"),
            None => self.fatal("missing 'assembly-output' header"),
        }
        (self.compose_and_run_compiler(rustc, None), output_path)
    }
    /// Runs LLVM FileCheck over `output`, using the test source itself as the
    /// check file. The active prefixes are `CHECK`, an `MSVC`/`NONMSVC`
    /// target prefix, and the current revision name if there is one.
    fn verify_with_filecheck(&self, output: &Path) -> ProcRes {
        let mut filecheck = Command::new(self.config.llvm_filecheck.as_ref().unwrap());
        filecheck.arg("--input-file").arg(output).arg(&self.testpaths.file);
        // It would be more appropriate to make most of the arguments configurable through
        // a comment-attribute similar to `compile-flags`. For example, --check-prefixes is a very
        // useful flag.
        //
        // For now, though…
        let prefix_for_target =
            if self.config.target.contains("msvc") { "MSVC" } else { "NONMSVC" };
        let prefixes = if let Some(rev) = self.revision {
            format!("CHECK,{},{}", prefix_for_target, rev)
        } else {
            format!("CHECK,{}", prefix_for_target)
        };
        // `--allow-unused-prefixes` only exists on LLVM >= 13.
        if self.config.llvm_version.unwrap_or(0) >= 130000 {
            filecheck.args(&["--allow-unused-prefixes", "--check-prefixes", &prefixes]);
        } else {
            filecheck.args(&["--check-prefixes", &prefixes]);
        }
        self.compose_and_run(filecheck, "", None, None)
    }
    /// Runs a codegen test: compiles to LLVM IR, then verifies the generated
    /// `.ll` file against the test's CHECK annotations with FileCheck.
    fn run_codegen_test(&self) {
        if self.config.llvm_filecheck.is_none() {
            self.fatal("missing --llvm-filecheck");
        }
        let proc_res = self.compile_test_and_save_ir();
        if !proc_res.status.success() {
            self.fatal_proc_rec("compilation failed!", &proc_res);
        }
        let output_path = self.output_base_name().with_extension("ll");
        let proc_res = self.verify_with_filecheck(&output_path);
        if !proc_res.status.success() {
            self.fatal_proc_rec("verification with 'FileCheck' failed", &proc_res);
        }
    }
    /// Runs an assembly test: compiles to a `.s` file, then verifies it
    /// against the test's CHECK annotations with FileCheck.
    fn run_assembly_test(&self) {
        if self.config.llvm_filecheck.is_none() {
            self.fatal("missing --llvm-filecheck");
        }
        let (proc_res, output_path) = self.compile_test_and_save_assembly();
        if !proc_res.status.success() {
            self.fatal_proc_rec("compilation failed!", &proc_res);
        }
        let proc_res = self.verify_with_filecheck(&output_path);
        if !proc_res.status.success() {
            self.fatal_proc_rec("verification with 'FileCheck' failed", &proc_res);
        }
    }
fn charset() -> &'static str {
// FreeBSD 10.1 defaults to GDB 6.1.1 which doesn't support "auto" charset
if cfg!(target_os = "freebsd") { "ISO-8859-1" } else { "UTF-8" }
}
    /// Runs a rustdoc HTML test: documents the file into a fresh output dir,
    /// then either cross-checks doctest line numbers
    /// (`check_test_line_numbers_match`) or validates the generated HTML with
    /// `htmldocck.py`, diffing against nightly rustdoc on failure.
    fn run_rustdoc_test(&self) {
        assert!(self.revision.is_none(), "revisions not relevant here");
        let out_dir = self.output_base_dir();
        let _ = fs::remove_dir_all(&out_dir);
        create_dir_all(&out_dir).unwrap();
        let proc_res = self.document(&out_dir);
        if !proc_res.status.success() {
            self.fatal_proc_rec("rustdoc failed!", &proc_res);
        }
        if self.props.check_test_line_numbers_match {
            self.check_rustdoc_test_option(proc_res);
        } else {
            let root = self.config.find_rust_src_root().unwrap();
            let mut cmd = Command::new(&self.config.python);
            cmd.arg(root.join("src/etc/htmldocck.py")).arg(&out_dir).arg(&self.testpaths.file);
            if self.config.bless {
                cmd.arg("--bless");
            }
            let res = self.cmd2procres(&mut cmd);
            if !res.status.success() {
                self.fatal_proc_rec_with_ctx("htmldocck failed!", &res, |mut this| {
                    this.compare_to_default_rustdoc(&out_dir)
                });
            }
        }
    }
    /// On htmldocck failure, diffs this test's rustdoc output against what a
    /// nightly `rustdoc` (taken from PATH) produces for the same input. The
    /// HTML on both sides is normalized with `tidy` first, and the resulting
    /// diff is shown through the user's `core.pager` if one is configured.
    /// Skipped entirely when `tidy` is unavailable.
    fn compare_to_default_rustdoc(&mut self, out_dir: &Path) {
        if !self.config.has_tidy {
            return;
        }
        println!("info: generating a diff against nightly rustdoc");
        let suffix =
            self.safe_revision().map_or("nightly".into(), |path| path.to_owned() + "-nightly");
        let compare_dir = output_base_dir(self.config, self.testpaths, Some(&suffix));
        // Don't give an error if the directory didn't already exist
        let _ = fs::remove_dir_all(&compare_dir);
        create_dir_all(&compare_dir).unwrap();
        // We need to create a new struct for the lifetimes on `config` to work.
        let new_rustdoc = TestCx {
            config: &Config {
                // FIXME: use beta or a user-specified rustdoc instead of
                // hardcoding the default toolchain
                rustdoc_path: Some("rustdoc".into()),
                // Needed for building auxiliary docs below
                rustc_path: "rustc".into(),
                ..self.config.clone()
            },
            ..*self
        };
        let output_file = TargetLocation::ThisDirectory(new_rustdoc.aux_output_dir_name());
        let mut rustc = new_rustdoc.make_compile_args(
            &new_rustdoc.testpaths.file,
            output_file,
            EmitMetadata::No,
            AllowUnused::Yes,
        );
        rustc.arg("-L").arg(&new_rustdoc.aux_output_dir_name());
        new_rustdoc.build_all_auxiliary(&mut rustc);
        let proc_res = new_rustdoc.document(&compare_dir);
        if !proc_res.status.success() {
            eprintln!("failed to run nightly rustdoc");
            return;
        }
        #[rustfmt::skip]
        let tidy_args = [
            "--indent", "yes",
            "--indent-spaces", "2",
            "--wrap", "0",
            "--show-warnings", "no",
            "--markup", "yes",
            "--quiet", "yes",
            "-modify",
        ];
        // Normalize every .html file in a directory tree in place.
        let tidy_dir = |dir| {
            for entry in walkdir::WalkDir::new(dir) {
                let entry = entry.expect("failed to read file");
                if entry.file_type().is_file()
                    && entry.path().extension().and_then(|p| p.to_str()) == Some("html".into())
                {
                    let status =
                        Command::new("tidy").args(&tidy_args).arg(entry.path()).status().unwrap();
                    // `tidy` returns 1 if it modified the file.
                    assert!(status.success() || status.code() == Some(1));
                }
            }
        };
        tidy_dir(out_dir);
        tidy_dir(&compare_dir);
        let pager = {
            let output = Command::new("git").args(&["config", "--get", "core.pager"]).output().ok();
            output.and_then(|out| {
                if out.status.success() {
                    Some(String::from_utf8(out.stdout).expect("invalid UTF8 in git pager"))
                } else {
                    None
                }
            })
        };
        let diff_filename = format!("build/tmp/rustdoc-compare-{}.diff", std::process::id());
        // Write the html/js diff; bail out early if the two trees match.
        if !write_filtered_diff(
            &diff_filename,
            out_dir,
            &compare_dir,
            self.config.verbose,
            |file_type, extension| {
                file_type.is_file()
                    && (extension == Some("html".into()) || extension == Some("js".into()))
            },
        ) {
            return;
        }
        match self.config.color {
            ColorConfig::AlwaysColor => colored::control::set_override(true),
            ColorConfig::NeverColor => colored::control::set_override(false),
            _ => {}
        }
        if let Some(pager) = pager {
            let pager = pager.trim();
            if self.config.verbose {
                eprintln!("using pager {}", pager);
            }
            let output = Command::new(pager)
                // disable paging; we want this to be non-interactive
                .env("PAGER", "")
                .stdin(File::open(&diff_filename).unwrap())
                // Capture output and print it explicitly so it will in turn be
                // captured by libtest.
                .output()
                .unwrap();
            assert!(output.status.success());
            println!("{}", String::from_utf8_lossy(&output.stdout));
            eprintln!("{}", String::from_utf8_lossy(&output.stderr));
        } else {
            use colored::Colorize;
            eprintln!("warning: no pager configured, falling back to unified diff");
            eprintln!(
                "help: try configuring a git pager (e.g. `delta`) with `git config --global core.pager delta`"
            );
            // Hand-colorize the unified diff line by line (+, -, @ markers).
            let mut out = io::stdout();
            let mut diff = BufReader::new(File::open(&diff_filename).unwrap());
            let mut line = Vec::new();
            loop {
                line.truncate(0);
                match diff.read_until(b'\n', &mut line) {
                    Ok(0) => break,
                    Ok(_) => {}
                    Err(e) => eprintln!("ERROR: {:?}", e),
                }
                match String::from_utf8(line.clone()) {
                    Ok(line) => {
                        if line.starts_with("+") {
                            write!(&mut out, "{}", line.green()).unwrap();
                        } else if line.starts_with("-") {
                            write!(&mut out, "{}", line.red()).unwrap();
                        } else if line.starts_with("@") {
                            write!(&mut out, "{}", line.blue()).unwrap();
                        } else {
                            out.write_all(line.as_bytes()).unwrap();
                        }
                    }
                    Err(_) => {
                        write!(&mut out, "{}", String::from_utf8_lossy(&line).reversed()).unwrap();
                    }
                }
            }
        };
    }
fn run_rustdoc_json_test(&self) {
//FIXME: Add bless option.
assert!(self.revision.is_none(), "revisions not relevant here");
let out_dir = self.output_base_dir();
let _ = fs::remove_dir_all(&out_dir);
create_dir_all(&out_dir).unwrap();
let proc_res = self.document(&out_dir);
if !proc_res.status.success() {
self.fatal_proc_rec("rustdoc failed!", &proc_res);
}
let root = self.config.find_rust_src_root().unwrap();
let mut json_out = out_dir.join(self.testpaths.file.file_stem().unwrap());
json_out.set_extension("json");
let res = self.cmd2procres(
Command::new(self.config.jsondocck_path.as_ref().unwrap())
.arg("--doc-dir")
.arg(root.join(&out_dir))
.arg("--template")
.arg(&self.testpaths.file),
);
if !res.status.success() {
self.fatal_proc_rec_with_ctx("jsondocck failed!", &res, |_| {
println!("Rustdoc Output:");
proc_res.print_info();
})
}
let mut json_out = out_dir.join(self.testpaths.file.file_stem().unwrap());
json_out.set_extension("json");
let res = self.cmd2procres(
Command::new(&self.config.python)
.arg(root.join("src/etc/check_missing_items.py"))
.arg(&json_out),
);
if !res.status.success() {
self.fatal_proc_rec("check_missing_items failed!", &res);
}
}
    /// Scans the file at `path` for doctest openings.
    ///
    /// Returns the 1-based line numbers of the opening ``` fences found after
    /// `///` markers (closing fences toggle `ignore` and are skipped). When
    /// `other_files` is provided, module names from `mod foo;` / `pub mod foo;`
    /// declarations are pushed into it so callers can scan those files too.
    fn get_lines<P: AsRef<Path>>(
        &self,
        path: &P,
        mut other_files: Option<&mut Vec<String>>,
    ) -> Vec<usize> {
        let content = fs::read_to_string(&path).unwrap();
        // True while inside a code block; fences flip it.
        let mut ignore = false;
        content
            .lines()
            .enumerate()
            .filter_map(|(line_nb, line)| {
                if (line.trim_start().starts_with("pub mod ")
                    || line.trim_start().starts_with("mod "))
                    && line.ends_with(';')
                {
                    if let Some(ref mut other_files) = other_files {
                        other_files.push(line.rsplit("mod ").next().unwrap().replace(";", ""));
                    }
                    None
                } else {
                    // Look only at the doc-comment part of the line.
                    let sline = line.split("///").last().unwrap_or("");
                    let line = sline.trim_start();
                    if line.starts_with("```") {
                        if ignore {
                            ignore = false;
                            None
                        } else {
                            ignore = true;
                            // Report the 1-based line of the opening fence.
                            Some(line_nb + 1)
                        }
                    } else {
                        None
                    }
                }
            })
            .collect()
    }
fn check_rustdoc_test_option(&self, res: ProcRes) {
let mut other_files = Vec::new();
let mut files: HashMap<String, Vec<usize>> = HashMap::new();
let cwd = env::current_dir().unwrap();
files.insert(
self.testpaths
.file
.strip_prefix(&cwd)
.unwrap_or(&self.testpaths.file)
.to_str()
.unwrap()
.replace('\\', "/"),
self.get_lines(&self.testpaths.file, Some(&mut other_files)),
);
for other_file in other_files {
let mut path = self.testpaths.file.clone();
path.set_file_name(&format!("{}.rs", other_file));
files.insert(
path.strip_prefix(&cwd).unwrap_or(&path).to_str().unwrap().replace('\\', "/"),
self.get_lines(&path, None),
);
}
let mut tested = 0;
for _ in res.stdout.split('\n').filter(|s| s.starts_with("test ")).inspect(|s| {
if let Some((left, right)) = s.split_once(" - ") {
let path = left.rsplit("test ").next().unwrap();
if let Some(ref mut v) = files.get_mut(&path.replace('\\', "/")) {
tested += 1;
let mut iter = right.split("(line ");
iter.next();
let line = iter
.next()
.unwrap_or(")")
.split(')')
.next()
.unwrap_or("0")
.parse()
.unwrap_or(0);
if let Ok(pos) = v.binary_search(&line) {
v.remove(pos);
} else {
self.fatal_proc_rec(
&format!("Not found doc test: \"{}\" in \"{}\":{:?}", s, path, v),
&res,
);
}
}
}
}) {}
if tested == 0 {
self.fatal_proc_rec(&format!("No test has been found... {:?}", files), &res);
} else {
for (entry, v) in &files {
if !v.is_empty() {
self.fatal_proc_rec(
&format!(
"Not found test at line{} \"{}\":{:?}",
if v.len() > 1 { "s" } else { "" },
entry,
v
),
&res,
);
}
}
}
}
fn run_codegen_units_test(&self) {
assert!(self.revision.is_none(), "revisions not relevant here");
let proc_res = self.compile_test(WillExecute::No, EmitMetadata::No);
if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}
self.check_no_compiler_crash(&proc_res, self.props.should_ice);
const PREFIX: &str = "MONO_ITEM ";
const CGU_MARKER: &str = "@@";
let actual: Vec<MonoItem> = proc_res
.stdout
.lines()
.filter(|line| line.starts_with(PREFIX))
.map(|line| str_to_mono_item(line, true))
.collect();
let expected: Vec<MonoItem> = errors::load_errors(&self.testpaths.file, None)
.iter()
.map(|e| str_to_mono_item(&e.msg[..], false))
.collect();
let mut missing = Vec::new();
let mut wrong_cgus = Vec::new();
for expected_item in &expected {
let actual_item_with_same_name = actual.iter().find(|ti| ti.name == expected_item.name);
if let Some(actual_item) = actual_item_with_same_name {
if !expected_item.codegen_units.is_empty() &&
// Also check for codegen units
expected_item.codegen_units != actual_item.codegen_units
{
wrong_cgus.push((expected_item.clone(), actual_item.clone()));
}
} else {
missing.push(expected_item.string.clone());
}
}
let unexpected: Vec<_> = actual
.iter()
.filter(|acgu| !expected.iter().any(|ecgu| acgu.name == ecgu.name))
.map(|acgu| acgu.string.clone())
.collect();
if !missing.is_empty() {
missing.sort();
println!("\nThese items should have been contained but were not:\n");
for item in &missing {
println!("{}", item);
}
println!("\n");
}
if !unexpected.is_empty() {
let sorted = {
let mut sorted = unexpected.clone();
sorted.sort();
sorted
};
println!("\nThese items were contained but should not have been:\n");
for item in sorted {
println!("{}", item);
}
println!("\n");
}
if !wrong_cgus.is_empty() {
wrong_cgus.sort_by_key(|pair| pair.0.name.clone());
println!("\nThe following items were assigned to wrong codegen units:\n");
for &(ref expected_item, ref actual_item) in &wrong_cgus {
println!("{}", expected_item.name);
println!(" expected: {}", codegen_units_to_str(&expected_item.codegen_units));
println!(" actual: {}", codegen_units_to_str(&actual_item.codegen_units));
println!();
}
}
if !(missing.is_empty() && unexpected.is_empty() && wrong_cgus.is_empty()) {
panic!();
}
#[derive(Clone, Eq, PartialEq)]
struct MonoItem {
name: String,
codegen_units: HashSet<String>,
string: String,
}
// [MONO_ITEM] name [@@ (cgu)+]
fn str_to_mono_item(s: &str, cgu_has_crate_disambiguator: bool) -> MonoItem {
let s = if s.starts_with(PREFIX) { (&s[PREFIX.len()..]).trim() } else { s.trim() };
let full_string = format!("{}{}", PREFIX, s);
let parts: Vec<&str> =
s.split(CGU_MARKER).map(str::trim).filter(|s| !s.is_empty()).collect();
let name = parts[0].trim();
let cgus = if parts.len() > 1 {
let cgus_str = parts[1];
cgus_str
.split(' ')
.map(str::trim)
.filter(|s| !s.is_empty())
.map(|s| {
if cgu_has_crate_disambiguator {
remove_crate_disambiguators_from_set_of_cgu_names(s)
} else {
s.to_string()
}
})
.collect()
} else {
HashSet::new()
};
MonoItem { name: name.to_owned(), codegen_units: cgus, string: full_string }
}
fn codegen_units_to_str(cgus: &HashSet<String>) -> String {
let mut cgus: Vec<_> = cgus.iter().collect();
cgus.sort();
let mut string = String::new();
for cgu in cgus {
string.push_str(&cgu[..]);
string.push_str(" ");
}
string
}
        // Given a cgu-name-prefix of the form <crate-name>.<crate-disambiguator> or
        // the form <crate-name1>.<crate-disambiguator1>-in-<crate-name2>.<crate-disambiguator2>,
        // remove all crate-disambiguators.
        fn remove_crate_disambiguator_from_cgu(cgu: &str) -> String {
            lazy_static! {
                // `d1` captures the first disambiguator (`.<alnum>+`); the optional
                // `-in-...` tail contributes a second disambiguator as `d2`.
                static ref RE: Regex =
                    Regex::new(r"^[^\.]+(?P<d1>\.[[:alnum:]]+)(-in-[^\.]+(?P<d2>\.[[:alnum:]]+))?")
                        .unwrap();
            }
            let captures =
                RE.captures(cgu).unwrap_or_else(|| panic!("invalid cgu name encountered: {}", cgu));
            let mut new_name = cgu.to_owned();
            // Remove `d2` first: its byte range lies after `d1`, so deleting it
            // does not invalidate `d1`'s start/end offsets.
            if let Some(d2) = captures.name("d2") {
                new_name.replace_range(d2.start()..d2.end(), "");
            }
            let d1 = captures.name("d1").unwrap();
            new_name.replace_range(d1.start()..d1.end(), "");
            new_name
        }
// The name of merged CGUs is constructed as the names of the original
// CGUs joined with "--". This function splits such composite CGU names
// and handles each component individually.
fn remove_crate_disambiguators_from_set_of_cgu_names(cgus: &str) -> String {
cgus.split("--")
.map(|cgu| remove_crate_disambiguator_from_cgu(cgu))
.collect::<Vec<_>>()
.join("--")
}
}
fn init_incremental_test(&self) {
// (See `run_incremental_test` for an overview of how incremental tests work.)
// Before any of the revisions have executed, create the
// incremental workproduct directory. Delete any old
// incremental work products that may be there from prior
// runs.
let incremental_dir = self.props.incremental_dir.as_ref().unwrap();
if incremental_dir.exists() {
// Canonicalizing the path will convert it to the //?/ format
// on Windows, which enables paths longer than 260 character
let canonicalized = incremental_dir.canonicalize().unwrap();
fs::remove_dir_all(canonicalized).unwrap();
}
fs::create_dir_all(&incremental_dir).unwrap();
if self.config.verbose {
println!("init_incremental_test: incremental_dir={}", incremental_dir.display());
}
}
fn run_incremental_test(&self) {
// Basic plan for a test incremental/foo/bar.rs:
// - load list of revisions rpass1, cfail2, rpass3
// - each should begin with `rpass`, `cfail`, or `rfail`
// - if `rpass`, expect compile and execution to succeed
// - if `cfail`, expect compilation to fail
// - if `rfail`, expect execution to fail
// - create a directory build/foo/bar.incremental
// - compile foo/bar.rs with -C incremental=.../foo/bar.incremental and -C rpass1
// - because name of revision starts with "rpass", expect success
// - compile foo/bar.rs with -C incremental=.../foo/bar.incremental and -C cfail2
// - because name of revision starts with "cfail", expect an error
// - load expected errors as usual, but filter for those that end in `[rfail2]`
// - compile foo/bar.rs with -C incremental=.../foo/bar.incremental and -C rpass3
// - because name of revision starts with "rpass", expect success
// - execute build/foo/bar.exe and save output
//
// FIXME -- use non-incremental mode as an oracle? That doesn't apply
// to #[rustc_dirty] and clean tests I guess
let revision = self.revision.expect("incremental tests require a list of revisions");
// Incremental workproduct directory should have already been created.
let incremental_dir = self.props.incremental_dir.as_ref().unwrap();
assert!(incremental_dir.exists(), "init_incremental_test failed to create incremental dir");
if self.config.verbose {
print!("revision={:?} props={:#?}", revision, self.props);
}
if revision.starts_with("rpass") {
if self.props.should_ice {
self.fatal("can only use should-ice in cfail tests");
}
self.run_rpass_test();
} else if revision.starts_with("rfail") {
if self.props.should_ice {
self.fatal("can only use should-ice in cfail tests");
}
self.run_rfail_test();
} else if revision.starts_with("cfail") {
self.run_cfail_test();
} else {
self.fatal("revision name must begin with rpass, rfail, or cfail");
}
}
    /// Runs a `run-make` test by invoking `make` (or `gmake` on the BSDs) in
    /// the test's directory, with an environment describing the toolchain,
    /// target, and a per-test scratch directory (`TMPDIR`). Fails the test
    /// fatally if `make` exits unsuccessfully.
    fn run_rmake_test(&self) {
        let cwd = env::current_dir().unwrap();
        let src_root = self.config.src_base.parent().unwrap().parent().unwrap().parent().unwrap();
        let src_root = cwd.join(&src_root);
        // Fresh scratch directory for this test run.
        let tmpdir = cwd.join(self.output_base_name());
        if tmpdir.exists() {
            self.aggressive_rm_rf(&tmpdir).unwrap();
        }
        create_dir_all(&tmpdir).unwrap();
        // The BSDs ship a non-GNU `make` as `make`; the test Makefiles need GNU make.
        let host = &self.config.host;
        let make = if host.contains("dragonfly")
            || host.contains("freebsd")
            || host.contains("netbsd")
            || host.contains("openbsd")
        {
            "gmake"
        } else {
            "make"
        };
        let mut cmd = Command::new(make);
        cmd.current_dir(&self.testpaths.file)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .env("TARGET", &self.config.target)
            .env("PYTHON", &self.config.python)
            .env("S", src_root)
            .env("RUST_BUILD_STAGE", &self.config.stage_id)
            .env("RUSTC", cwd.join(&self.config.rustc_path))
            .env("TMPDIR", &tmpdir)
            .env("LD_LIB_PATH_ENVVAR", dylib_env_var())
            .env("HOST_RPATH_DIR", cwd.join(&self.config.compile_lib_path))
            .env("TARGET_RPATH_DIR", cwd.join(&self.config.run_lib_path))
            .env("LLVM_COMPONENTS", &self.config.llvm_components)
            // We for sure don't want these tests to run in parallel, so make
            // sure they don't have access to these vars if we run via `make`
            // at the top level
            .env_remove("MAKEFLAGS")
            .env_remove("MFLAGS")
            .env_remove("CARGO_MAKEFLAGS");
        // Optional tools: only exported when configured.
        if let Some(ref rustdoc) = self.config.rustdoc_path {
            cmd.env("RUSTDOC", cwd.join(rustdoc));
        }
        if let Some(ref rust_demangler) = self.config.rust_demangler_path {
            cmd.env("RUST_DEMANGLER", cwd.join(rust_demangler));
        }
        if let Some(ref node) = self.config.nodejs {
            cmd.env("NODE", node);
        }
        if let Some(ref linker) = self.config.linker {
            cmd.env("RUSTC_LINKER", linker);
        }
        if let Some(ref clang) = self.config.run_clang_based_tests_with {
            cmd.env("CLANG", clang);
        }
        if let Some(ref filecheck) = self.config.llvm_filecheck {
            cmd.env("LLVM_FILECHECK", filecheck);
        }
        if let Some(ref llvm_bin_dir) = self.config.llvm_bin_dir {
            cmd.env("LLVM_BIN_DIR", llvm_bin_dir);
        }
        // We don't want RUSTFLAGS set from the outside to interfere with
        // compiler flags set in the test cases:
        cmd.env_remove("RUSTFLAGS");
        // Use dynamic musl for tests because static doesn't allow creating dylibs
        if self.config.host.contains("musl") {
            cmd.env("RUSTFLAGS", "-Ctarget-feature=-crt-static").env("IS_MUSL_HOST", "1");
        }
        if self.config.bless {
            cmd.env("RUSTC_BLESS_TEST", "--bless");
            // Assume this option is active if the environment variable is "defined", with _any_ value.
            // As an example, a `Makefile` can use this option by:
            //
            //   ifdef RUSTC_BLESS_TEST
            //       cp "$(TMPDIR)"/actual_something.ext expected_something.ext
            //   else
            //       $(DIFF) expected_something.ext "$(TMPDIR)"/actual_something.ext
            //   endif
        }
        if self.config.target.contains("msvc") && self.config.cc != "" {
            // We need to pass a path to `lib.exe`, so assume that `cc` is `cl.exe`
            // and that `lib.exe` lives next to it.
            let lib = Path::new(&self.config.cc).parent().unwrap().join("lib.exe");
            // MSYS doesn't like passing flags of the form `/foo` as it thinks it's
            // a path and instead passes `C:\msys64\foo`, so convert all
            // `/`-arguments to MSVC here to `-` arguments.
            let cflags = self
                .config
                .cflags
                .split(' ')
                .map(|s| s.replace("/", "-"))
                .collect::<Vec<_>>()
                .join(" ");
            let cxxflags = self
                .config
                .cxxflags
                .split(' ')
                .map(|s| s.replace("/", "-"))
                .collect::<Vec<_>>()
                .join(" ");
            cmd.env("IS_MSVC", "1")
                .env("IS_WINDOWS", "1")
                .env("MSVC_LIB", format!("'{}' -nologo", lib.display()))
                .env("CC", format!("'{}' {}", self.config.cc, cflags))
                .env("CXX", format!("'{}' {}", &self.config.cxx, cxxflags));
        } else {
            cmd.env("CC", format!("{} {}", self.config.cc, self.config.cflags))
                .env("CXX", format!("{} {}", self.config.cxx, self.config.cxxflags))
                .env("AR", &self.config.ar);
            if self.config.target.contains("windows") {
                cmd.env("IS_WINDOWS", "1");
            }
        }
        // Run `make`, capturing (abbreviated) stdout/stderr, and fail loudly
        // with the captured output on a non-zero exit.
        let output = cmd.spawn().and_then(read2_abbreviated).expect("failed to spawn `make`");
        if !output.status.success() {
            let res = ProcRes {
                status: output.status,
                stdout: String::from_utf8_lossy(&output.stdout).into_owned(),
                stderr: String::from_utf8_lossy(&output.stderr).into_owned(),
                cmdline: format!("{:?}", cmd),
            };
            self.fatal_proc_rec("make failed", &res);
        }
    }
fn aggressive_rm_rf(&self, path: &Path) -> io::Result<()> {
for e in path.read_dir()? {
let entry = e?;
let path = entry.path();
if entry.file_type()?.is_dir() {
self.aggressive_rm_rf(&path)?;
} else {
// Remove readonly files as well on windows (by default we can't)
fs::remove_file(&path).or_else(|e| {
if cfg!(windows) && e.kind() == io::ErrorKind::PermissionDenied {
let mut meta = entry.metadata()?.permissions();
meta.set_readonly(false);
fs::set_permissions(&path, meta)?;
fs::remove_file(&path)
} else {
Err(e)
}
})?;
}
}
fs::remove_dir(path)
}
fn run_js_doc_test(&self) {
if let Some(nodejs) = &self.config.nodejs {
let out_dir = self.output_base_dir();
self.document(&out_dir);
let root = self.config.find_rust_src_root().unwrap();
let file_stem =
self.testpaths.file.file_stem().and_then(|f| f.to_str()).expect("no file stem");
let res = self.cmd2procres(
Command::new(&nodejs)
.arg(root.join("src/tools/rustdoc-js/tester.js"))
.arg("--doc-folder")
.arg(out_dir)
.arg("--crate-name")
.arg(file_stem.replace("-", "_"))
.arg("--test-file")
.arg(self.testpaths.file.with_extension("js")),
);
if !res.status.success() {
self.fatal_proc_rec("rustdoc-js test failed!", &res);
}
} else {
self.fatal("no nodeJS");
}
}
    /// Compares the normalized stdout/stderr of `proc_res` against this
    /// test's expectation files for `output_kind`, returning the number of
    /// mismatches (0 when all checks pass or are disabled via test props).
    ///
    /// `explicit_format` indicates the test passed its own `--error-format`,
    /// in which case stderr is used verbatim instead of being extracted from
    /// rustc's JSON diagnostics.
    fn load_compare_outputs(
        &self,
        proc_res: &ProcRes,
        output_kind: TestOutput,
        explicit_format: bool,
    ) -> usize {
        // Some tests keep per-pointer-width stderr expectations, e.g. `64bit.stderr`.
        let stderr_bits = format!("{}.stderr", get_pointer_width(&self.config.target));
        let (stderr_kind, stdout_kind) = match output_kind {
            TestOutput::Compile => (
                {
                    if self.props.stderr_per_bitwidth { &stderr_bits } else { UI_STDERR }
                },
                UI_STDOUT,
            ),
            TestOutput::Run => (UI_RUN_STDERR, UI_RUN_STDOUT),
        };
        let expected_stderr = self.load_expected_output(stderr_kind);
        let expected_stdout = self.load_expected_output(stdout_kind);
        let normalized_stdout = match output_kind {
            TestOutput::Run if self.config.remote_test_client.is_some() => {
                // When tests are run using the remote-test-client, the string
                // 'uploaded "$TEST_BUILD_DIR/<test_executable>, waiting for result"'
                // is printed to stdout by the client and then captured in the ProcRes,
                // so it needs to be removed when comparing the run-pass test execution output
                lazy_static! {
                    static ref REMOTE_TEST_RE: Regex = Regex::new(
                        "^uploaded \"\\$TEST_BUILD_DIR(/[[:alnum:]_\\-.]+)+\", waiting for result\n"
                    )
                    .unwrap();
                }
                REMOTE_TEST_RE
                    .replace(
                        &self.normalize_output(&proc_res.stdout, &self.props.normalize_stdout),
                        "",
                    )
                    .to_string()
            }
            _ => self.normalize_output(&proc_res.stdout, &self.props.normalize_stdout),
        };
        // With an explicit error format, stderr is compared as-is; otherwise
        // the human-rendered messages are extracted from the JSON diagnostics.
        let stderr = if explicit_format {
            proc_res.stderr.clone()
        } else {
            json::extract_rendered(&proc_res.stderr)
        };
        let normalized_stderr = self.normalize_output(&stderr, &self.props.normalize_stderr);
        let mut errors = 0;
        match output_kind {
            TestOutput::Compile => {
                if !self.props.dont_check_compiler_stdout {
                    errors +=
                        self.compare_output(stdout_kind, &normalized_stdout, &expected_stdout);
                }
                if !self.props.dont_check_compiler_stderr {
                    errors +=
                        self.compare_output(stderr_kind, &normalized_stderr, &expected_stderr);
                }
            }
            TestOutput::Run => {
                errors += self.compare_output(stdout_kind, &normalized_stdout, &expected_stdout);
                errors += self.compare_output(stderr_kind, &normalized_stderr, &expected_stderr);
            }
        }
        errors
    }
    /// Runs a UI test end-to-end: compiles (and possibly runs) the test,
    /// compares stdout/stderr against expectation files, checks `//~` error
    /// annotations and `// error-pattern` comments, and — for `run-rustfix`
    /// tests — applies rustc's suggestions and re-compiles the fixed code.
    fn run_ui_test(&self) {
        if let Some(FailMode::Build) = self.props.fail_mode {
            // Make sure a build-fail test cannot fail due to failing analysis (e.g. typeck).
            let pm = Some(PassMode::Check);
            let proc_res = self.compile_test_general(WillExecute::No, EmitMetadata::Yes, pm);
            self.check_if_test_should_compile(&proc_res, pm);
        }
        let pm = self.pass_mode();
        let should_run = self.should_run(pm);
        let emit_metadata = self.should_emit_metadata(pm);
        let proc_res = self.compile_test(should_run, emit_metadata);
        self.check_if_test_should_compile(&proc_res, pm);
        // if the user specified a format in the ui test
        // print the output to the stderr file, otherwise extract
        // the rendered error messages from json and print them
        let explicit = self.props.compile_flags.iter().any(|s| s.contains("--error-format"));
        let expected_fixed = self.load_expected_output(UI_FIXED);
        let modes_to_prune = vec![CompareMode::Nll];
        self.check_and_prune_duplicate_outputs(&proc_res, &[], &modes_to_prune);
        let mut errors = self.load_compare_outputs(&proc_res, TestOutput::Compile, explicit);
        let rustfix_input = json::rustfix_diagnostics_only(&proc_res.stderr);
        if self.config.compare_mode.is_some() {
            // don't test rustfix with nll right now
        } else if self.config.rustfix_coverage {
            // Find out which tests have `MachineApplicable` suggestions but are missing
            // `run-rustfix` or `run-rustfix-only-machine-applicable` headers.
            //
            // This will return an empty `Vec` in case the executed test file has a
            // `compile-flags: --error-format=xxxx` header with a value other than `json`.
            let suggestions = get_suggestions_from_json(
                &rustfix_input,
                &HashSet::new(),
                Filter::MachineApplicableOnly,
            )
            .unwrap_or_default();
            if !suggestions.is_empty()
                && !self.props.run_rustfix
                && !self.props.rustfix_only_machine_applicable
            {
                // Record this test so tooling can flag the missing header.
                let mut coverage_file_path = self.config.build_base.clone();
                coverage_file_path.push("rustfix_missing_coverage.txt");
                debug!("coverage_file_path: {}", coverage_file_path.display());
                let mut file = OpenOptions::new()
                    .create(true)
                    .append(true)
                    .open(coverage_file_path.as_path())
                    .expect("could not create or open file");
                if writeln!(file, "{}", self.testpaths.file.display()).is_err() {
                    panic!("couldn't write to {}", coverage_file_path.display());
                }
            }
        } else if self.props.run_rustfix {
            // Apply suggestions from rustc to the code itself
            let unfixed_code = self.load_expected_output_from_path(&self.testpaths.file).unwrap();
            let suggestions = get_suggestions_from_json(
                &rustfix_input,
                &HashSet::new(),
                if self.props.rustfix_only_machine_applicable {
                    Filter::MachineApplicableOnly
                } else {
                    Filter::Everything
                },
            )
            .unwrap();
            let fixed_code = apply_suggestions(&unfixed_code, &suggestions).unwrap_or_else(|e| {
                panic!(
                    "failed to apply suggestions for {:?} with rustfix: {}",
                    self.testpaths.file, e
                )
            });
            errors += self.compare_output("fixed", &fixed_code, &expected_fixed);
        } else if !expected_fixed.is_empty() {
            panic!(
                "the `// run-rustfix` directive wasn't found but a `*.fixed` \
                 file was found"
            );
        }
        if errors > 0 {
            println!("To update references, rerun the tests and pass the `--bless` flag");
            let relative_path_to_file =
                self.testpaths.relative_dir.join(self.testpaths.file.file_name().unwrap());
            println!(
                "To only update this specific test, also pass `--test-args {}`",
                relative_path_to_file.display(),
            );
            self.fatal_proc_rec(
                &format!("{} errors occurred comparing output.", errors),
                &proc_res,
            );
        }
        let expected_errors = errors::load_errors(&self.testpaths.file, self.revision);
        if let WillExecute::Yes = should_run {
            // Execute the compiled test and optionally compare its run output too.
            let proc_res = self.exec_compiled_test();
            let run_output_errors = if self.props.check_run_results {
                self.load_compare_outputs(&proc_res, TestOutput::Run, explicit)
            } else {
                0
            };
            if run_output_errors > 0 {
                self.fatal_proc_rec(
                    &format!("{} errors occurred comparing run output.", run_output_errors),
                    &proc_res,
                );
            }
            if self.should_run_successfully(pm) {
                if !proc_res.status.success() {
                    self.fatal_proc_rec("test run failed!", &proc_res);
                }
            } else if proc_res.status.success() {
                self.fatal_proc_rec("test run succeeded!", &proc_res);
            }
            if !self.props.error_patterns.is_empty() {
                // "// error-pattern" comments
                self.check_error_patterns(&proc_res.stderr, &proc_res, pm);
            }
        }
        debug!(
            "run_ui_test: explicit={:?} config.compare_mode={:?} expected_errors={:?} \
               proc_res.status={:?} props.error_patterns={:?}",
            explicit,
            self.config.compare_mode,
            expected_errors,
            proc_res.status,
            self.props.error_patterns
        );
        if !explicit && self.config.compare_mode.is_none() {
            let check_patterns =
                should_run == WillExecute::No && !self.props.error_patterns.is_empty();
            let check_annotations = !check_patterns || !expected_errors.is_empty();
            if check_patterns {
                // "// error-pattern" comments
                self.check_error_patterns(&proc_res.stderr, &proc_res, pm);
            }
            if check_annotations {
                // "//~ERROR comments"
                self.check_expected_errors(expected_errors, &proc_res);
            }
        }
        if self.props.run_rustfix && self.config.compare_mode.is_none() {
            // And finally, compile the fixed code and make sure it both
            // succeeds and has no diagnostics.
            let mut rustc = self.make_compile_args(
                &self.testpaths.file.with_extension(UI_FIXED),
                TargetLocation::ThisFile(self.make_exe_name()),
                emit_metadata,
                AllowUnused::No,
            );
            rustc.arg("-L").arg(&self.aux_output_dir_name());
            let res = self.compose_and_run_compiler(rustc, None);
            if !res.status.success() {
                self.fatal_proc_rec("failed to compile fixed code", &res);
            }
            if !res.stderr.is_empty()
                && !self.props.rustfix_only_machine_applicable
                && !json::rustfix_diagnostics_only(&res.stderr).is_empty()
            {
                self.fatal_proc_rec("fixed code is still producing diagnostics", &res);
            }
        }
    }
fn run_mir_opt_test(&self) {
let pm = self.pass_mode();
let should_run = self.should_run(pm);
let emit_metadata = self.should_emit_metadata(pm);
let proc_res = self.compile_test(should_run, emit_metadata);
if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}
self.check_mir_dump();
if let WillExecute::Yes = should_run {
let proc_res = self.exec_compiled_test();
if !proc_res.status.success() {
self.fatal_proc_rec("test run failed!", &proc_res);
}
}
}
    /// Checks every `// EMIT_MIR` directive in the test file: derives the
    /// expectation filename (dump or diff, optionally bitwidth-suffixed),
    /// compares the freshly-dumped MIR against it, and rewrites the
    /// expectation files when `--bless` is active.
    fn check_mir_dump(&self) {
        let test_file_contents = fs::read_to_string(&self.testpaths.file).unwrap();
        let test_dir = self.testpaths.file.parent().unwrap();
        let test_crate =
            self.testpaths.file.file_stem().unwrap().to_str().unwrap().replace("-", "_");
        // Expectation files get a `.32bit`/`.64bit` infix when requested.
        let mut bit_width = String::new();
        if test_file_contents.lines().any(|l| l == "// EMIT_MIR_FOR_EACH_BIT_WIDTH") {
            bit_width = format!(".{}", get_pointer_width(&self.config.target));
        }
        // When blessing, drop all existing expectation files first so stale
        // ones don't linger after a rename.
        if self.config.bless {
            for e in
                glob(&format!("{}/{}.*{}.mir", test_dir.display(), test_crate, bit_width)).unwrap()
            {
                std::fs::remove_file(e.unwrap()).unwrap();
            }
            for e in
                glob(&format!("{}/{}.*{}.diff", test_dir.display(), test_crate, bit_width)).unwrap()
            {
                std::fs::remove_file(e.unwrap()).unwrap();
            }
        }
        for l in test_file_contents.lines() {
            if l.starts_with("// EMIT_MIR ") {
                let test_name = l.trim_start_matches("// EMIT_MIR ").trim();
                let mut test_names = test_name.split(' ');
                // sometimes we specify two files so that we get a diff between the two files
                let test_name = test_names.next().unwrap();
                let mut expected_file;
                let from_file;
                let to_file;
                if test_name.ends_with(".diff") {
                    // `foo.diff`: diff the pass's before/after MIR dumps.
                    let trimmed = test_name.trim_end_matches(".diff");
                    let test_against = format!("{}.after.mir", trimmed);
                    from_file = format!("{}.before.mir", trimmed);
                    expected_file = format!("{}{}.diff", trimmed, bit_width);
                    assert!(
                        test_names.next().is_none(),
                        "two mir pass names specified for MIR diff"
                    );
                    to_file = Some(test_against);
                } else if let Some(first_pass) = test_names.next() {
                    // Two pass names: diff the dump of the first against the second.
                    let second_pass = test_names.next().unwrap();
                    assert!(
                        test_names.next().is_none(),
                        "three mir pass names specified for MIR diff"
                    );
                    expected_file =
                        format!("{}{}.{}-{}.diff", test_name, bit_width, first_pass, second_pass);
                    let second_file = format!("{}.{}.mir", test_name, second_pass);
                    from_file = format!("{}.{}.mir", test_name, first_pass);
                    to_file = Some(second_file);
                } else {
                    // Plain dump: compare one emitted `.mir`/`.dot`/`.html` file.
                    let ext_re = Regex::new(r#"(\.(mir|dot|html))$"#).unwrap();
                    let cap = ext_re
                        .captures_iter(test_name)
                        .next()
                        .expect("test_name has an invalid extension");
                    let extension = cap.get(1).unwrap().as_str();
                    expected_file = format!(
                        "{}{}{}",
                        test_name.trim_end_matches(extension),
                        bit_width,
                        extension,
                    );
                    from_file = test_name.to_string();
                    assert!(
                        test_names.next().is_none(),
                        "two mir pass names specified for MIR dump"
                    );
                    to_file = None;
                };
                if !expected_file.starts_with(&test_crate) {
                    expected_file = format!("{}.{}", test_crate, expected_file);
                }
                let expected_file = test_dir.join(expected_file);
                let dumped_string = if let Some(after) = to_file {
                    self.diff_mir_files(from_file.into(), after.into())
                } else {
                    let mut output_file = PathBuf::new();
                    output_file.push(self.get_mir_dump_dir());
                    output_file.push(&from_file);
                    debug!(
                        "comparing the contents of: {} with {}",
                        output_file.display(),
                        expected_file.display()
                    );
                    if !output_file.exists() {
                        panic!(
                            "Output file `{}` from test does not exist, available files are in `{}`",
                            output_file.display(),
                            output_file.parent().unwrap().display()
                        );
                    }
                    self.check_mir_test_timestamp(&from_file, &output_file);
                    let dumped_string = fs::read_to_string(&output_file).unwrap();
                    self.normalize_output(&dumped_string, &[])
                };
                if self.config.bless {
                    let _ = std::fs::remove_file(&expected_file);
                    std::fs::write(expected_file, dumped_string.as_bytes()).unwrap();
                } else {
                    if !expected_file.exists() {
                        panic!(
                            "Output file `{}` from test does not exist",
                            expected_file.display()
                        );
                    }
                    let expected_string = fs::read_to_string(&expected_file).unwrap();
                    if dumped_string != expected_string {
                        print!("{}", write_diff(&expected_string, &dumped_string, 3));
                        panic!(
                            "Actual MIR output differs from expected MIR output {}",
                            expected_file.display()
                        );
                    }
                }
            }
        }
    }
fn diff_mir_files(&self, before: PathBuf, after: PathBuf) -> String {
let to_full_path = |path: PathBuf| {
let full = self.get_mir_dump_dir().join(&path);
if !full.exists() {
panic!(
"the mir dump file for {} does not exist (requested in {})",
path.display(),
self.testpaths.file.display(),
);
}
full
};
let before = to_full_path(before);
let after = to_full_path(after);
debug!("comparing the contents of: {} with {}", before.display(), after.display());
let before = fs::read_to_string(before).unwrap();
let after = fs::read_to_string(after).unwrap();
let before = self.normalize_output(&before, &[]);
let after = self.normalize_output(&after, &[]);
let mut dumped_string = String::new();
for result in diff::lines(&before, &after) {
use std::fmt::Write;
match result {
diff::Result::Left(s) => writeln!(dumped_string, "- {}", s).unwrap(),
diff::Result::Right(s) => writeln!(dumped_string, "+ {}", s).unwrap(),
diff::Result::Both(s, _) => writeln!(dumped_string, " {}", s).unwrap(),
}
}
dumped_string
}
fn check_mir_test_timestamp(&self, test_name: &str, output_file: &Path) {
let t = |file| fs::metadata(file).unwrap().modified().unwrap();
let source_file = &self.testpaths.file;
let output_time = t(output_file);
let source_time = t(source_file);
if source_time > output_time {
debug!("source file time: {:?} output file time: {:?}", source_time, output_time);
panic!(
"test source file `{}` is newer than potentially stale output file `{}`.",
source_file.display(),
test_name
);
}
}
fn get_mir_dump_dir(&self) -> PathBuf {
let mut mir_dump_dir = PathBuf::from(self.config.build_base.as_path());
debug!("input_file: {:?}", self.testpaths.file);
mir_dump_dir.push(&self.testpaths.relative_dir);
mir_dump_dir.push(self.testpaths.file.file_stem().unwrap());
mir_dump_dir
}
    /// Normalizes compiler/test output so it can be compared against checked-in
    /// expectation files: rewrites well-known paths to placeholders
    /// (`$DIR`, `$SRC_DIR`, `$BUILD_DIR`, ...), collapses line/column numbers
    /// in `$SRC_DIR` spans, strips `//~` annotations, normalizes v0 symbol
    /// hashes, and finally applies the test's own `normalize-*` rules.
    fn normalize_output(&self, output: &str, custom_rules: &[(String, String)]) -> String {
        let cflags = self.props.compile_flags.join(" ");
        // JSON output escapes backslashes, so path replacement must match the
        // escaped form too.
        let json = cflags.contains("--error-format json")
            || cflags.contains("--error-format pretty-json")
            || cflags.contains("--error-format=json")
            || cflags.contains("--error-format=pretty-json")
            || cflags.contains("--output-format json")
            || cflags.contains("--output-format=json");
        let mut normalized = output.to_string();
        let mut normalize_path = |from: &Path, to: &str| {
            let mut from = from.display().to_string();
            if json {
                from = from.replace("\\", "\\\\");
            }
            normalized = normalized.replace(&from, to);
        };
        let parent_dir = self.testpaths.file.parent().unwrap();
        normalize_path(parent_dir, "$DIR");
        // Paths into the libstd/libcore
        let base_dir = self.config.src_base.parent().unwrap().parent().unwrap().parent().unwrap();
        let src_dir = base_dir.join("library");
        normalize_path(&src_dir, "$SRC_DIR");
        // `ui-fulldeps` tests can show paths to the compiler source when testing macros from
        // `rustc_macros`
        // eg. /home/user/rust/compiler
        let compiler_src_dir = base_dir.join("compiler");
        normalize_path(&compiler_src_dir, "$COMPILER_DIR");
        if let Some(virtual_rust_source_base_dir) =
            option_env!("CFG_VIRTUAL_RUST_SOURCE_BASE_DIR").map(PathBuf::from)
        {
            normalize_path(&virtual_rust_source_base_dir.join("library"), "$SRC_DIR");
            normalize_path(&virtual_rust_source_base_dir.join("compiler"), "$COMPILER_DIR");
        }
        // Paths into the build directory
        let test_build_dir = &self.config.build_base;
        let parent_build_dir = test_build_dir.parent().unwrap().parent().unwrap().parent().unwrap();
        // eg. /home/user/rust/build/x86_64-unknown-linux-gnu/test/ui
        normalize_path(test_build_dir, "$TEST_BUILD_DIR");
        // eg. /home/user/rust/build
        normalize_path(parent_build_dir, "$BUILD_DIR");
        // Paths into lib directory.
        normalize_path(&parent_build_dir.parent().unwrap().join("lib"), "$LIB_DIR");
        if json {
            // escaped newlines in json strings should be readable
            // in the stderr files. There's no point int being correct,
            // since only humans process the stderr files.
            // Thus we just turn escaped newlines back into newlines.
            normalized = normalized.replace("\\n", "\n");
        }
        // If there are `$SRC_DIR` normalizations with line and column numbers, then replace them
        // with placeholders as we do not want tests needing updated when compiler source code
        // changes.
        // eg. $SRC_DIR/libcore/mem.rs:323:14 becomes $SRC_DIR/libcore/mem.rs:LL:COL
        lazy_static! {
            static ref SRC_DIR_RE: Regex =
                Regex::new("SRC_DIR(.+):\\d+:\\d+(: \\d+:\\d+)?").unwrap();
        }
        normalized = SRC_DIR_RE.replace_all(&normalized, "SRC_DIR$1:LL:COL").into_owned();
        normalized = Self::normalize_platform_differences(&normalized);
        normalized = normalized.replace("\t", "\\t"); // makes tabs visible
        // Remove test annotations like `//~ ERROR text` from the output,
        // since they duplicate actual errors and make the output hard to read.
        // This mirrors the regex in src/tools/tidy/src/style.rs, please update
        // both if either are changed.
        lazy_static! {
            static ref ANNOTATION_RE: Regex = Regex::new("\\s*//(\\[.*\\])?~.*").unwrap();
        }
        normalized = ANNOTATION_RE.replace_all(&normalized, "").into_owned();
        // This code normalizes various hashes in v0 symbol mangling that is
        // emitted in the ui and mir-opt tests.
        lazy_static! {
            static ref V0_CRATE_HASH_PREFIX_RE: Regex =
                Regex::new(r"_R.*?Cs[0-9a-zA-Z]+_").unwrap();
            static ref V0_CRATE_HASH_RE: Regex = Regex::new(r"Cs[0-9a-zA-Z]+_").unwrap();
        }
        const V0_CRATE_HASH_PLACEHOLDER: &str = r"CsCRATE_HASH_";
        if V0_CRATE_HASH_PREFIX_RE.is_match(&normalized) {
            // Normalize crate hash
            normalized =
                V0_CRATE_HASH_RE.replace_all(&normalized, V0_CRATE_HASH_PLACEHOLDER).into_owned();
        }
        lazy_static! {
            static ref V0_BACK_REF_PREFIX_RE: Regex = Regex::new(r"\(_R.*?B[0-9a-zA-Z]_").unwrap();
            static ref V0_BACK_REF_RE: Regex = Regex::new(r"B[0-9a-zA-Z]_").unwrap();
        }
        const V0_BACK_REF_PLACEHOLDER: &str = r"B<REF>_";
        if V0_BACK_REF_PREFIX_RE.is_match(&normalized) {
            // Normalize back references (see RFC 2603)
            normalized =
                V0_BACK_REF_RE.replace_all(&normalized, V0_BACK_REF_PLACEHOLDER).into_owned();
        }
        // Custom normalization rules
        for rule in custom_rules {
            let re = Regex::new(&rule.0).expect("bad regex in custom normalization rule");
            normalized = re.replace_all(&normalized, &rule.1[..]).into_owned();
        }
        normalized
    }
/// Normalize output differences across platforms. Generally changes Windows output to be more
/// Unix-like.
///
/// Replaces backslashes in paths with forward slashes, and replaces CRLF line endings
/// with LF.
fn normalize_platform_differences(output: &str) -> String {
lazy_static! {
/// Used to find Windows paths.
///
/// It's not possible to detect paths in the error messages generally, but this is a
/// decent enough heuristic.
static ref PATH_BACKSLASH_RE: Regex = Regex::new(r#"(?x)
(?:
# Match paths that don't include spaces.
(?:\\[\pL\pN\.\-_']+)+\.\pL+
|
# If the path starts with a well-known root, then allow spaces.
\$(?:DIR|SRC_DIR|TEST_BUILD_DIR|BUILD_DIR|LIB_DIR)(?:\\[\pL\pN\.\-_' ]+)+
)"#
).unwrap();
}
let output = output.replace(r"\\", r"\");
PATH_BACKSLASH_RE
.replace_all(&output, |caps: &Captures<'_>| {
println!("{}", &caps[0]);
caps[0].replace(r"\", "/")
})
.replace("\r\n", "\n")
}
fn expected_output_path(&self, kind: &str) -> PathBuf {
let mut path =
expected_output_path(&self.testpaths, self.revision, &self.config.compare_mode, kind);
if !path.exists() {
if let Some(CompareMode::Polonius) = self.config.compare_mode {
path = expected_output_path(
&self.testpaths,
self.revision,
&Some(CompareMode::Nll),
kind,
);
}
}
if !path.exists() {
path = expected_output_path(&self.testpaths, self.revision, &None, kind);
}
path
}
fn load_expected_output(&self, kind: &str) -> String {
let path = self.expected_output_path(kind);
if path.exists() {
match self.load_expected_output_from_path(&path) {
Ok(x) => x,
Err(x) => self.fatal(&x),
}
} else {
String::new()
}
}
fn load_expected_output_from_path(&self, path: &Path) -> Result<String, String> {
fs::read_to_string(path).map_err(|err| {
format!("failed to load expected output from `{}`: {}", path.display(), err)
})
}
fn delete_file(&self, file: &PathBuf) {
if !file.exists() {
// Deleting a nonexistant file would error.
return;
}
if let Err(e) = fs::remove_file(file) {
self.fatal(&format!("failed to delete `{}`: {}", file.display(), e,));
}
}
    /// Compares `actual` against `expected` output of the given `kind`,
    /// printing a diff on mismatch. Returns the number of errors to record:
    /// 0 on match or when `--bless` rewrote the expectation files, 1 on a
    /// non-blessed mismatch. Always saves the actual output next to the
    /// test's build artifacts for inspection.
    fn compare_output(&self, kind: &str, actual: &str, expected: &str) -> usize {
        if actual == expected {
            return 0;
        }
        if !self.config.bless {
            if expected.is_empty() {
                println!("normalized {}:\n{}\n", kind, actual);
            } else {
                println!("diff of {}:\n", kind);
                print!("{}", write_diff(expected, actual, 3));
            }
        }
        let mode = self.config.compare_mode.as_ref().map_or("", |m| m.to_str());
        let output_file = self
            .output_base_name()
            .with_extra_extension(self.revision.unwrap_or(""))
            .with_extra_extension(mode)
            .with_extra_extension(kind);
        let mut files = vec![output_file];
        if self.config.bless {
            // Delete non-revision .stderr/.stdout file if revisions are used.
            // Without this, we'd just generate the new files and leave the old files around.
            if self.revision.is_some() {
                let old =
                    expected_output_path(self.testpaths, None, &self.config.compare_mode, kind);
                self.delete_file(&old);
            }
            files.push(expected_output_path(
                self.testpaths,
                self.revision,
                &self.config.compare_mode,
                kind,
            ));
        }
        // Write the actual output to every target file (or delete when empty).
        for output_file in &files {
            if actual.is_empty() {
                self.delete_file(output_file);
            } else if let Err(err) = fs::write(&output_file, &actual) {
                self.fatal(&format!(
                    "failed to write {} to `{}`: {}",
                    kind,
                    output_file.display(),
                    err,
                ));
            }
        }
        println!("\nThe actual {0} differed from the expected {0}.", kind);
        for output_file in files {
            println!("Actual {} saved to {}", kind, output_file.display());
        }
        if self.config.bless { 0 } else { 1 }
    }
    /// For each UI output extension, compares the compare-mode-specific
    /// expectation files against the base (mode-less) expectation. Files in
    /// `modes` that duplicate the base are deleted when blessing; files in
    /// `require_same_modes` must match the base or the test fails.
    fn check_and_prune_duplicate_outputs(
        &self,
        proc_res: &ProcRes,
        modes: &[CompareMode],
        require_same_modes: &[CompareMode],
    ) {
        for kind in UI_EXTENSIONS {
            let canon_comparison_path =
                expected_output_path(&self.testpaths, self.revision, &None, kind);
            // No base expectation for this kind — nothing to compare against.
            let canon = match self.load_expected_output_from_path(&canon_comparison_path) {
                Ok(canon) => canon,
                _ => continue,
            };
            let bless = self.config.bless;
            let check_and_prune_duplicate_outputs = |mode: &CompareMode, require_same: bool| {
                let examined_path =
                    expected_output_path(&self.testpaths, self.revision, &Some(mode.clone()), kind);
                // If there is no output, there is nothing to do
                let examined_content = match self.load_expected_output_from_path(&examined_path) {
                    Ok(content) => content,
                    _ => return,
                };
                let is_duplicate = canon == examined_content;
                match (bless, require_same, is_duplicate) {
                    // If we're blessing and the output is the same, then delete the file.
                    (true, _, true) => {
                        self.delete_file(&examined_path);
                    }
                    // If we want them to be the same, but they are different, then error.
                    // We do this whether we bless or not
                    (_, true, false) => {
                        self.fatal_proc_rec(
                            &format!("`{}` should not have different output from base test!", kind),
                            proc_res,
                        );
                    }
                    _ => {}
                }
            };
            for mode in modes {
                check_and_prune_duplicate_outputs(mode, false);
            }
            for mode in require_same_modes {
                check_and_prune_duplicate_outputs(mode, true);
            }
        }
    }
fn create_stamp(&self) {
let stamp = crate::stamp(&self.config, self.testpaths, self.revision);
fs::write(&stamp, compute_stamp_hash(&self.config)).unwrap();
}
}
/// Command line used to spawn a subprocess: the program plus its arguments.
struct ProcArgs {
    prog: String,
    args: Vec<String>,
}
/// Captured outcome of a finished subprocess: exit status, both output
/// streams, and the command line that was run (kept for diagnostics).
pub struct ProcRes {
    status: ExitStatus,
    stdout: String,
    stderr: String,
    cmdline: String,
}
impl ProcRes {
    /// Prints the process status, command line, and both captured output
    /// streams in a human-readable framed format.
    pub fn print_info(&self) {
        /// Formats one output stream, collapsing it to "<name>: none" when empty.
        fn render(name: &str, contents: &str) -> String {
            // Reduce structured JSON diagnostics to their rendered text form.
            let contents = json::extract_rendered(contents);
            let contents = contents.trim();
            if contents.is_empty() {
                format!("{name}: none")
            } else {
                format!(
                    "\
                     --- {name} -------------------------------\n\
                     {contents}\n\
                     ------------------------------------------",
                )
            }
        }
        println!(
            "status: {}\ncommand: {}\n{}\n{}\n",
            self.status,
            self.cmdline,
            render("stdout", &self.stdout),
            render("stderr", &self.stderr),
        );
    }
    /// Prints optional error context plus the full process info, runs the
    /// `on_failure` hook, then unwinds to abort the current test.
    pub fn fatal(&self, err: Option<&str>, on_failure: impl FnOnce()) -> ! {
        if let Some(e) = err {
            println!("\nerror: {}", e);
        }
        self.print_info();
        on_failure();
        // Use resume_unwind instead of panic!() to prevent a panic message + backtrace from
        // compiletest, which is unnecessary noise.
        std::panic::resume_unwind(Box::new(()));
    }
}
/// Destination for build output: either an exact file path or a directory.
#[derive(Debug)]
enum TargetLocation {
    ThisFile(PathBuf),
    ThisDirectory(PathBuf),
}
/// Explicit yes/no flag for whether "unused" is allowed — an enum instead of a
/// bare bool so call sites stay self-describing. Exact semantics depend on the
/// caller (not visible here).
enum AllowUnused {
    Yes,
    No,
}
| 38.262529 | 110 | 0.527788 |
714778a65a891478dadad038a6a90ed7641de789
| 75 |
// Graph algorithm submodules.
mod bfs; // breadth-first search
mod dfs; // depth-first search
mod dijkstra_shortest_paths; // Dijkstra's single-source shortest paths
mod search; // search support -- see module for details
mod tarjan_scc; // Tarjan's strongly connected components
| 12.5 | 28 | 0.8 |
7514304aec270b024fe2f8762c21b6d86840ea92
| 4,415 |
//! Tokio wrapper for windows named pipes.
#![cfg(windows)]
#![warn(missing_docs)]
extern crate tokio;
extern crate bytes;
extern crate mio;
extern crate mio_named_pipes;
extern crate futures;
use std::ffi::OsStr;
use std::fmt;
use std::io::{Read, Write};
use std::os::windows::io::*;
use futures::{Async, Poll};
use bytes::{BufMut, Buf};
use mio::Ready;
use tokio::reactor::{Handle, PollEvented2};
use tokio::io::{AsyncRead, AsyncWrite};
/// Named pipe connection.
///
/// Wraps a `mio_named_pipes::NamedPipe` registered with a tokio reactor so it
/// can be used through tokio's `AsyncRead`/`AsyncWrite` (impls below).
pub struct NamedPipe {
    // The mio pipe, wrapped for readiness-driven I/O on the tokio reactor.
    io: PollEvented2<mio_named_pipes::NamedPipe>,
}
impl NamedPipe {
    /// Creates a named pipe at path `p`, registered with the given reactor
    /// handle.
    ///
    /// # Errors
    /// Returns any I/O error raised while creating the underlying mio pipe or
    /// registering it with the reactor.
    pub fn new<P: AsRef<OsStr>>(p: P, handle: &Handle) -> std::io::Result<NamedPipe> {
        // `?` replaces the deprecated `try!` macro (same semantics); the rest
        // of this impl already used `?`.
        let inner = mio_named_pipes::NamedPipe::new(p.as_ref())?;
        NamedPipe::from_pipe(inner, handle)
    }
    /// Wraps an existing mio named pipe, registering it with the given
    /// reactor handle.
    pub fn from_pipe(pipe: mio_named_pipes::NamedPipe, handle: &Handle)
        -> std::io::Result<NamedPipe> {
        Ok(NamedPipe {
            io: PollEvented2::new_with_handle(pipe, handle)?,
        })
    }
    /// Connects the pipe (delegates to `mio_named_pipes::NamedPipe::connect`).
    pub fn connect(&self) -> std::io::Result<()> {
        self.io.get_ref().connect()
    }
    /// Disconnects from the pipe.
    pub fn disconnect(&self) -> std::io::Result<()> {
        self.io.get_ref().disconnect()
    }
    /// Polls the connection for read readiness.
    pub fn poll_read_ready_readable(&mut self) -> tokio::io::Result<Async<Ready>> {
        self.io.poll_read_ready(Ready::readable())
    }
    /// Polls the connection for write readiness.
    pub fn poll_write_ready(&mut self) -> tokio::io::Result<Async<Ready>> {
        self.io.poll_write_ready()
    }
    /// Mutable access to the wrapped `PollEvented2`; used by the async impls.
    fn io_mut(&mut self) -> &mut PollEvented2<mio_named_pipes::NamedPipe> {
        &mut self.io
    }
}
impl Read for NamedPipe {
    /// Delegates to the wrapped `PollEvented2` (non-async read).
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        self.io.read(buf)
    }
}
impl Write for NamedPipe {
    /// Delegates to the wrapped `PollEvented2` (non-async write).
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.io.write(buf)
    }
    fn flush(&mut self) -> std::io::Result<()> {
        self.io.flush()
    }
}
impl<'a> Read for &'a NamedPipe {
    /// Reading through a shared reference; delegates to `(&PollEvented2)`'s impl.
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        (&self.io).read(buf)
    }
}
impl<'a> Write for &'a NamedPipe {
    /// Writing through a shared reference; delegates to `(&PollEvented2)`'s impl.
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        (&self.io).write(buf)
    }
    fn flush(&mut self) -> std::io::Result<()> {
        (&self.io).flush()
    }
}
impl AsyncRead for NamedPipe {
    /// Returning `false` tells tokio this reader may not be handed an
    /// uninitialized buffer, so buffers are zeroed before `read` is called.
    unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool {
        false
    }
    /// Readiness-driven read: reads through a 1 KiB stack buffer and copies
    /// the bytes into `buf`. On `WouldBlock`, read interest is re-registered
    /// and `NotReady` is returned so the task is woken when readable again.
    fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> Poll<usize, std::io::Error> {
        if let Async::NotReady = self.io.poll_read_ready(Ready::readable())? {
            return Ok(Async::NotReady)
        }
        let mut stack_buf = [0u8; 1024];
        let bytes_read = self.io_mut().read(&mut stack_buf);
        match bytes_read {
            Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
                self.io_mut().clear_read_ready(Ready::readable())?;
                return Ok(Async::NotReady);
            },
            Err(e) => Err(e),
            Ok(bytes_read) => {
                // NOTE(review): `put_slice` panics if `buf` has fewer than
                // `bytes_read` bytes of remaining capacity — confirm callers
                // always provide enough room (up to 1024 bytes).
                buf.put_slice(&stack_buf[0..bytes_read]);
                Ok(Async::Ready(bytes_read))
            }
        }
    }
}
impl AsyncWrite for NamedPipe {
    /// No protocol-level shutdown needed; reports immediate completion.
    fn shutdown(&mut self) -> Poll<(), std::io::Error> {
        Ok(().into())
    }
    /// Readiness-driven write: writes `buf`'s current bytes and advances the
    /// cursor past what was accepted. On `WouldBlock`, write interest is
    /// re-registered and `NotReady` is returned.
    fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, std::io::Error> {
        if let Async::NotReady = self.io.poll_write_ready()? {
            return Ok(Async::NotReady)
        }
        let bytes_wrt = self.io_mut().write(buf.bytes());
        match bytes_wrt {
            Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
                self.io_mut().clear_write_ready()?;
                return Ok(Async::NotReady);
            },
            Err(e) => Err(e),
            Ok(bytes_wrt) => {
                // Mark the written bytes as consumed so callers can retry the rest.
                buf.advance(bytes_wrt);
                Ok(Async::Ready(bytes_wrt))
            }
        }
    }
}
impl fmt::Debug for NamedPipe {
    /// Formats using the inner mio pipe's `Debug` representation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.io.get_ref().fmt(f)
    }
}
impl AsRawHandle for NamedPipe {
    /// Exposes the underlying Windows handle of the wrapped pipe.
    fn as_raw_handle(&self) -> RawHandle {
        self.io.get_ref().as_raw_handle()
    }
}
| 27.42236 | 87 | 0.566251 |
14af18ac56a9e73d1bd4184e764031d16e596dab
| 2,019 |
//! Reddit Service Provider -- an application to built subscriptions support atop of the zkSync network.
//!
//! This application has the following modules:
//! - `config`: configuration of the application (can be loaded either from JSON or environment variables);
//! - `database`: bindings of the application data schema to the database back-ends;
//! - `oracle`: module of the interaction with the Community Oracle application;
//! - `zksync`: module of the interaction with the zkSync network;
//! - `utils`: minor helper functions;
//! - `requests`: incoming request types for the API server;
//! - `responses`: outgoing response types for the API server;
//! - `service_provider`: API server and controller for the logic of the application.
use crate::{
config::AppConfig,
database::{DatabaseAccess, MemoryDb},
service_provider::ServiceProvider,
};
use actix_web::{App, HttpServer};
use std::path::PathBuf;
use structopt::StructOpt;
mod config;
mod database;
mod oracle;
mod requests;
mod responses;
mod service_provider;
mod utils;
mod zksync;
/// Builds the `ServiceProvider` controller over `db` and `config`, then runs
/// the HTTP API server bound to `config.app_bind_address` until it exits.
async fn run_server(db: MemoryDb, config: AppConfig) -> std::io::Result<()> {
    let service_provider = ServiceProvider::new(db, config.clone());
    HttpServer::new(move || {
        // Each server worker gets its own clone of the provider.
        let provider = service_provider.clone();
        let app = provider.into_web_scope();
        App::new().service(app)
    })
    .bind(config.app_bind_address)?
    .run()
    .await
}
/// Command-line options, parsed by structopt.
#[derive(Debug, StructOpt)]
#[structopt(name = "service_provider", about = "A Reddit Service Provider.")]
pub struct CliArgs {
    /// Load config from env (rather than a config file)
    #[structopt(short, long)]
    pub env_config: bool,
}
/// Entry point: parses CLI flags, initializes logging, loads the application
/// config (from env vars or `config.json`), creates the in-memory database,
/// and runs the HTTP server to completion.
#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    // Config file used when `--env-config` is not supplied.
    const CONFIG_PATH: &str = "config.json";
    let opt = CliArgs::from_args();
    env_logger::init();
    let config = AppConfig::load(opt.env_config, &PathBuf::from(CONFIG_PATH));
    let memory_db = MemoryDb::init(()).unwrap();
    run_server(memory_db, config).await
}
| 31.061538 | 107 | 0.692422 |
614c5a24e2a0e93761d330e114295afdfaee71e9
| 15,100 |
#![allow(clippy::unreadable_literal)]
use crate::avm1::opcode::OpCode;
use crate::avm1::types::*;
use crate::read::SwfRead;
use std::io::{Error, ErrorKind, Read, Result};
/// Decoder for AVM1 (SWF ActionScript) bytecode read from an underlying stream.
pub struct Reader<R: Read> {
    inner: R,
    // SWF file version; propagated to the nested sub-readers created for
    // length-delimited blocks (function bodies, `with` blocks, etc.).
    version: u8,
}
impl<R: Read> SwfRead<R> for Reader<R> {
    /// Gives the `SwfRead` helper methods access to the underlying stream.
    fn get_inner(&mut self) -> &mut R {
        &mut self.inner
    }
}
impl<R: Read> Reader<R> {
    /// Creates a reader over `inner` for the given SWF file version.
    pub fn new(inner: R, version: u8) -> Reader<R> {
        Reader { inner, version }
    }
    /// Reads actions until the terminating `End` action (which `read_action`
    /// reports as `Ok(None)`).
    pub fn read_action_list(&mut self) -> Result<Vec<Action>> {
        let mut actions = Vec::new();
        while let Some(action) = self.read_action()? {
            actions.push(action);
        }
        Ok(actions)
    }
    /// Reads a single action from the stream.
    ///
    /// Returns `Ok(None)` when the `End` opcode terminates the action list.
    /// Opcodes not in `OpCode` are preserved verbatim as `Action::Unknown`.
    pub fn read_action(&mut self) -> Result<Option<Action>> {
        use num_traits::FromPrimitive;
        let (opcode, length) = self.read_opcode_and_length()?;
        // Limit the sub-reader to this action's declared payload so a
        // malformed action cannot consume bytes past its own record.
        let mut action_reader = Reader::new(self.inner.by_ref().take(length as u64), self.version);
        let action = if let Some(op) = OpCode::from_u8(opcode) {
            match op {
                OpCode::End => return Ok(None),
                OpCode::Add => Action::Add,
                OpCode::Add2 => Action::Add2,
                OpCode::And => Action::And,
                OpCode::AsciiToChar => Action::AsciiToChar,
                OpCode::BitAnd => Action::BitAnd,
                OpCode::BitLShift => Action::BitLShift,
                OpCode::BitOr => Action::BitOr,
                OpCode::BitRShift => Action::BitRShift,
                OpCode::BitURShift => Action::BitURShift,
                OpCode::BitXor => Action::BitXor,
                OpCode::Call => Action::Call,
                OpCode::CallFunction => Action::CallFunction,
                OpCode::CallMethod => Action::CallMethod,
                OpCode::CastOp => Action::CastOp,
                OpCode::CharToAscii => Action::CharToAscii,
                OpCode::CloneSprite => Action::CloneSprite,
                OpCode::ConstantPool => {
                    let mut constants = vec![];
                    for _ in 0..action_reader.read_u16()? {
                        constants.push(action_reader.read_c_string()?);
                    }
                    Action::ConstantPool(constants)
                }
                OpCode::Decrement => Action::Decrement,
                OpCode::DefineFunction => action_reader.read_define_function()?,
                OpCode::DefineFunction2 => action_reader.read_define_function_2()?,
                OpCode::DefineLocal => Action::DefineLocal,
                OpCode::DefineLocal2 => Action::DefineLocal2,
                OpCode::Delete => Action::Delete,
                OpCode::Delete2 => Action::Delete2,
                OpCode::Divide => Action::Divide,
                OpCode::EndDrag => Action::EndDrag,
                OpCode::Enumerate => Action::Enumerate,
                OpCode::Enumerate2 => Action::Enumerate2,
                OpCode::Equals => Action::Equals,
                OpCode::Equals2 => Action::Equals2,
                OpCode::Extends => Action::Extends,
                OpCode::GetMember => Action::GetMember,
                OpCode::GetProperty => Action::GetProperty,
                OpCode::GetTime => Action::GetTime,
                OpCode::GetUrl => Action::GetUrl {
                    url: action_reader.read_c_string()?,
                    target: action_reader.read_c_string()?,
                },
                OpCode::GetUrl2 => {
                    let flags = action_reader.read_u8()?;
                    Action::GetUrl2 {
                        is_target_sprite: flags & 0b10 != 0,
                        is_load_vars: flags & 0b1 != 0,
                        // The top two bits encode the HTTP send method.
                        send_vars_method: match flags >> 6 {
                            0 => SendVarsMethod::None,
                            1 => SendVarsMethod::Get,
                            2 => SendVarsMethod::Post,
                            _ => {
                                return Err(Error::new(
                                    ErrorKind::InvalidData,
                                    "Invalid HTTP method in ActionGetUrl2",
                                ))
                            }
                        },
                    }
                }
                OpCode::GetVariable => Action::GetVariable,
                OpCode::GotoFrame => {
                    let frame = action_reader.read_u16()?;
                    Action::GotoFrame(frame)
                }
                OpCode::GotoFrame2 => {
                    let flags = action_reader.read_u8()?;
                    Action::GotoFrame2 {
                        set_playing: flags & 0b1 != 0,
                        scene_offset: if flags & 0b10 != 0 {
                            action_reader.read_u16()?
                        } else {
                            0
                        },
                    }
                }
                OpCode::GotoLabel => Action::GotoLabel(action_reader.read_c_string()?),
                OpCode::Greater => Action::Greater,
                OpCode::If => Action::If {
                    offset: action_reader.read_i16()?,
                },
                OpCode::ImplementsOp => Action::ImplementsOp,
                OpCode::Increment => Action::Increment,
                OpCode::InitArray => Action::InitArray,
                OpCode::InitObject => Action::InitObject,
                OpCode::InstanceOf => Action::InstanceOf,
                OpCode::Jump => Action::Jump {
                    offset: action_reader.read_i16()?,
                },
                OpCode::Less => Action::Less,
                OpCode::Less2 => Action::Less2,
                OpCode::MBAsciiToChar => Action::MBAsciiToChar,
                OpCode::MBCharToAscii => Action::MBCharToAscii,
                OpCode::MBStringExtract => Action::MBStringExtract,
                OpCode::MBStringLength => Action::MBStringLength,
                OpCode::Modulo => Action::Modulo,
                OpCode::Multiply => Action::Multiply,
                OpCode::NewMethod => Action::NewMethod,
                OpCode::NewObject => Action::NewObject,
                OpCode::NextFrame => Action::NextFrame,
                OpCode::Not => Action::Not,
                OpCode::Or => Action::Or,
                OpCode::Play => Action::Play,
                OpCode::Pop => Action::Pop,
                OpCode::PreviousFrame => Action::PreviousFrame,
                // TODO: Verify correct version for complex types.
                OpCode::Push => {
                    // Values are read until the length-limited payload is
                    // exhausted (the inner read errors at end of payload).
                    let mut values = vec![];
                    while let Ok(value) = action_reader.read_push_value() {
                        values.push(value);
                    }
                    Action::Push(values)
                }
                OpCode::PushDuplicate => Action::PushDuplicate,
                OpCode::RandomNumber => Action::RandomNumber,
                OpCode::RemoveSprite => Action::RemoveSprite,
                OpCode::Return => Action::Return,
                OpCode::SetMember => Action::SetMember,
                OpCode::SetProperty => Action::SetProperty,
                OpCode::SetTarget => Action::SetTarget(action_reader.read_c_string()?),
                OpCode::SetTarget2 => Action::SetTarget2,
                OpCode::SetVariable => Action::SetVariable,
                OpCode::StackSwap => Action::StackSwap,
                OpCode::StartDrag => Action::StartDrag,
                OpCode::Stop => Action::Stop,
                OpCode::StopSounds => Action::StopSounds,
                OpCode::StoreRegister => Action::StoreRegister(action_reader.read_u8()?),
                OpCode::StrictEquals => Action::StrictEquals,
                OpCode::StringAdd => Action::StringAdd,
                OpCode::StringEquals => Action::StringEquals,
                OpCode::StringExtract => Action::StringExtract,
                OpCode::StringGreater => Action::StringGreater,
                OpCode::StringLength => Action::StringLength,
                OpCode::StringLess => Action::StringLess,
                OpCode::Subtract => Action::Subtract,
                OpCode::TargetPath => Action::TargetPath,
                OpCode::Throw => Action::Throw,
                OpCode::ToggleQuality => Action::ToggleQuality,
                OpCode::ToInteger => Action::ToInteger,
                OpCode::ToNumber => Action::ToNumber,
                OpCode::ToString => Action::ToString,
                OpCode::Trace => Action::Trace,
                OpCode::Try => action_reader.read_try()?,
                OpCode::TypeOf => Action::TypeOf,
                OpCode::WaitForFrame => Action::WaitForFrame {
                    frame: action_reader.read_u16()?,
                    num_actions_to_skip: action_reader.read_u8()?,
                },
                OpCode::With => {
                    let code_length = action_reader.read_u16()?;
                    let mut with_reader = Reader::new(
                        (&mut action_reader.inner as &mut dyn Read).take(code_length.into()),
                        self.version,
                    );
                    Action::With {
                        actions: with_reader.read_action_list()?,
                    }
                }
                OpCode::WaitForFrame2 => Action::WaitForFrame2 {
                    num_actions_to_skip: action_reader.read_u8()?,
                },
            }
        } else {
            action_reader.read_unknown_action(opcode, length)?
        };
        Ok(Some(action))
    }
pub fn read_opcode_and_length(&mut self) -> Result<(u8, usize)> {
let opcode = self.read_u8()?;
let length = if opcode >= 0x80 {
self.read_u16()? as usize
} else {
0
};
Ok((opcode, length))
}
fn read_unknown_action(&mut self, opcode: u8, length: usize) -> Result<Action> {
let mut data = vec![0u8; length];
self.inner.read_exact(&mut data)?;
Ok(Action::Unknown { opcode, data })
}
    /// Reads one typed value of an `ActionPush` payload.
    ///
    /// The leading byte selects the value's type; an unrecognized type code
    /// yields an `InvalidData` error.
    fn read_push_value(&mut self) -> Result<Value> {
        let value = match self.read_u8()? {
            0 => Value::Str(self.read_c_string()?),
            1 => Value::Float(self.read_f32()?),
            2 => Value::Null,
            3 => Value::Undefined,
            4 => Value::Register(self.read_u8()?),
            5 => Value::Bool(self.read_u8()? != 0),
            6 => Value::Double(self.read_f64()?),
            7 => Value::Int(self.read_i32()?),
            // Constant pool index: type 8 is an 8-bit index, type 9 a 16-bit one.
            8 => Value::ConstantPool(self.read_u8()?.into()),
            9 => Value::ConstantPool(self.read_u16()?),
            _ => {
                return Err(Error::new(
                    ErrorKind::InvalidData,
                    "Invalid value type in ActionPush",
                ))
            }
        };
        Ok(value)
    }
    /// Reads a `DefineFunction` action: function name, parameter names, and
    /// the function body (decoded with a length-limited sub-reader).
    fn read_define_function(&mut self) -> Result<Action> {
        let name = self.read_c_string()?;
        let num_params = self.read_u16()?;
        let mut params = Vec::with_capacity(num_params as usize);
        for _ in 0..num_params {
            params.push(self.read_c_string()?);
        }
        let code_length = self.read_u16()?;
        // Restrict the body reader to `code_length` bytes.
        let mut fn_reader = Reader::new(
            (&mut self.inner as &mut dyn Read).take(code_length.into()),
            self.version,
        );
        Ok(Action::DefineFunction {
            name,
            params,
            actions: fn_reader.read_action_list()?,
        })
    }
    /// Reads a `DefineFunction2` action.
    ///
    /// Unlike `DefineFunction`, parameters may be bound to registers
    /// (register 0 means "no register"), and a flags word controls which
    /// implicit values (`this`, `arguments`, `super`, parent, root, global)
    /// are preloaded into registers or suppressed.
    fn read_define_function_2(&mut self) -> Result<Action> {
        let name = self.read_c_string()?;
        let num_params = self.read_u16()?;
        let num_registers = self.read_u8()?; // Number of registers
        let flags = self.read_u16()?;
        let mut params = Vec::with_capacity(num_params as usize + num_registers as usize);
        for _ in 0..num_params {
            let register = self.read_u8()?;
            params.push(FunctionParam {
                name: self.read_c_string()?,
                register_index: if register == 0 { None } else { Some(register) },
            });
        }
        let code_length = self.read_u16()?;
        // Restrict the body reader to `code_length` bytes.
        let mut fn_reader = Reader::new(
            (&mut self.inner as &mut dyn Read).take(code_length.into()),
            self.version,
        );
        Ok(Action::DefineFunction2(Function {
            name,
            params,
            preload_global: flags & 0b1_00000000 != 0,
            preload_parent: flags & 0b10000000 != 0,
            preload_root: flags & 0b1000000 != 0,
            suppress_super: flags & 0b100000 != 0,
            preload_super: flags & 0b10000 != 0,
            suppress_arguments: flags & 0b1000 != 0,
            preload_arguments: flags & 0b100 != 0,
            suppress_this: flags & 0b10 != 0,
            preload_this: flags & 0b1 != 0,
            actions: fn_reader.read_action_list()?,
        }))
    }
    /// Reads a `Try` action: a flag byte, the three block lengths, the catch
    /// target (named variable vs. register, selected by flag bit 2), and the
    /// try/catch/finally bodies, each decoded with a length-limited
    /// sub-reader. Flag bits 0 and 1 record whether catch/finally exist.
    fn read_try(&mut self) -> Result<Action> {
        let flags = self.read_u8()?;
        let try_length = self.read_u16()?;
        let catch_length = self.read_u16()?;
        let finally_length = self.read_u16()?;
        let catch_var = if flags & 0b100 != 0 {
            CatchVar::Var(self.read_c_string()?)
        } else {
            CatchVar::Register(self.read_u8()?)
        };
        let try_actions = {
            let mut fn_reader = Reader::new(
                (&mut self.inner as &mut dyn Read).take(try_length.into()),
                self.version,
            );
            fn_reader.read_action_list()?
        };
        let catch_actions = {
            let mut fn_reader = Reader::new(
                (&mut self.inner as &mut dyn Read).take(catch_length.into()),
                self.version,
            );
            fn_reader.read_action_list()?
        };
        let finally_actions = {
            let mut fn_reader = Reader::new(
                (&mut self.inner as &mut dyn Read).take(finally_length.into()),
                self.version,
            );
            fn_reader.read_action_list()?
        };
        Ok(Action::Try(TryBlock {
            try_actions,
            catch: if flags & 0b1 != 0 {
                Some((catch_var, catch_actions))
            } else {
                None
            },
            finally: if flags & 0b10 != 0 {
                Some(finally_actions)
            } else {
                None
            },
        }))
    }
}
#[cfg(test)]
pub mod tests {
    use super::*;
    use crate::test_data;
    /// Decodes every AVM1 test vector with `read_action` and compares the
    /// result against the expected action.
    #[test]
    fn read_action() {
        for (swf_version, expected_action, action_bytes) in test_data::avm1_tests() {
            let mut reader = Reader::new(&action_bytes[..], swf_version);
            let parsed_action = reader.read_action().unwrap().unwrap();
            if parsed_action != expected_action {
                // Failed, result doesn't match.
                panic!(
                    "Incorrectly parsed action.\nRead:\n{:?}\n\nExpected:\n{:?}",
                    parsed_action, expected_action
                );
            }
        }
    }
}
| 40.591398 | 99 | 0.492781 |
4877cf367ccf623b39939b3ad0bdb5bf47baf7d1
| 81 |
//! Data package support.
pub mod common; // shared definitions -- see module for details
pub mod json_schema; // JSON Schema handling
pub mod sealing; // package sealing support
| 13.5 | 25 | 0.740741 |
f81c7fb1df062bf51b205b72d071f572fc89acde
| 7,305 |
//! Internal api for initializing big arrays.
//!
//! For public api see [`Array`]'s [`try_unfold`], [`unfold`], [`try_from_fn`],
//! [`from_fn`] and [`from_iter`].
//!
//! [`Array`]: crate::Array
//! [`try_unfold`]: crate::Array::try_unfold
//! [`unfold`]: crate::Array::unfold
//! [`try_from_fn`]: crate::Array::try_from_fn
//! [`from_fn`]: crate::Array::from_fn
//! [`from_iter`]: crate::Array::from_iter
use core::{
convert::Infallible,
mem::{self, MaybeUninit},
ptr,
};
use crate::{Array, ArrayShorthand, MaybeUninitSlice};
/// Builds an array by invoking `init` with each index in ascending order.
///
/// Delegates to `try_array_init_fn` with the uninhabited `Infallible` error
/// type, so the `unwrap_or_else(|inf| match inf {})` arm is statically
/// unreachable.
#[inline]
pub(crate) fn array_init_fn<Arr, F>(mut init: F) -> Arr
where
    Arr: Array,
    Arr::Item: Sized,
    F: FnMut(usize) -> Arr::Item,
{
    try_array_init_fn(|i| Ok::<_, Infallible>(init(i))).unwrap_or_else(|inf| match inf {})
}
/// Fills an array from `iter`, returning `None` if the iterator is exhausted
/// before the array is full. Surplus items are left unconsumed.
#[inline]
pub(crate) fn array_init_iter<Arr, I>(mut iter: I) -> Option<Arr>
where
    Arr: Array,
    Arr::Item: Sized,
    I: Iterator<Item = Arr::Item>,
{
    try_unfold_array((), |_| iter.next().ok_or(())).ok()
}
/// Fallible counterpart of `array_init_fn`: stops at and returns the first
/// `Err` produced by `f` (already-initialized elements are dropped by
/// `try_unfold_array`'s guard).
#[inline]
pub(crate) fn try_array_init_fn<Arr, Err, F>(mut f: F) -> Result<Arr, Err>
where
    Arr: Array,
    Arr::Item: Sized,
    F: FnMut(usize) -> Result<Arr::Item, Err>,
{
    // Thread the element index through as the unfold state.
    try_unfold_array(0usize, |state| {
        let item = f(*state);
        *state += 1;
        item
    })
}
/// Builds an array by repeatedly applying `f` to the mutable state `init`.
///
/// Infallible wrapper over `try_unfold_array`; the error branch is statically
/// unreachable (`Infallible`).
#[inline]
pub(crate) fn unfold_array<Arr, St, F>(init: St, mut f: F) -> Arr
where
    Arr: Array,
    F: FnMut(&mut St) -> Arr::Item,
{
    try_unfold_array(init, |state| Ok::<_, Infallible>(f(state))).unwrap_or_else(|inf| match inf {})
}
/// Core initializer: builds an array by repeatedly calling `f` on `state`,
/// propagating the first error.
///
/// Drop-safety: when `Arr::Item` needs `Drop`, a scope guard ensures that
/// elements initialized before a panic or an early `Err` return are dropped.
#[inline]
pub(crate) fn try_unfold_array<Arr, St, F, E>(init: St, mut f: F) -> Result<Arr, E>
where
    Arr: Array,
    // It's better to use `Try` here, instead of `Result` but it's unstable
    F: FnMut(&mut St) -> Result<Arr::Item, E>,
{
    let mut array = Arr::uninit();
    let mut state = init;
    if !mem::needs_drop::<Arr::Item>() {
        for hole in array.iter_mut() {
            // If `init` panics/fails nothing really happen: panic/fail just go up
            // (Item doesn't need drop, so there are no leaks and everything is ok')
            *hole = MaybeUninit::new(f(&mut state)?)
        }
    } else {
        // Item needs drop, so things came up a bit tricky
        /// Guard that runs drop's of initialized elements on panic or early
        /// return.
        ///
        /// This struct is private to this function, because of the unsafe code
        /// in it's `Drop` impl, which is sound only if:
        /// - all elements of `self.arr[..self.initialized]` are initialized...
        /// - elements behind `arr` slice aren't used after `DropGuard` is
        ///   dropped
        /// - ...so it must be sound to drop these elements using
        ///   `ptr::drop_in_place`
        struct DropGuard<'a, Item> {
            arr: &'a mut [MaybeUninit<Item>],
            initialized: usize,
        }
        impl<Item> Drop for DropGuard<'_, Item> {
            fn drop(&mut self) {
                // ## Safety
                //
                // The contract of the struct guarantees that this is sound
                unsafe {
                    let inited: &mut [Item] = self.arr[..self.initialized].assume_init_mut();
                    // drop initialized elements
                    ptr::drop_in_place(inited);
                }
            }
        }
        // If the `f(&mut state)?` call panics or fails, `guard` is dropped,
        // thus dropping `array[..initialized]` => no memory leak!
        //
        // ## Safety
        //
        // By construction, `array[..initialized]` only contains
        // init elements, thus there is no risk of dropping uninit data.
        let mut guard = DropGuard {
            arr: array.as_mut_slice(),
            initialized: 0,
        };
        // We need `guard` to hold unique reference to the `array`,
        // so we can't access `array` directly
        for (i, hole) in guard.arr.iter_mut().enumerate() {
            // Invariant: `i` elements have already been initialized
            guard.initialized = i;
            // If `f(&mut state)?` panics or fails, `guard` is dropped, thus
            // dropping the elements in `array[..i]`
            *hole = MaybeUninit::new(f(&mut state)?);
        }
        // Next lines return array from the function, so if `panic_guard` will be
        // dropped it could cause "use after free".
        mem::forget(guard);
    }
    // don't be fooled by this unsafe{} block, all the magic is in the previous
    // if/else.
    unsafe {
        // ## Safety
        //
        // We already initialized all elements of the array
        Ok(Arr::assume_init(array))
    }
}
#[cfg(test)]
mod tests {
    // I just don't want to think about ordering
    #![allow(clippy::mutex_atomic)]
    use core::convert::TryFrom;
    use std::sync::Mutex;
    use super::{array_init_fn, try_array_init_fn};
    use crate::util::init::array_init_iter;
    /// Add `1` to mutex on drop
    #[derive(Debug)]
    struct DropCount<'a>(&'a Mutex<usize>);
    impl<'a> Drop for DropCount<'a> {
        fn drop(&mut self) {
            let mut guard = self.0.lock().unwrap();
            *guard += 1;
        }
    }
    #[test]
    fn copy() {
        let arr: [i32; 32] = super::try_array_init_fn(i32::try_from).unwrap();
        assert_eq!(
            arr,
            [
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
                23, 24, 25, 26, 27, 28, 29, 30, 31,
            ]
        );
    }
    #[test]
    fn drop() {
        struct HasDrop;
        impl Drop for HasDrop {
            fn drop(&mut self) {}
        }
        let _: [HasDrop; 16] = array_init_fn(|_| HasDrop);
    }
    #[test]
    fn drop_on_panic() {
        let counter = Mutex::new(0);
        let r = std::panic::catch_unwind(|| {
            let _: [DropCount; 16] = array_init_fn(|i| {
                if i == 10 {
                    panic!()
                } else {
                    DropCount(&counter)
                }
            });
        });
        assert!(r.is_err());
        // Elements 0..10 were constructed before the panic, so exactly 10 drops.
        assert_eq!(*counter.lock().unwrap(), 10);
    }
    #[test]
    fn drop_on_fail() {
        let counter = Mutex::new(0);
        let r: Result<[DropCount; 16], ()> = try_array_init_fn(|i| {
            if i == 10 {
                Err(())
            } else {
                Ok(DropCount(&counter))
            }
        });
        assert!(r.is_err());
        // Elements 0..10 were constructed before the failure, so exactly 10 drops.
        assert_eq!(*counter.lock().unwrap(), 10);
    }
    #[test]
    fn zst() {
        let _: [(); 65536] = array_init_fn(|_| ());
    }
    #[test]
    fn the_biggest() {
        let _: [usize; 16384] = array_init_fn(|i| i);
    }
    #[test]
    fn iter_equal_len() {
        let mut vec = vec![0, 1, 2, 3, 4];
        let arr: [i32; 5] = array_init_iter(vec.drain(..)).unwrap();
        assert_eq!(arr, [0, 1, 2, 3, 4]);
    }
    #[test]
    fn iter_greater_len() {
        let mut vec = vec![0, 1, 2, 3, 4, 5, 6, 7, 8];
        let arr: [i32; 5] = array_init_iter(vec.drain(..)).unwrap();
        assert_eq!(arr, [0, 1, 2, 3, 4]);
    }
    #[test]
    fn iter_less_len() {
        let mut vec = vec![0, 1, 2];
        let arr: Option<[i32; 5]> = array_init_iter(vec.drain(..));
        assert_eq!(arr, None);
    }
}
| 27.881679 | 100 | 0.519644 |
9188cb22983dd57312140dab80923ec0b0d14ced
| 50,926 |
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! This module translates the bytecode of a module to Boogie code.
use std::collections::BTreeMap;
use itertools::Itertools;
#[allow(unused_imports)]
use log::{debug, info, log, warn, Level};
use bytecode::{
function_target::FunctionTarget,
function_target_pipeline::FunctionTargetsHolder,
stackless_bytecode::{BorrowNode, Bytecode, Constant, Operation},
verification_analysis,
};
use move_model::{
code_writer::CodeWriter,
emit, emitln,
model::{GlobalEnv, ModuleEnv, StructEnv, TypeParameter},
pragmas::{ADDITION_OVERFLOW_UNCHECKED_PRAGMA, SEED_PRAGMA, TIMEOUT_PRAGMA},
ty::{PrimitiveType, Type},
};
use crate::{
boogie_helpers::{
boogie_byte_blob, boogie_debug_track_abort, boogie_debug_track_local,
boogie_debug_track_return, boogie_field_name, boogie_function_name, boogie_local_type,
boogie_modifies_memory_name, boogie_resource_memory_name, boogie_struct_name,
boogie_type_value, boogie_type_value_array, boogie_type_values, boogie_well_formed_check,
},
options::BoogieOptions,
spec_translator::SpecTranslator,
};
use bytecode::{
function_target_pipeline::FunctionVariant,
stackless_bytecode::{AbortAction, PropKind},
};
use codespan::LineIndex;
use move_model::{ast::TempIndex, model::Loc};
/// Top-level translator: drives Boogie code generation for every module in
/// the global environment.
pub struct BoogieTranslator<'env> {
    env: &'env GlobalEnv,
    options: &'env BoogieOptions,
    writer: &'env CodeWriter,
    targets: &'env FunctionTargetsHolder,
}
/// Per-module translator: emits Boogie declarations for one module's spec
/// vars/funs, structs, and functions.
pub struct ModuleTranslator<'env> {
    writer: &'env CodeWriter,
    module_env: ModuleEnv<'env>,
    spec_translator: SpecTranslator<'env>,
    options: &'env BoogieOptions,
    targets: &'env FunctionTargetsHolder,
}
impl<'env> BoogieTranslator<'env> {
    /// Creates a translator over the given environment, options, and function
    /// targets, emitting Boogie code into `writer`.
    pub fn new(
        env: &'env GlobalEnv,
        options: &'env BoogieOptions,
        targets: &'env FunctionTargetsHolder,
        writer: &'env CodeWriter,
    ) -> Self {
        Self {
            env,
            options,
            targets,
            writer,
        }
    }
    /// Translates every module in the environment.
    pub fn translate(&mut self) {
        // generate definitions for all modules.
        for module_env in self.env.get_modules() {
            ModuleTranslator::new(self, module_env).translate();
        }
    }
}
impl<'env> ModuleTranslator<'env> {
    /// Creates a new module translator for `module`, sharing the parent
    /// translator's writer, options, and targets.
    fn new(parent: &'env BoogieTranslator, module: ModuleEnv<'env>) -> Self {
        Self {
            writer: parent.writer,
            options: parent.options,
            module_env: module,
            spec_translator: SpecTranslator::new(parent.writer, &parent.env, parent.options),
            targets: &parent.targets,
        }
    }
    /// Translates this module: spec vars, spec funs, structs, then functions.
    fn translate(&mut self) {
        // Target modules are logged at Info; dependency modules only at Debug.
        log!(
            if !self.module_env.is_target() {
                Level::Debug
            } else {
                Level::Info
            },
            "translating module {}",
            self.module_env
                .get_name()
                .display(self.module_env.symbol_pool())
        );
        self.writer
            .set_location(&self.module_env.env.internal_loc());
        self.spec_translator.translate_spec_vars(&self.module_env);
        self.spec_translator.translate_spec_funs(&self.module_env);
        self.translate_structs();
        self.translate_functions();
    }
    /// Translates all structs in the module, after emitting a section header
    /// comment into the generated Boogie output.
    fn translate_structs(&self) {
        emitln!(
            self.writer,
            "\n\n// ** structs of module {}\n",
            self.module_env
                .get_name()
                .display(self.module_env.symbol_pool())
        );
        for struct_env in self.module_env.get_structs() {
            // Set the location to internal so we don't see locations of pack/unpack
            // in execution traces.
            self.writer
                .set_location(&self.module_env.env.internal_loc());
            self.translate_struct_type(&struct_env);
        }
    }
    /// Translates the given struct: emits its Boogie type name constant,
    /// field-name constants, the `_type_value` constructor function, a
    /// `$Memory` variable for resources, and its well-formedness assumption.
    fn translate_struct_type(&self, struct_env: &StructEnv<'_>) {
        // Emit TypeName
        let struct_name = boogie_struct_name(&struct_env);
        emitln!(self.writer, "const unique {}: $TypeName;", struct_name);
        // Emit FieldNames, each pinned by axiom to its field offset.
        for (i, field_env) in struct_env.get_fields().enumerate() {
            let field_name = boogie_field_name(&field_env);
            emitln!(
                self.writer,
                "const {}: $FieldName;\naxiom {} == {};",
                field_name,
                field_name,
                i
            );
        }
        // Emit TypeValue constructor function.
        let type_params = struct_env
            .get_type_parameters()
            .iter()
            .enumerate()
            .map(|(i, _)| format!("$tv{}: $TypeValue", i))
            .join(", ");
        let type_args = struct_env
            .get_type_parameters()
            .iter()
            .enumerate()
            .map(|(i, _)| Type::TypeParameter(i as u16))
            .collect_vec();
        let type_args_array = boogie_type_value_array(struct_env.module_env.env, &type_args);
        let type_value = format!("$StructType({}, {})", struct_name, type_args_array);
        emitln!(
            self.writer,
            "function {}_type_value({}): $TypeValue {{\n    {}\n}}",
            struct_name,
            type_params,
            type_value
        );
        // Emit memory variable (resources only; non-resource structs have no
        // global memory).
        if struct_env.is_resource() {
            let memory_name = boogie_resource_memory_name(
                struct_env.module_env.env,
                struct_env.get_qualified_id(),
                &None,
            );
            emitln!(self.writer, "var {}: $Memory;", memory_name);
        }
        // Emit type assumption function.
        self.spec_translator
            .translate_assume_well_formed(&struct_env);
    }
    /// Translates all functions in the module.
    ///
    /// Native and intrinsic functions are skipped. For each remaining
    /// function, only the variants warranted by verification analysis are
    /// translated: the `Verification` variant when the function is verified,
    /// and the `Baseline` variant when it is inlined.
    fn translate_functions(&self) {
        emitln!(
            self.writer,
            "\n\n// ** functions of module {}\n",
            self.module_env
                .get_name()
                .display(self.module_env.symbol_pool())
        );
        for func_env in self.module_env.get_functions() {
            if func_env.is_native() || func_env.is_intrinsic() {
                continue;
            }
            let verification_info =
                verification_analysis::get_info(&self.targets.get_annotated_target(&func_env));
            for variant in self.targets.get_target_variants(&func_env) {
                if verification_info.verified && variant == FunctionVariant::Verification
                    || verification_info.inlined && variant == FunctionVariant::Baseline
                {
                    self.translate_function(variant, &self.targets.get_target(&func_env, variant));
                }
            }
        }
    }
}
impl<'env> ModuleTranslator<'env> {
    /// Translates the given function variant: emits the procedure signature
    /// followed by its body, then a trailing blank line.
    fn translate_function(&self, variant: FunctionVariant, fun_target: &FunctionTarget<'_>) {
        self.generate_function_sig(variant, &fun_target);
        self.generate_function_body(variant, &fun_target);
        emitln!(self.writer);
    }
    /// Emits the Boogie procedure header for the function.
    ///
    /// The `Baseline` variant gets an `{:inline 1}` attribute; the
    /// `Verification` variant gets a `$verify` name suffix plus a
    /// `{:timeLimit ...}` attribute (and `{:random_seed ...}` when the seed
    /// pragma is set), both overridable via function pragmas.
    fn generate_function_sig(&self, variant: FunctionVariant, fun_target: &FunctionTarget<'_>) {
        let (args, rets) = self.generate_function_args_and_returns(fun_target);
        let (suffix, attribs) = match variant {
            FunctionVariant::Baseline => ("", "{:inline 1} ".to_string()),
            FunctionVariant::Verification => {
                let timeout = fun_target
                    .func_env
                    .get_num_pragma(TIMEOUT_PRAGMA, || self.options.vc_timeout);
                let attribs = if fun_target.func_env.is_num_pragma_set(SEED_PRAGMA) {
                    let seed = fun_target
                        .func_env
                        .get_num_pragma(SEED_PRAGMA, || self.options.random_seed);
                    format!("{{:timeLimit {}}} {{:random_seed {}}} ", timeout, seed)
                } else {
                    format!("{{:timeLimit {}}} ", timeout)
                };
                ("$verify", attribs)
            }
        };
        self.writer.set_location(&fun_target.get_loc());
        emitln!(
            self.writer,
            "procedure {}{}{}({}) returns ({})",
            attribs,
            boogie_function_name(fun_target.func_env),
            suffix,
            args,
            rets,
        )
    }
    /// Generates the Boogie argument and return lists for a function.
    ///
    /// Arguments are the type parameters (as `$TypeValue`s) followed by the
    /// value parameters `$t<i>`; parameters that must be assignable inside the
    /// body are prefixed `_$` so a mutable proxy can shadow them (Boogie
    /// forbids assigning to parameters directly).
    fn generate_function_args_and_returns(
        &self,
        fun_target: &FunctionTarget<'_>,
    ) -> (String, String) {
        let args = fun_target
            .get_type_parameters()
            .iter()
            .map(|TypeParameter(s, _)| {
                format!("{}: $TypeValue", s.display(fun_target.symbol_pool()))
            })
            .chain((0..fun_target.get_parameter_count()).map(|i| {
                let ty = fun_target.get_local_type(i);
                // Boogie does not allow to assign to parameters, so we need to proxy them.
                let prefix = if self.parameter_needs_to_be_mutable(fun_target, i) {
                    "_$"
                } else {
                    "$"
                };
                format!("{}t{}: {}", prefix, i, boogie_local_type(ty))
            }))
            .join(", ");
        let rets = fun_target
            .get_return_types()
            .iter()
            .enumerate()
            .map(|(i, ref s)| format!("$ret{}: {}", i, boogie_local_type(s)))
            .join(", ");
        (args, rets)
    }
    /// Generates boogie implementation body.
    ///
    /// Boogie requires all `var` declarations to precede statements, so the
    /// emission order here is significant: locals, parameter proxies, modifies
    /// sets, trace temporaries and memory snapshot variables are all declared
    /// before the first assignment.
    fn generate_function_body(&self, variant: FunctionVariant, fun_target: &FunctionTarget<'_>) {
        // Be sure to set back location to the whole function definition as a default.
        self.writer.set_location(&fun_target.get_loc().at_start());
        emitln!(self.writer, "{");
        self.writer.indent();
        // Generate local variable declarations. They need to appear first in boogie.
        emitln!(self.writer, "// declare local variables");
        let num_args = fun_target.get_parameter_count();
        for i in num_args..fun_target.get_local_count() {
            let local_type = fun_target.get_local_type(i);
            emitln!(
                self.writer,
                "var $t{}: {}; // {}",
                i,
                boogie_local_type(local_type),
                boogie_type_value(self.module_env.env, local_type)
            );
        }
        // Generate declarations for renamed parameters.
        let proxied_parameters = self.get_mutable_parameters(fun_target);
        for (idx, ty) in &proxied_parameters {
            emitln!(self.writer, "var $t{}: {};", idx, boogie_local_type(ty));
        }
        // Generate declarations for modifies condition.
        fun_target.get_modify_targets().keys().for_each(|ty| {
            emitln!(
                self.writer,
                // Note: the statement-terminating `;` is carried inside the
                // type string argument below.
                "var {}: {}",
                boogie_modifies_memory_name(fun_target.global_env(), *ty),
                "[$TypeValueArray, int]bool;"
            );
        });
        // Declare temporaries for debug tracing.
        emitln!(self.writer, "var $trace_abort_temp: int;");
        emitln!(self.writer, "var $trace_local_temp: $Value;");
        // Generate memory snapshot variable declarations.
        let code = fun_target.get_bytecode();
        let labels = code
            .iter()
            .filter_map(|bc| {
                use Bytecode::*;
                match bc {
                    SaveMem(_, lab, mem) => Some((lab, mem)),
                    SaveSpecVar(..) => panic!("spec var memory snapshots NYI"),
                    _ => None,
                }
            })
            // Keyed by label: a later SaveMem with the same label would overwrite
            // an earlier one — presumably labels are unique upstream (TODO confirm).
            .collect::<BTreeMap<_, _>>();
        for (lab, mem) in labels {
            let name = boogie_resource_memory_name(self.module_env.env, *mem, &Some(*lab));
            emitln!(self.writer, "var {}: $Memory;", name);
        }
        // Initialize renamed parameters.
        for (idx, _) in proxied_parameters {
            emitln!(self.writer, "$t{} := _$t{};", idx, idx);
        }
        // Initial assumptions
        if variant == FunctionVariant::Verification {
            self.translate_verify_entry_assumptions(fun_target);
        }
        // Generate bytecode
        emitln!(self.writer, "\n// bytecode translation starts here");
        let mut last_tracked_loc = None;
        for bytecode in code.iter() {
            self.translate_bytecode(fun_target, &mut last_tracked_loc, bytecode);
        }
        self.writer.unindent();
        emitln!(self.writer, "}");
    }
fn get_mutable_parameters(&self, fun_target: &FunctionTarget<'_>) -> Vec<(TempIndex, Type)> {
(0..fun_target.get_parameter_count())
.filter_map(|i| {
if self.parameter_needs_to_be_mutable(fun_target, i) {
Some((i, fun_target.get_local_type(i).clone()))
} else {
None
}
})
.collect_vec()
}
    /// Determines whether the parameter of a function needs to be mutable.
    /// Boogie does not allow to assign to procedure parameters. In some cases
    /// (e.g. for memory instrumentation, but also as a result of copy propagation),
    /// we may need to assign to parameters.
    fn parameter_needs_to_be_mutable(
        &self,
        _fun_target: &FunctionTarget<'_>,
        _idx: TempIndex,
    ) -> bool {
        // For now, we just always say true. This could be optimized because the actual (known
        // so far) sources for mutability are parameters which are used in WriteBack(LocalRoot(p))
        // position.
        // Consequence of the conservative `true`: every parameter is declared as
        // `_$tN` and copied into a `$tN` proxy (see `generate_function_body`).
        true
    }
    /// Emits the assumptions made at the start of a verification-variant
    /// procedure: prelude initialization (`$InitVerification`), aliasing
    /// freedom for reference parameters, and the modifies permission domains.
    fn translate_verify_entry_assumptions(&self, fun_target: &FunctionTarget<'_>) {
        emitln!(self.writer, "\n// verification entrypoint assumptions");
        // Prelude initialization
        emitln!(self.writer, "call $InitVerification();");
        // Assume reference parameters to be based on the Param(i) Location, ensuring
        // they are disjoint from all other references. This prevents aliasing and is justified as
        // follows:
        // - for mutual references, by their exclusive access in Move.
        // - for immutable references, by that mutation is not possible, and they are equivalent
        //   to some given but arbitrary value.
        for i in 0..fun_target.get_parameter_count() {
            let ty = fun_target.get_local_type(i);
            if ty.is_reference() {
                emitln!(self.writer, "assume l#$Mutation($t{}) == $Param({});", i, i);
                // An empty access path: the reference roots directly at the parameter.
                emitln!(self.writer, "assume size#Path(p#$Mutation($t{})) == 0;", i);
            }
        }
        // Initialize modify permissions.
        self.initialize_modifies_permissions(fun_target);
    }
/// Initializes modifies permissions.
fn initialize_modifies_permissions(&self, fun_target: &FunctionTarget<'_>) {
let env = fun_target.global_env();
for (ty, targets) in fun_target.get_modify_targets() {
emit!(
self.writer,
"{} := {}",
boogie_modifies_memory_name(fun_target.global_env(), *ty),
"$ConstMemoryDomain(false)"
);
for target in targets {
let node_id = target.node_id();
let args = target.call_args();
let rty = &env.get_node_instantiation(node_id)[0];
let (_, _, targs) = rty.require_struct();
let type_args = boogie_type_value_array(env, targs);
emit!(self.writer, "[{}, a#$Address(", type_args);
self.spec_translator.translate(&args[0]);
emit!(self.writer, ") := true]");
}
emitln!(self.writer, ";");
}
}
    /// Translates one bytecode instruction into boogie statements.
    ///
    /// `last_tracked_loc` carries the last source line for which a `$at` trace
    /// assumption was emitted, so consecutive instructions on the same line are
    /// tracked only once (see `track_loc`); it is cleared where precise
    /// re-tracking is required (aborts, after calls).
    fn translate_bytecode(
        &'env self,
        fun_target: &FunctionTarget<'_>,
        last_tracked_loc: &mut Option<(Loc, LineIndex)>,
        bytecode: &Bytecode,
    ) {
        use Bytecode::*;
        // Set location of this code in the CodeWriter.
        let attr_id = bytecode.get_attr_id();
        let loc = fun_target.get_bytecode_loc(attr_id);
        self.writer.set_location(&loc);
        // Print location.
        emitln!(
            self.writer,
            "// {} {}",
            bytecode.display(fun_target, &BTreeMap::default()),
            loc.display(self.module_env.env)
        );
        // Print debug comments.
        if let Some(comment) = fun_target.get_debug_comment(attr_id) {
            emitln!(self.writer, "// {}", comment);
        }
        // Track location for execution traces.
        if matches!(bytecode, Call(_, _, Operation::TraceAbort, ..)) {
            // Ensure that aborts always has the precise location instead of the
            // line-approximated one
            *last_tracked_loc = None;
        }
        self.track_loc(fun_target, last_tracked_loc, &loc);
        // Helper function to get a a string for a local
        let str_local = |idx: usize| format!("$t{}", idx);
        // Translate the bytecode instruction.
        match bytecode {
            SpecBlock(..) => panic!("deprecated"),
            SaveMem(_, label, mem) => {
                let snapshot =
                    boogie_resource_memory_name(self.module_env.env, *mem, &Some(*label));
                let current = boogie_resource_memory_name(self.module_env.env, *mem, &None);
                emitln!(self.writer, "{} := {};", snapshot, current);
            }
            SaveSpecVar(_, _label, _var) => {
                panic!("spec var snapshot NYI")
            }
            Prop(id, kind, exp) => match kind {
                PropKind::Assert => {
                    emit!(self.writer, "assert ");
                    let info = fun_target
                        .get_vc_info(*id)
                        .map(|s| s.as_str())
                        .unwrap_or("unknown assertion failed");
                    emit!(
                        self.writer,
                        "{{:msg \"assert_failed{}: {}\"}} ",
                        self.loc_str(&loc),
                        info
                    );
                    self.spec_translator.translate_unboxed(exp);
                    emitln!(self.writer, ";");
                }
                PropKind::Assume => {
                    emit!(self.writer, "assume ");
                    self.spec_translator.translate_unboxed(exp);
                    emitln!(self.writer, ";");
                }
                PropKind::Modifies => {
                    let ty = self.module_env.env.get_node_type(exp.node_id());
                    let (mid, sid, type_args) = ty.require_struct();
                    let boogie_mem =
                        boogie_resource_memory_name(self.module_env.env, mid.qualified(sid), &None);
                    let boogie_type_args = boogie_type_value_array(self.module_env.env, type_args);
                    emit!(
                        self.writer,
                        "call {} := $Modifies({}, {}, ",
                        boogie_mem,
                        boogie_mem,
                        boogie_type_args
                    );
                    self.spec_translator.translate_unboxed(&exp.call_args()[0]);
                    emitln!(self.writer, ");");
                }
            },
            Label(_, label) => {
                self.writer.unindent();
                emitln!(self.writer, "L{}:", label.as_usize());
                self.writer.indent();
            }
            Jump(_, target) => emitln!(self.writer, "goto L{};", target.as_usize()),
            Branch(_, then_target, else_target, idx) => emitln!(
                self.writer,
                "if (b#$Boolean({})) {{ goto L{}; }} else {{ goto L{}; }}",
                str_local(*idx),
                then_target.as_usize(),
                else_target.as_usize(),
            ),
            Assign(_, dest, src, _) => {
                if fun_target.get_local_type(*dest).is_reference() {
                    emitln!(
                        self.writer,
                        "call {} := $CopyOrMoveRef({});",
                        str_local(*dest),
                        str_local(*src)
                    );
                } else {
                    emitln!(
                        self.writer,
                        "call {} := $CopyOrMoveValue({});",
                        str_local(*dest),
                        str_local(*src)
                    );
                }
            }
            Ret(_, rets) => {
                for (i, r) in rets.iter().enumerate() {
                    emitln!(self.writer, "$ret{} := {};", i, str_local(*r));
                }
                emitln!(self.writer, "return;");
            }
            Load(_, idx, c) => {
                let value = match c {
                    Constant::Bool(true) => "$Boolean(true)".to_string(),
                    Constant::Bool(false) => "$Boolean(false)".to_string(),
                    Constant::U8(num) => format!("$Integer({})", num),
                    Constant::U64(num) => format!("$Integer({})", num),
                    Constant::U128(num) => format!("$Integer({})", num),
                    Constant::Address(val) => format!("$Address({})", val),
                    Constant::ByteArray(val) => {
                        format!("$Vector({})", boogie_byte_blob(self.options, val))
                    }
                };
                emitln!(self.writer, "{} := {};", str_local(*idx), value);
            }
            Call(_, dests, oper, srcs, aa) => {
                use Operation::*;
                match oper {
                    FreezeRef => unreachable!(),
                    UnpackRef | UnpackRefDeep | PackRef | PackRefDeep => {
                        // No effect
                    }
                    // Write a mutated value back through the borrow chain to
                    // its destination (global memory, a local, or a parent reference).
                    WriteBack(dest, _) => {
                        use BorrowNode::*;
                        let src = srcs[0];
                        match dest {
                            GlobalRoot(struct_decl) => {
                                let memory = struct_decl.module_id.qualified(struct_decl.id);
                                let memory_name = boogie_resource_memory_name(
                                    fun_target.global_env(),
                                    memory,
                                    &None,
                                );
                                emitln!(
                                    self.writer,
                                    "call {} := $WritebackToGlobal({}, {});",
                                    memory_name,
                                    memory_name,
                                    str_local(src),
                                );
                            }
                            LocalRoot(idx) => {
                                emitln!(
                                    self.writer,
                                    "call {} := $WritebackToValue({}, {}, {});",
                                    str_local(*idx),
                                    str_local(src),
                                    idx,
                                    str_local(*idx)
                                );
                            }
                            Reference(idx) => {
                                emitln!(
                                    self.writer,
                                    "call {} := $WritebackToReference({}, {});",
                                    str_local(*idx),
                                    str_local(src),
                                    str_local(*idx)
                                );
                            }
                        }
                    }
                    Splice(map) => {
                        let src = srcs[0];
                        assert!(!map.is_empty());
                        emitln!(
                            self.writer,
                            "call {} := $Splice{}({}, {});",
                            str_local(src),
                            map.len(),
                            map.iter()
                                .map(|(pos, idx)| format!("{}, {}", pos, str_local(*idx)))
                                .join(", "),
                            str_local(src)
                        );
                    }
                    BorrowLoc => {
                        let src = srcs[0];
                        let dest = dests[0];
                        emitln!(
                            self.writer,
                            "call {} := $BorrowLoc({}, {});",
                            str_local(dest),
                            src,
                            str_local(src)
                        );
                    }
                    ReadRef => {
                        let src = srcs[0];
                        let dest = dests[0];
                        emitln!(
                            self.writer,
                            "call {} := $ReadRef({});",
                            str_local(dest),
                            str_local(src)
                        );
                    }
                    WriteRef => {
                        let reference = srcs[0];
                        let value = srcs[1];
                        emitln!(
                            self.writer,
                            "call {} := $WriteRef({}, {});",
                            str_local(reference),
                            str_local(reference),
                            str_local(value),
                        );
                    }
                    Function(mid, fid, type_actuals) => {
                        let callee_env = self.module_env.env.get_module(*mid).into_function(*fid);
                        let args_str = std::iter::once(boogie_type_values(
                            fun_target.func_env.module_env.env,
                            type_actuals,
                        ))
                        .chain(srcs.iter().map(|arg_idx| str_local(*arg_idx)))
                        .filter(|s| !s.is_empty())
                        .join(", ");
                        let dest_str = dests.iter().map(|dest_idx| str_local(*dest_idx)).join(", ");
                        if dest_str.is_empty() {
                            emitln!(
                                self.writer,
                                "call {}({});",
                                boogie_function_name(&callee_env),
                                args_str
                            );
                        } else {
                            emitln!(
                                self.writer,
                                "call {} := {}({});",
                                dest_str,
                                boogie_function_name(&callee_env),
                                args_str
                            );
                        }
                        // Clear the last track location after function call, as the call inserted
                        // location tracks before it returns.
                        *last_tracked_loc = None;
                    }
                    Pack(mid, sid, _type_actuals) => {
                        let struct_env = fun_target
                            .func_env
                            .module_env
                            .env
                            .get_module(*mid)
                            .into_struct(*sid);
                        let mut ctor_expr = "$MapConstValue($DefaultValue())".to_owned();
                        for (i, field_env) in struct_env.get_fields().enumerate() {
                            ctor_expr = format!(
                                "{}[{} := {}]",
                                ctor_expr,
                                boogie_field_name(&field_env),
                                str_local(srcs[i])
                            );
                        }
                        emitln!(
                            self.writer,
                            "{} := $Vector($ValueArray({}, {}));",
                            str_local(dests[0]),
                            ctor_expr,
                            struct_env.get_field_count()
                        );
                    }
                    Unpack(mid, sid, _type_actuals) => {
                        let struct_env = fun_target
                            .func_env
                            .module_env
                            .env
                            .get_module(*mid)
                            .into_struct(*sid);
                        for (i, field_env) in struct_env.get_fields().enumerate() {
                            emitln!(
                                self.writer,
                                "{} := $SelectField({}, {});",
                                str_local(dests[i]),
                                str_local(srcs[0]),
                                boogie_field_name(&field_env)
                            );
                            let type_check = boogie_well_formed_check(
                                self.module_env.env,
                                &str_local(dests[i]),
                                &field_env.get_type(),
                            );
                            emit!(self.writer, &type_check);
                        }
                    }
                    BorrowField(mid, sid, _, field_offset) => {
                        let src = srcs[0];
                        let dest = dests[0];
                        let struct_env = fun_target
                            .func_env
                            .module_env
                            .env
                            .get_module(*mid)
                            .into_struct(*sid);
                        let field_env = &struct_env.get_field_by_offset(*field_offset);
                        emitln!(
                            self.writer,
                            "call {} := $BorrowField({}, {});",
                            str_local(dest),
                            str_local(src),
                            boogie_field_name(field_env)
                        );
                    }
                    GetField(mid, sid, _, field_offset) => {
                        let src = srcs[0];
                        let dest = dests[0];
                        let struct_env = fun_target
                            .func_env
                            .module_env
                            .env
                            .get_module(*mid)
                            .into_struct(*sid);
                        let field_env = &struct_env.get_field_by_offset(*field_offset);
                        let is_ref = fun_target.get_local_type(src).is_reference();
                        emitln!(
                            self.writer,
                            "call {} := {}({}, {});",
                            str_local(dest),
                            if is_ref {
                                "$GetFieldFromReference"
                            } else {
                                "$GetFieldFromValue"
                            },
                            str_local(src),
                            boogie_field_name(field_env)
                        );
                    }
                    Exists(mid, sid, type_actuals) => {
                        let addr = srcs[0];
                        let dest = dests[0];
                        let type_args = boogie_type_value_array(self.module_env.env, type_actuals);
                        let memory = boogie_resource_memory_name(
                            self.module_env.env,
                            mid.qualified(*sid),
                            &None,
                        );
                        emitln!(
                            self.writer,
                            "{} := $ResourceExists({}, {}, {});",
                            str_local(dest),
                            memory,
                            type_args,
                            str_local(addr),
                        );
                    }
                    BorrowGlobal(mid, sid, type_actuals) => {
                        let addr = srcs[0];
                        let dest = dests[0];
                        let type_args = boogie_type_value_array(self.module_env.env, type_actuals);
                        let addr_name = str_local(addr);
                        let memory = mid.qualified(*sid);
                        let memory_name =
                            boogie_resource_memory_name(self.module_env.env, memory, &None);
                        emitln!(
                            self.writer,
                            "call {} := $BorrowGlobal({}, {}, {});",
                            str_local(dest),
                            memory_name,
                            addr_name,
                            type_args,
                        );
                    }
                    GetGlobal(mid, sid, type_actuals) => {
                        let addr = srcs[0];
                        let dest = dests[0];
                        let type_args = boogie_type_value_array(self.module_env.env, type_actuals);
                        let memory = mid.qualified(*sid);
                        let memory_name =
                            boogie_resource_memory_name(self.module_env.env, memory, &None);
                        emitln!(
                            self.writer,
                            "call {} := $GetGlobal({}, {}, {});",
                            str_local(dest),
                            memory_name,
                            str_local(addr),
                            type_args,
                        );
                    }
                    MoveTo(mid, sid, type_actuals) => {
                        let value = srcs[0];
                        let signer = srcs[1];
                        let type_args = boogie_type_value_array(self.module_env.env, type_actuals);
                        let signer_name = str_local(signer);
                        let memory = mid.qualified(*sid);
                        let memory_name =
                            boogie_resource_memory_name(self.module_env.env, memory, &None);
                        emitln!(
                            self.writer,
                            "call {} := $MoveTo({}, {}, {}, {});",
                            memory_name,
                            memory_name,
                            type_args,
                            str_local(value),
                            signer_name,
                        );
                    }
                    MoveFrom(mid, sid, type_actuals) => {
                        let src = srcs[0];
                        let dest = dests[0];
                        let type_args = boogie_type_value_array(self.module_env.env, type_actuals);
                        let src_name = str_local(src);
                        let memory = mid.qualified(*sid);
                        let memory_name =
                            boogie_resource_memory_name(self.module_env.env, memory, &None);
                        emitln!(
                            self.writer,
                            "call {}, {} := $MoveFrom({}, {}, {});",
                            memory_name,
                            str_local(dest),
                            memory_name,
                            src_name,
                            type_args,
                        );
                    }
                    Havoc => {
                        let dest_str = str_local(dests[0]);
                        emitln!(self.writer, "havoc {};", dest_str);
                        // Re-establish the type invariant after the havoc.
                        let ty = fun_target.get_local_type(dests[0]);
                        let check = boogie_well_formed_check(self.module_env.env, &dest_str, ty);
                        if !check.is_empty() {
                            emitln!(self.writer, &check);
                        }
                    }
                    CastU8 => {
                        let src = srcs[0];
                        let dest = dests[0];
                        emitln!(
                            self.writer,
                            "call {} := $CastU8({});",
                            str_local(dest),
                            str_local(src)
                        );
                    }
                    CastU64 => {
                        let src = srcs[0];
                        let dest = dests[0];
                        emitln!(
                            self.writer,
                            "call {} := $CastU64({});",
                            str_local(dest),
                            str_local(src)
                        );
                    }
                    CastU128 => {
                        let src = srcs[0];
                        let dest = dests[0];
                        emitln!(
                            self.writer,
                            "call {} := $CastU128({});",
                            str_local(dest),
                            str_local(src)
                        );
                    }
                    Not => {
                        let src = srcs[0];
                        let dest = dests[0];
                        emitln!(
                            self.writer,
                            "call {} := $Not({});",
                            str_local(dest),
                            str_local(src)
                        );
                    }
                    Add => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        // Select the `_unchecked` prelude variant when the pragma
                        // disables overflow checking (only exists for U64/U128 below).
                        let unchecked = if fun_target
                            .is_pragma_true(ADDITION_OVERFLOW_UNCHECKED_PRAGMA, || false)
                        {
                            "_unchecked"
                        } else {
                            ""
                        };
                        let add_type = match fun_target.get_local_type(dest) {
                            Type::Primitive(PrimitiveType::U8) => "U8".to_string(),
                            Type::Primitive(PrimitiveType::U64) => format!("U64{}", unchecked),
                            Type::Primitive(PrimitiveType::U128) => format!("U128{}", unchecked),
                            _ => unreachable!(),
                        };
                        emitln!(
                            self.writer,
                            "call {} := $Add{}({}, {});",
                            str_local(dest),
                            add_type,
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    Sub => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        emitln!(
                            self.writer,
                            "call {} := $Sub({}, {});",
                            str_local(dest),
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    Mul => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        let mul_type = match fun_target.get_local_type(dest) {
                            Type::Primitive(PrimitiveType::U8) => "U8",
                            Type::Primitive(PrimitiveType::U64) => "U64",
                            Type::Primitive(PrimitiveType::U128) => "U128",
                            _ => unreachable!(),
                        };
                        emitln!(
                            self.writer,
                            "call {} := $Mul{}({}, {});",
                            str_local(dest),
                            mul_type,
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    Div => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        emitln!(
                            self.writer,
                            "call {} := $Div({}, {});",
                            str_local(dest),
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    Mod => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        emitln!(
                            self.writer,
                            "call {} := $Mod({}, {});",
                            str_local(dest),
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    Shl => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        emitln!(
                            self.writer,
                            "call {} := $Shl({}, {});",
                            str_local(dest),
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    Shr => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        emitln!(
                            self.writer,
                            "call {} := $Shr({}, {});",
                            str_local(dest),
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    Lt => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        emitln!(
                            self.writer,
                            "call {} := $Lt({}, {});",
                            str_local(dest),
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    Gt => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        emitln!(
                            self.writer,
                            "call {} := $Gt({}, {});",
                            str_local(dest),
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    Le => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        emitln!(
                            self.writer,
                            "call {} := $Le({}, {});",
                            str_local(dest),
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    Ge => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        emitln!(
                            self.writer,
                            "call {} := $Ge({}, {});",
                            str_local(dest),
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    Or => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        emitln!(
                            self.writer,
                            "call {} := $Or({}, {});",
                            str_local(dest),
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    And => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        emitln!(
                            self.writer,
                            "call {} := $And({}, {});",
                            str_local(dest),
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    Eq => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        emitln!(
                            self.writer,
                            "{} := $Boolean($IsEqual({}, {}));",
                            str_local(dest),
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    Neq => {
                        let dest = dests[0];
                        let op1 = srcs[0];
                        let op2 = srcs[1];
                        emitln!(
                            self.writer,
                            "{} := $Boolean(!$IsEqual({}, {}));",
                            str_local(dest),
                            str_local(op1),
                            str_local(op2)
                        );
                    }
                    BitOr | BitAnd | Xor => {
                        emitln!(
                            self.writer,
                            "// bit operation not supported: {:?}\nassert false;",
                            bytecode
                        );
                    }
                    Destroy => {}
                    TraceLocal(idx) => {
                        self.track_local(fun_target, *idx, srcs[0]);
                    }
                    TraceReturn(i) => {
                        self.track_return(fun_target, *i, srcs[0]);
                    }
                    TraceAbort => self.track_abort(fun_target, &str_local(srcs[0])),
                }
                // If the instruction can abort, emit the conditional branch to
                // the abort handler label.
                if let Some(AbortAction(target, code)) = aa {
                    emitln!(self.writer, "if ($abort_flag) {");
                    self.writer.indent();
                    let code_str = str_local(*code);
                    emitln!(self.writer, "{} := $Integer($abort_code);", code_str);
                    self.track_abort(fun_target, &code_str);
                    emitln!(self.writer, "goto L{};", target.as_usize());
                    self.writer.unindent();
                    emitln!(self.writer, "}");
                }
            }
            // Propagate an abort: set code and flag, default-initialize all
            // return values, and leave the procedure.
            Abort(_, src) => {
                emitln!(
                    self.writer,
                    "$abort_code := i#$Integer({});",
                    str_local(*src)
                );
                emitln!(self.writer, "$abort_flag := true;");
                for (i, ty) in fun_target.get_return_types().iter().enumerate() {
                    let ret_str = format!("$ret{}", i);
                    if ty.is_reference() {
                        emitln!(self.writer, "{} := $DefaultMutation;", &ret_str);
                    } else {
                        emitln!(self.writer, "{} := $DefaultValue();", &ret_str);
                    }
                }
                emitln!(self.writer, "return;")
            }
            Nop(..) => {}
        }
        emitln!(self.writer);
    }
/// Track location for execution trace, avoiding to track the same line multiple times.
fn track_loc(
&self,
_fun_target: &FunctionTarget<'_>,
last_tracked_loc: &mut Option<(Loc, LineIndex)>,
loc: &Loc,
) {
if let Some(l) = self.module_env.env.get_location(loc) {
if let Some((last_loc, last_line)) = last_tracked_loc {
if *last_line == l.line {
// This line already tracked.
return;
}
*last_loc = loc.clone();
*last_line = l.line;
} else {
*last_tracked_loc = Some((loc.clone(), l.line));
}
emitln!(
self.writer,
"assume {{:print \"$at{}\"}} true;",
self.loc_str(&loc)
);
}
}
    /// Emits a debug-trace statement recording an abort with code `code_var`,
    /// so error reporting can reconstruct the abort site.
    fn track_abort(&self, fun_target: &FunctionTarget<'_>, code_var: &str) {
        emitln!(self.writer, &boogie_debug_track_abort(fun_target, code_var));
    }
/// Generates an update of the debug information about temporary.
fn track_local(&self, fun_target: &FunctionTarget<'_>, origin_idx: TempIndex, idx: TempIndex) {
// In order to determine whether we need to dereference, use the type of the temporary
// which actually holds the value, not the original temp we are tracing.
let ty = fun_target.get_local_type(idx);
let mut value = format!("$t{}", idx);
if ty.is_reference() {
value = format!("$Dereference({})", value);
}
let track = boogie_debug_track_local(fun_target, origin_idx, &value);
emitln!(self.writer, &track);
}
/// Generates an update of the debug information about the return value at given location.
fn track_return(&self, fun_target: &FunctionTarget<'_>, return_idx: usize, idx: TempIndex) {
let ty = fun_target.get_local_type(idx);
let mut value = format!("$t{}", idx);
if ty.is_reference() {
value = format!("$Dereference({})", value);
}
emitln!(
self.writer,
&boogie_debug_track_return(fun_target, return_idx, &value)
);
}
fn loc_str(&self, loc: &Loc) -> String {
let file_idx = self.module_env.env.file_id_to_idx(loc.file_id());
format!("({},{},{})", file_idx, loc.span().start(), loc.span().end())
}
}
| 41.069355 | 100 | 0.40174 |
ac9dca7db20beb182fb865fcf200dcab1ff95d46
| 666 |
/// Visits every element of `iterable`, handing each visit a copy of `value`.
///
/// Intermediate elements receive `value.clone()`; the final element receives
/// `value` itself, so exactly `len - 1` clones are made and none at all for an
/// empty iterable.
pub fn spread<Item, Iterable, Visitor, Value>(
    iterable: Iterable,
    value: Value,
    visitor: Visitor,
) where
    Value: Clone,
    Iterable: IntoIterator<Item = Item>,
    Visitor: Fn(Item, Value),
{
    // Buffer one element so the last one can be detected and handed the
    // original `value` by move rather than by clone.
    let mut pending = None;
    for item in iterable {
        if let Some(previous) = pending.replace(item) {
            visitor(previous, value.clone());
        }
    }
    if let Some(last) = pending {
        visitor(last, value);
    }
}
/// Copies the referenced ids into an owned `Vec` and returns them in ascending
/// order. Duplicates are kept; the input order is irrelevant.
pub fn sorted<'a, I, T>(ids: I) -> Vec<T>
where
    I: IntoIterator<Item = &'a T>,
    T: 'a + Copy + Ord,
{
    let mut owned = Vec::new();
    for &id in ids {
        owned.push(id);
    }
    // Unstable sort: equal ids are interchangeable, so stability buys nothing.
    owned.sort_unstable();
    owned
}
| 22.965517 | 63 | 0.569069 |
9003fd7fe94001539f513218fe0cb26746c8eeeb
| 132 |
// Implementation details live in `poly_impl`; the exported entry point below
// stays minimal, as `#[proc_macro]` functions must sit at the crate root.
mod poly_impl;
/// Function-like procedural macro entry point for `poly!(...)`: delegates all
/// parsing and code generation to `poly_impl::poly`.
#[proc_macro]
pub fn poly(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
    poly_impl::poly(input)
}
| 18.857143 | 72 | 0.727273 |
ab1356c80e5ea42794b6db874e9f859cfeea38a4
| 1,600 |
use crate::runtime;
use crate::task::JoinHandle;
use std::future::Future;
// NOTE(review): `doc_rt_core!` is a crate-internal wrapper macro — presumably
// it cfg-gates the item on the rt-core feature and decorates its docs; confirm
// against the macro's definition elsewhere in the crate.
doc_rt_core! {
    /// Spawns a new asynchronous task, returning a
    /// [`JoinHandle`](super::JoinHandle) for it.
    ///
    /// Spawning a task enables the task to execute concurrently to other tasks. The
    /// spawned task may execute on the current thread, or it may be sent to a
    /// different thread to be executed. The specifics depend on the current
    /// [`Runtime`](crate::runtime::Runtime) configuration.
    ///
    /// # Examples
    ///
    /// In this example, a server is started and `spawn` is used to start a new task
    /// that processes each received connection.
    ///
    /// ```no_run
    /// use tokio::net::{TcpListener, TcpStream};
    ///
    /// use std::io;
    ///
    /// async fn process(socket: TcpStream) {
    ///     // ...
    /// # drop(socket);
    /// }
    ///
    /// #[tokio::main]
    /// async fn main() -> io::Result<()> {
    ///     let mut listener = TcpListener::bind("127.0.0.1:8080").await?;
    ///
    ///     loop {
    ///         let (socket, _) = listener.accept().await?;
    ///
    ///         tokio::spawn(async move {
    ///             // Process each socket concurrently.
    ///             process(socket).await
    ///         });
    ///     }
    /// }
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if called from **outside** of the Tokio runtime.
    pub fn spawn<T>(task: T) -> JoinHandle<T::Output>
    where
        T: Future + Send + 'static,
        T::Output: Send + 'static,
    {
        // Delegate to the runtime's scheduler; this is where the panic occurs
        // when no runtime context is active.
        runtime::spawn(task)
    }
}
| 28.571429 | 84 | 0.52375 |
228ea8d7978b690dc97c02abe159c91f12777009
| 16,666 |
//! `stdweb` implementation for the fetch service.
use super::Referrer;
use crate::callback::Callback;
use crate::format::{Binary, Format, Text};
use crate::services::Task;
use serde::Serialize;
use std::collections::HashMap;
use std::fmt;
use stdweb::serde::Serde;
use stdweb::unstable::{TryFrom, TryInto};
use stdweb::web::error::Error;
use stdweb::web::ArrayBuffer;
use stdweb::{JsSerialize, Value};
#[allow(unused_imports)]
use stdweb::{_js_impl, js};
use thiserror::Error;
pub use http::{HeaderMap, Method, Request, Response, StatusCode, Uri};
/// Type to set cache for fetch.
///
/// Mirrors the `cache` member of the Fetch API's `RequestInit`; each variant
/// serializes to its kebab-case string form.
#[derive(Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub enum Cache {
    /// `default` value of cache.
    // Explicitly renamed: kebab-casing the variant name would otherwise
    // yield "default-cache".
    #[serde(rename = "default")]
    DefaultCache,
    /// `no-store` value of cache.
    NoStore,
    /// `reload` value of cache.
    Reload,
    /// `no-cache` value of cache.
    NoCache,
    /// `force-cache` value of cache
    ForceCache,
    /// `only-if-cached` value of cache
    OnlyIfCached,
}
/// Type to set credentials for fetch.
///
/// Mirrors the `credentials` member of the Fetch API's `RequestInit`; each
/// variant serializes to its kebab-case string form.
#[derive(Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub enum Credentials {
    /// `omit` value of credentials.
    Omit,
    /// `include` value of credentials.
    Include,
    /// `same-origin` value of credentials.
    SameOrigin,
}
/// Type to set mode for fetch.
///
/// Mirrors the `mode` member of the Fetch API's `RequestInit`; each variant
/// serializes to its kebab-case string form.
#[derive(Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub enum Mode {
    /// `same-origin` value of mode.
    SameOrigin,
    /// `no-cors` value of mode.
    NoCors,
    /// `cors` value of mode.
    Cors,
}
/// Type to set redirect behaviour for fetch.
///
/// Mirrors the `redirect` member of the Fetch API's `RequestInit`; each
/// variant serializes to its kebab-case string form.
#[derive(Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub enum Redirect {
    /// `follow` value of redirect.
    Follow,
    /// `error` value of redirect.
    Error,
    /// `manual` value of redirect.
    Manual,
}
// Hand-written `Serialize`: `SameOriginUrl` must serialize as its bare inner
// string, while the unit variants use the wire names "about:client" and ""
// (indices 0 and 1 — part of the wire format, do not reorder).
impl Serialize for Referrer {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match *self {
            Referrer::SameOriginUrl(ref s) => serializer.serialize_str(s),
            Referrer::AboutClient => {
                serializer.serialize_unit_variant("Referrer", 0, "about:client")
            }
            Referrer::Empty => serializer.serialize_unit_variant("Referrer", 1, ""),
        }
    }
}
/// Type to set referrer policy for fetch.
///
/// Mirrors the `referrerPolicy` member of the Fetch API's `RequestInit`; each
/// variant serializes to its kebab-case string form.
#[derive(Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub enum ReferrerPolicy {
    /// `no-referrer` value of referrerPolicy.
    NoReferrer,
    /// `no-referrer-when-downgrade` value of referrerPolicy.
    NoReferrerWhenDowngrade,
    /// `same-origin` value of referrerPolicy.
    SameOrigin,
    /// `origin` value of referrerPolicy.
    Origin,
    /// `strict-origin` value of referrerPolicy.
    StrictOrigin,
    /// `origin-when-cross-origin` value of referrerPolicy.
    OriginWhenCrossOrigin,
    /// `strict-origin-when-cross-origin` value of referrerPolicy.
    StrictOriginWhenCrossOrigin,
    /// `unsafe-url` value of referrerPolicy.
    UnsafeUrl,
}
/// Init options for `fetch()` function call.
/// https://developer.mozilla.org/en-US/docs/Web/API/WindowOrWorkerGlobalScope/fetch
///
/// Every field defaults to `None` and is then omitted from the serialized init
/// object (`skip_serializing_if`), so unset options fall back to the browser's
/// own fetch defaults.
#[derive(Serialize, Default, Debug)]
#[serde(rename_all = "camelCase")]
pub struct FetchOptions {
    /// Cache of a fetch request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cache: Option<Cache>,
    /// Credentials of a fetch request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub credentials: Option<Credentials>,
    /// Redirect behaviour of a fetch request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub redirect: Option<Redirect>,
    /// Request mode of a fetch request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mode: Option<Mode>,
    /// Referrer of a fetch request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub referrer: Option<Referrer>,
    /// Referrer policy of a fetch request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub referrer_policy: Option<ReferrerPolicy>,
    /// Integrity of a fetch request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub integrity: Option<String>,
}
/// Represents errors of a fetch service.
#[derive(Debug, Error)]
enum FetchError {
    // Raised when a response cannot be produced — presumably when the JS
    // `fetch` promise rejects; confirm against `fetch_impl` (not in this chunk).
    #[error("failed response")]
    FailedResponse,
}
/// A handle to control sent requests. Can be canceled with a `Task::cancel` call.
// Wraps a JS-side value used to control the in-flight request — presumably the
// abort handle created by the fetch glue; confirm against `fetch_impl` and the
// `Task` impl (not in this chunk).
#[must_use]
pub struct FetchTask(Option<Value>);
impl fmt::Debug for FetchTask {
    /// Opaque debug rendering; the wrapped JS value has nothing useful to print.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "FetchTask")
    }
}
/// A service to fetch resources.
// Stateless marker type: the methods on it delegate to the free `fetch_impl`
// glue function.
#[derive(Default, Debug)]
pub struct FetchService {}
impl FetchService {
    /// Creates a new fetch service instance.
    // NOTE(review): the previous doc claimed a connection to `App` via a
    // `sender`, but the constructor takes no arguments and the service holds
    // no state.
    pub fn new() -> Self {
        Self {}
    }
    /// Sends a request to a remote server given a Request object and a callback
    /// function to convert a Response object into a loop's message.
    ///
    /// You may use a Request builder to build your request declaratively as on the
    /// following examples:
    ///
    /// ```
    ///# use yew::format::{Nothing, Json};
    ///# use yew::services::fetch::Request;
    ///# use serde_json::json;
    /// let post_request = Request::post("https://my.api/v1/resource")
    ///     .header("Content-Type", "application/json")
    ///     .body(Json(&json!({"foo": "bar"})))
    ///     .expect("Failed to build request.");
    ///
    /// let get_request = Request::get("https://my.api/v1/resource")
    ///     .body(Nothing)
    ///     .expect("Failed to build request.");
    /// ```
    ///
    /// The callback function can build a loop message by passing or analyzing the
    /// response body and metadata.
    ///
    /// ```
    ///# use yew::{Component, ComponentLink, Html, Renderable};
    ///# use yew::services::FetchService;
    ///# use yew::services::fetch::{Response, Request};
    ///# struct Comp;
    ///# impl Component for Comp {
    ///#     type Message = Msg;type Properties = ();
    ///#     fn create(props: Self::Properties,link: ComponentLink<Self>) -> Self {unimplemented!()}
    ///#     fn update(&mut self,msg: Self::Message) -> bool {unimplemented!()}
    ///#     fn view(&self) -> Html {unimplemented!()}
    ///# }
    ///# enum Msg {
    ///#     Noop,
    ///#     Error
    ///# }
    ///# fn dont_execute() {
    ///# let link: ComponentLink<Comp> = unimplemented!();
    ///# let mut fetch_service: FetchService = FetchService::new();
    ///# let post_request: Request<Result<String, anyhow::Error>> = unimplemented!();
    /// let task = fetch_service.fetch(
    ///     post_request,
    ///     link.callback(|response: Response<Result<String, anyhow::Error>>| {
    ///         if response.status().is_success() {
    ///             Msg::Noop
    ///         } else {
    ///             Msg::Error
    ///         }
    ///     }),
    /// );
    ///# }
    /// ```
    ///
    /// For a full example, you can specify that the response must be in the JSON format,
    /// and be a specific serialized data type. If the message isn't Json, or isn't the specified
    /// data type, then you will get a message indicating failure.
    ///
    /// ```
    ///# use yew::format::{Json, Nothing, Format};
    ///# use yew::services::FetchService;
    ///# use http::Request;
    ///# use yew::services::fetch::Response;
    ///# use yew::{Component, ComponentLink, Renderable, Html};
    ///# use serde_derive::Deserialize;
    ///# struct Comp;
    ///# impl Component for Comp {
    ///#     type Message = Msg;type Properties = ();
    ///#     fn create(props: Self::Properties,link: ComponentLink<Self>) -> Self {unimplemented!()}
    ///#     fn update(&mut self,msg: Self::Message) -> bool {unimplemented!()}
    ///#     fn view(&self) -> Html {unimplemented!()}
    ///# }
    ///# enum Msg {
    ///#     FetchResourceComplete(Data),
    ///#     FetchResourceFailed
    ///# }
    /// #[derive(Deserialize)]
    /// struct Data {
    ///    value: String
    /// }
    ///
    ///# fn dont_execute() {
    ///# let link: ComponentLink<Comp> = unimplemented!();
    /// let get_request = Request::get("/thing").body(Nothing).unwrap();
    /// let callback = link.callback(|response: Response<Json<Result<Data, anyhow::Error>>>| {
    ///     if let (meta, Json(Ok(body))) = response.into_parts() {
    ///         if meta.status.is_success() {
    ///             return Msg::FetchResourceComplete(body);
    ///         }
    ///     }
    ///     Msg::FetchResourceFailed
    /// });
    ///
    /// let task = FetchService::new().fetch(get_request, callback);
    ///# }
    /// ```
    ///
    pub fn fetch<IN, OUT: 'static>(
        &mut self,
        request: Request<IN>,
        callback: Callback<Response<OUT>>,
    ) -> Result<FetchTask, &str>
    where
        IN: Into<Text>,
        OUT: From<Text>,
    {
        // Text-format variant: request/response bodies travel as `String`s
        // (`binary: false`), with no extra fetch options.
        fetch_impl::<IN, OUT, String, String>(false, request, None, callback)
    }
    /// `fetch` with provided `FetchOptions` object.
    /// Use it if you need to send cookies with a request:
    /// ```
    ///# use yew::format::Nothing;
    ///# use yew::services::fetch::{self, FetchOptions, Credentials};
    ///# use yew::{Renderable, Html, Component, ComponentLink};
    ///# use yew::services::FetchService;
    ///# use http::Response;
    ///# struct Comp;
    ///# impl Component for Comp {
    ///#     type Message = Msg;
    ///#     type Properties = ();
    ///#     fn create(props: Self::Properties, link: ComponentLink<Self>) -> Self {unimplemented!()}
    ///#     fn update(&mut self, msg: Self::Message) -> bool {unimplemented!()}
    ///#     fn view(&self) -> Html {unimplemented!()}
    ///# }
    ///# pub enum Msg {}
    ///# fn dont_execute() {
    ///# let link: ComponentLink<Comp> = unimplemented!();
    ///# let callback = link.callback(|response: Response<Result<String, anyhow::Error>>| unimplemented!());
    /// let request = fetch::Request::get("/path/")
    ///     .body(Nothing)
    ///     .unwrap();
    /// let options = FetchOptions {
    ///     credentials: Some(Credentials::SameOrigin),
    ///     ..FetchOptions::default()
    /// };
    /// let task = FetchService::new().fetch_with_options(request, options, callback);
    ///# }
    /// ```
    pub fn fetch_with_options<IN, OUT: 'static>(
        &mut self,
        request: Request<IN>,
        options: FetchOptions,
        callback: Callback<Response<OUT>>,
    ) -> Result<FetchTask, &str>
    where
        IN: Into<Text>,
        OUT: From<Text>,
    {
        // Same text-format path as `fetch`, but with explicit fetch options.
        fetch_impl::<IN, OUT, String, String>(false, request, Some(options), callback)
    }
    /// Fetch the data in binary format.
    ///
    /// Counterpart of `fetch`: the request body converts into `Binary`
    /// (`Vec<u8>`) and the response payload arrives as a JS `ArrayBuffer`.
    pub fn fetch_binary<IN, OUT: 'static>(
        &mut self,
        request: Request<IN>,
        callback: Callback<Response<OUT>>,
    ) -> Result<FetchTask, &str>
    where
        IN: Into<Binary>,
        OUT: From<Binary>,
    {
        fetch_impl::<IN, OUT, Vec<u8>, ArrayBuffer>(true, request, None, callback)
    }
    /// Fetch the data in binary format, using the provided `FetchOptions`
    /// (e.g. to control credentials); otherwise identical to `fetch_binary`.
    pub fn fetch_binary_with_options<IN, OUT: 'static>(
        &mut self,
        request: Request<IN>,
        options: FetchOptions,
        callback: Callback<Response<OUT>>,
    ) -> Result<FetchTask, &str>
    where
        IN: Into<Binary>,
        OUT: From<Binary>,
    {
        fetch_impl::<IN, OUT, Vec<u8>, ArrayBuffer>(true, request, Some(options), callback)
    }
}
/// Shared implementation behind the four `fetch*` front-ends.
///
/// `binary` selects between text (`T = String`) and binary
/// (`T = Vec<u8>`, `X = ArrayBuffer`) transport; `options`, when present, is
/// serialized and merged into the JS `fetch` init object.
///
/// Returns `Err` if a request header value is not valid UTF-8 or the header
/// set is rejected by the browser's `Headers` constructor.
fn fetch_impl<IN, OUT: 'static, T, X>(
    binary: bool,
    request: Request<IN>,
    options: Option<FetchOptions>,
    callback: Callback<Response<OUT>>,
) -> Result<FetchTask, &'static str>
where
    IN: Into<Format<T>>,
    OUT: From<Format<T>>,
    T: JsSerialize,
    X: TryFrom<Value> + Into<T>,
{
    // Consume request as parts and body.
    let (parts, body) = request.into_parts();
    // Map headers into a Js `Header` to make sure it's supported.
    let header_list = parts
        .headers
        .iter()
        .map(|(k, v)| {
            Ok((
                k.as_str(),
                v.to_str().map_err(|_| "Unparsable request header")?,
            ))
        })
        .collect::<Result<HashMap<_, _>, _>>()?;
    let header_map = js! {
        try {
            return new Headers(@{header_list});
        } catch(error) {
            return error;
        }
    };
    if Error::try_from(js!( return @{header_map.as_ref()}; )).is_ok() {
        return Err("couldn't build headers");
    }
    // Formats URI.
    let uri = parts.uri.to_string();
    let method = parts.method.as_str();
    let body = body.into().ok();
    // Prepare the response callback.
    // Notice that the callback signature must match the call from the javascript
    // side. There is no static check at this point.
    let callback = move |success: bool, status: u16, headers: HashMap<String, String>, data: X| {
        let mut response_builder = Response::builder();
        if let Ok(status) = StatusCode::from_u16(status) {
            response_builder = response_builder.status(status);
        }
        for (key, values) in headers {
            response_builder = response_builder.header(key.as_str(), values.as_str());
        }
        // Deserialize and wrap response data into a Text object.
        let data = if success {
            Ok(data.into())
        } else {
            Err(FetchError::FailedResponse.into())
        };
        let out = OUT::from(data);
        let response = response_builder.body(out).unwrap();
        callback.emit(response);
    };
    #[allow(clippy::too_many_arguments)]
    let handle = js! {
        var body = @{body};
        if (@{binary} && body != null) {
            body = Uint8Array.from(body);
        }
        var callback = @{callback};
        // Guard with `typeof`: a bare `AbortController` reference throws a
        // ReferenceError in browsers that do not define it at all.
        var abortController = (typeof AbortController !== "undefined") ? new AbortController() : null;
        var handle = {
            active: true,
            callback,
            abortController,
        };
        var init = {
            method: @{method},
            body: body,
            headers: @{header_map},
        };
        var opts = @{Serde(options)} || {};
        for (var attrname in opts) {
            init[attrname] = opts[attrname];
        }
        if (abortController && !("signal" in init)) {
            init.signal = abortController.signal;
        }
        fetch(@{uri}, init).then(function(response) {
            var promise = (@{binary}) ? response.arrayBuffer() : response.text();
            var status = response.status;
            var headers = {};
            response.headers.forEach(function(value, key) {
                headers[key] = value;
            });
            promise.then(function(data) {
                if (handle.active == true) {
                    handle.active = false;
                    callback(true, status, headers, data);
                    callback.drop();
                }
            }).catch(function(err) {
                if (handle.active == true) {
                    // `data` belongs to the sibling `then` handler and is not
                    // in scope here; synthesize an empty payload of the right
                    // kind instead of hitting a ReferenceError.
                    var data = (@{binary}) ? new ArrayBuffer() : "";
                    handle.active = false;
                    callback(false, status, headers, data);
                    callback.drop();
                }
            });
        }).catch(function(e) {
            if (handle.active == true) {
                var data = (@{binary}) ? new ArrayBuffer() : "";
                handle.active = false;
                callback(false, 408, {}, data);
                callback.drop();
            }
        });
        return handle;
    };
    Ok(FetchTask(Some(handle)))
}
impl Task for FetchTask {
    /// A fetch counts as active while the JS handle's `active` flag is set and
    /// the request has not been aborted through its `AbortController`.
    fn is_active(&self) -> bool {
        let task = match self.0 {
            Some(ref task) => task,
            None => return false,
        };
        let result = js! {
            var the_task = @{task};
            return the_task.active &&
                    (!the_task.abortController || !the_task.abortController.signal.aborted);
        };
        result.try_into().unwrap_or(false)
    }
}
impl Drop for FetchTask {
    fn drop(&mut self) {
        // Nothing to cancel if the request already finished or was cancelled.
        if !self.is_active() {
            return;
        }
        // Not every browser supports aborting an in-flight fetch, so besides
        // calling `abort()` where available we flip the `active` flag: the JS
        // response handlers check it and skip the (now dropped) callback.
        let handle = self
            .0
            .take()
            .expect("tried to cancel request fetching twice");
        js! { @(no_return)
            var handle = @{handle};
            handle.active = false;
            handle.callback.drop();
            if (handle.abortController) {
                handle.abortController.abort();
            }
        }
    }
}
| 32.678431 | 108 | 0.565463 |
01187d15ace911bbe3f5bac98f82c874d6d80405
| 881 |
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
pub mod ast;
pub mod blame_set;
pub mod decl_counters;
pub mod decl_env;
pub mod direct_decl_parser;
pub mod doc_comment;
pub mod i_map;
pub mod i_set;
pub mod ident;
pub mod internal_type_set;
pub mod lazy;
pub mod local_id;
pub mod local_id_map;
pub mod method_flags;
pub mod opaque_digest;
pub mod phase_map;
pub mod pos;
pub mod prop_flags;
pub mod relative_path;
pub mod s_map;
pub mod s_set;
pub mod shape_map;
pub mod tany_sentinel;
pub mod typing_continuations;
pub mod typing_defs_flags;
pub mod typing_logic;
pub mod typing_set;
mod ast_defs_impl;
mod decl_parser_options_impl;
mod errors_impl;
mod global_options_impl;
mod tast_impl;
mod typing_defs_core_impl;
mod typing_reason_impl;
| 21.487805 | 66 | 0.795687 |
dd1c7227603aa3b7584065415a23fe8591ae3555
| 7,218 |
//!
//! A simple reverse proxy, to be used with [Hyper].
//!
//! The implementation ensures that [Hop-by-hop headers] are stripped correctly in both directions,
//! and adds the client's IP address to a comma-space-separated list of forwarding addresses in the
//! `X-Forwarded-For` header.
//!
//! The implementation is based on Go's [`httputil.ReverseProxy`].
//!
//! [Hyper]: http://hyper.rs/
//! [Hop-by-hop headers]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html
//! [`httputil.ReverseProxy`]: https://golang.org/pkg/net/http/httputil/#ReverseProxy
//!
//! # Example
//!
//! Add these dependencies to your `Cargo.toml` file.
//!
//! ```toml
//! [dependencies]
//! hyper-reverse-proxy = "0.5"
//! hyper = "0.13"
//! tokio = { version = "0.2", features = ["full"] }
//! ```
//!
//! The following example will set up a reverse proxy listening on `127.0.0.1:13900`,
//! and will proxy these calls:
//!
//! * `"/target/first"` will be proxied to `http://127.0.0.1:13901`
//!
//! * `"/target/second"` will be proxied to `http://127.0.0.1:13902`
//!
//! * All other URLs will be handled by `debug_request` function, that will display request information.
//!
//! ```rust,no_run
//! use hyper::server::conn::AddrStream;
//! use hyper::{Body, Request, Response, Server, StatusCode};
//! use hyper::service::{service_fn, make_service_fn};
//! use std::{convert::Infallible, net::SocketAddr};
//! use hyper::http::uri::InvalidUri;
//! use std::net::IpAddr;
//!
//! fn debug_request(req: Request<Body>) -> Result<Response<Body>, Infallible> {
//! let body_str = format!("{:?}", req);
//! Ok(Response::new(Body::from(body_str)))
//! }
//!
//! async fn handle(client_ip: IpAddr, req: Request<Body>) -> Result<Response<Body>, Infallible> {
//! if req.uri().path().starts_with("/target/first") {
//! // will forward requests to port 13901
//! match hyper_reverse_proxy::call(client_ip, "http://127.0.0.1:13901", req).await {
//! Ok(response) => {Ok(response)}
//! Err(error) => {Ok(Response::builder()
//! .status(StatusCode::INTERNAL_SERVER_ERROR)
//! .body(Body::empty())
//! .unwrap())}
//! }
//! } else if req.uri().path().starts_with("/target/second") {
//!
//! // will forward requests to port 13902
//! match hyper_reverse_proxy::call(client_ip, "http://127.0.0.1:13902", req).await {
//! Ok(response) => {Ok(response)}
//! Err(error) => {Ok(Response::builder()
//! .status(StatusCode::INTERNAL_SERVER_ERROR)
//! .body(Body::empty())
//! .unwrap())}
//! }
//! } else {
//! debug_request(req)
//! }
//! }
//!
//! #[tokio::main]
//! async fn main() {
//! let bind_addr = "127.0.0.1:8000";
//! let addr:SocketAddr = bind_addr.parse().expect("Could not parse ip:port.");
//!
//! let make_svc = make_service_fn(|conn: &AddrStream| {
//! let remote_addr = conn.remote_addr().ip();
//! async move {
//! Ok::<_, Infallible>(service_fn(move |req| handle(remote_addr, req)))
//! }
//! });
//!
//! let server = Server::bind(&addr).serve(make_svc);
//!
//! if let Err(e) = server.await {
//! eprintln!("server error: {}", e);
//! }
//!
//! println!("Running server on {:?}", addr);
//! }
//! ```
//!
use hyper::header::{HeaderMap, HeaderValue};
use hyper::http::header::{InvalidHeaderValue, ToStrError};
use hyper::http::uri::InvalidUri;
use hyper::{Body, Client, Error, Request, Response, Uri};
use lazy_static::lazy_static;
use std::net::IpAddr;
use std::str::FromStr;
/// Errors that can occur while proxying a request.
///
/// Derives `Debug` so callers can log the error or use it with
/// `unwrap`/`expect` (both `hyper::Error` and `InvalidUri` are `Debug`).
#[derive(Debug)]
pub enum ProxyError {
    /// The rewritten forward URI could not be parsed (see `forward_uri`).
    InvalidUri(InvalidUri),
    /// The underlying hyper client request failed.
    HyperError(Error),
    /// A header name/value could not be converted to or from a string.
    ForwardHeaderError,
}
// Conversions so `?` can lift the underlying hyper/http errors into
// `ProxyError` inside the proxy functions below.
impl From<Error> for ProxyError {
    fn from(err: Error) -> ProxyError {
        ProxyError::HyperError(err)
    }
}
impl From<InvalidUri> for ProxyError {
    fn from(err: InvalidUri) -> ProxyError {
        ProxyError::InvalidUri(err)
    }
}
// The two header-conversion failures carry no payload worth preserving, so
// both collapse into the unit `ForwardHeaderError` variant.
impl From<ToStrError> for ProxyError {
    fn from(_err: ToStrError) -> ProxyError {
        ProxyError::ForwardHeaderError
    }
}
impl From<InvalidHeaderValue> for ProxyError {
    fn from(_err: InvalidHeaderValue) -> ProxyError {
        ProxyError::ForwardHeaderError
    }
}
/// Returns `true` if `name` is a [hop-by-hop header] that a proxy must not
/// forward (RFC 2616 §13.5.1). The comparison is ASCII case-insensitive,
/// matching HTTP header-name semantics.
///
/// [hop-by-hop header]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html
pub fn is_hop_header(name: &str) -> bool {
    // A `const` table needs no allocation or lazy initialization, and
    // `str::eq_ignore_ascii_case` replaces the `unicase::Ascii` wrapper the
    // previous implementation built (and cached via `lazy_static!`).
    const HOP_BY_HOP_HEADERS: [&str; 8] = [
        "Connection",
        "Keep-Alive",
        "Proxy-Authenticate",
        "Proxy-Authorization",
        "Te",
        "Trailers",
        "Transfer-Encoding",
        "Upgrade",
    ];

    HOP_BY_HOP_HEADERS
        .iter()
        .any(|header| header.eq_ignore_ascii_case(name))
}
/// Returns a clone of the headers without the [hop-by-hop headers].
///
/// [hop-by-hop headers]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html
pub fn remove_hop_headers(headers: &HeaderMap<HeaderValue>) -> HeaderMap<HeaderValue> {
    let mut end_to_end = HeaderMap::new();
    let kept = headers
        .iter()
        .filter(|(name, _)| !is_hop_header(name.as_str()));
    for (name, value) in kept {
        end_to_end.insert(name.clone(), value.clone());
    }
    end_to_end
}
/// Strips hop-by-hop headers from an upstream response so it is safe to relay
/// back to the client; the body and status are passed through untouched.
pub fn create_proxied_response<B>(mut response: Response<B>) -> Response<B> {
    *response.headers_mut() = remove_hop_headers(response.headers());
    response
}
pub fn forward_uri<B>(forward_url: &str, req: &Request<B>) -> Result<Uri, InvalidUri> {
let forward_uri = match req.uri().query() {
Some(query) => format!("{}{}?{}", forward_url, req.uri().path(), query),
None => format!("{}{}", forward_url, req.uri().path()),
};
Uri::from_str(forward_uri.as_str())
}
/// Rewrites `request` for forwarding: strips hop-by-hop headers, retargets the
/// URI at `forward_url`, and records `client_ip` in `X-Forwarded-For`
/// (appending to the comma-space-separated list when the header already exists).
///
/// Fails with `ProxyError::InvalidUri` if the rewritten URI does not parse, or
/// `ProxyError::ForwardHeaderError` if an existing header value is not valid UTF-8.
pub fn create_proxied_request<B>(
    client_ip: IpAddr,
    forward_url: &str,
    mut request: Request<B>,
) -> Result<Request<B>, ProxyError> {
    *request.headers_mut() = remove_hop_headers(request.headers());
    *request.uri_mut() = forward_uri(forward_url, &request)?;
    let x_forwarded_for_header_name = "x-forwarded-for";
    // Add forwarding information in the headers
    match request.headers_mut().entry(x_forwarded_for_header_name) {
        // First proxy in the chain: start the list with this client's IP.
        hyper::header::Entry::Vacant(entry) => {
            entry.insert(client_ip.to_string().parse()?);
        }
        // Already proxied upstream: append our client to the existing list.
        hyper::header::Entry::Occupied(mut entry) => {
            let addr = format!("{}, {}", entry.get().to_str()?, client_ip);
            entry.insert(addr.parse()?);
        }
    }
    Ok(request)
}
/// Proxies one request to `forward_uri` and returns the (sanitized) upstream
/// response. `client_ip` is recorded in the outgoing `X-Forwarded-For` header.
pub async fn call(
    client_ip: IpAddr,
    forward_uri: &str,
    request: Request<Body>,
) -> Result<Response<Body>, ProxyError> {
    let outgoing = create_proxied_request(client_ip, forward_uri, request)?;
    let upstream = Client::new().request(outgoing).await?;
    Ok(create_proxied_response(upstream))
}
| 32.958904 | 104 | 0.596149 |
167cc015c2da9b1e04097592ba6ad1d4003e00ae
| 280 |
use std::alloc::{alloc, dealloc, Layout};
// error-pattern: has size 1 and alignment 1, but gave size 2 and alignment 1
fn main() {
    unsafe {
        // Allocate one byte with alignment 1...
        let x = alloc(Layout::from_size_align_unchecked(1, 1));
        // ...then deallocate claiming size 2: a deliberate layout mismatch.
        // Miri must diagnose this as UB (matched by the `error-pattern` above),
        // so the "bug" here is the point of the test — do not fix it.
        dealloc(x, Layout::from_size_align_unchecked(2, 1));
    }
}
aca6e719db423b84f4bf56c322d3236eb8611af2
| 2,557 |
// Data is pulled from https://covid.ourworldindata.org/data/owid-covid-data.json
use plotters::prelude::*;
use std::fs::File;
use std::io::BufReader;
// One day's figures for a single country. `#[serde(default)]` makes fields
// that are absent in the JSON deserialize as 0.0 instead of failing.
#[derive(serde_derive::Deserialize)]
struct DailyData {
    #[serde(default)]
    new_cases: f64,
    #[serde(default)]
    total_cases: f64,
}
// Per-country time series as found in the OWID JSON dump (see URL above);
// only the `data` array is deserialized, other keys are ignored.
#[derive(serde_derive::Deserialize)]
struct CountryData {
    data: Vec<DailyData>,
}
/// Draws a log-log chart of new vs. total COVID-19 cases per country with
/// hand-picked axis key points, writing the result to an SVG file.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let root =
        SVGBackend::new("plotters-doc-data/tick_control.svg", (1024, 768)).into_drawing_area();
    root.fill(&WHITE)?;
    // Reserve a strip at the bottom for the data-source attribution.
    let (upper, lower) = root.split_vertically(750);
    lower.titled(
        "Data Source: https://covid.ourworldindata.org/data/owid-covid-data.json",
        ("sans-serif", 10).into_font().color(&BLACK.mix(0.5)),
    )?;
    let mut chart = ChartBuilder::on(&upper)
        .caption("World COVID-19 Cases", ("sans-serif", (5).percent_height()))
        .set_label_area_size(LabelAreaPosition::Left, (8).percent())
        .set_label_area_size(LabelAreaPosition::Bottom, (4).percent())
        .margin((1).percent())
        .build_cartesian_2d(
            // Ranges use conventional three-digit grouping (previously written
            // as `5000_0000` / `50_0000`, which obscured the magnitudes).
            (20u32..50_000_000u32)
                .log_scale()
                .with_key_points(vec![50, 100, 1000, 10000, 100000, 1000000, 10000000]),
            (0u32..500_000u32)
                .log_scale()
                .with_key_points(vec![10, 50, 100, 1000, 10000, 100000, 200000]),
        )?;
    chart
        .configure_mesh()
        .x_desc("Total Cases")
        .y_desc("New Cases")
        .draw()?;
    let data: std::collections::HashMap<String, CountryData> = serde_json::from_reader(
        BufReader::new(File::open("plotters-doc-data/covid-data.json")?),
    )?;
    // One line series per selected country (plus the OWID world aggregate).
    for (idx, &series) in ["CHN", "USA", "RUS", "JPN", "DEU", "IND", "OWID_WRL"]
        .iter()
        .enumerate()
    {
        let color = Palette99::pick(idx).mix(0.9);
        chart
            .draw_series(LineSeries::new(
                data[series].data.iter().map(
                    |&DailyData {
                         new_cases,
                         total_cases,
                         ..
                     }| (total_cases as u32, new_cases as u32),
                ),
                color.stroke_width(3),
            ))?
            .label(series)
            .legend(move |(x, y)| Rectangle::new([(x, y - 5), (x + 10, y + 5)], color.filled()));
    }
    chart
        .configure_series_labels()
        .border_style(&BLACK)
        .draw()?;
    Ok(())
}
// Run the example under `cargo test` as well, so the chart-building code path
// is exercised; any error from `main` fails the test via `unwrap`.
#[test]
fn entry_point() {
    main().unwrap()
}
| 29.732558 | 97 | 0.543215 |
cc54878f6a47fd46712e3258aeefd57e5dfd02f1
| 3,548 |
use sys;
use get_api;
use Variant;
use ToVariant;
use VariantArray;
use Color;
use std::mem::transmute;
/// A reference-counted vector of `Color` that uses Godot's pool allocator.
pub struct ColorArray(pub(crate) sys::godot_pool_color_array);
impl ColorArray {
    /// Creates an empty `ColorArray`.
    pub fn new() -> Self { ColorArray::default() }
    /// Creates an array by trying to convert each variant.
    ///
    /// See `Variant::to_color_array`.
    pub fn from_variant_array(array: &VariantArray) -> Self {
        unsafe {
            let mut result = sys::godot_pool_color_array::default();
            (get_api().godot_pool_color_array_new_with_array)(&mut result, &array.0);
            ColorArray(result)
        }
    }
    /// Appends an element at the end of the array.
    pub fn push(&mut self, color: &Color) {
        unsafe {
            (get_api().godot_pool_color_array_append)(&mut self.0, transmute(color));
        }
    }
    /// Appends a `ColorArray` at the end of this array.
    pub fn push_array(&mut self, array: &ColorArray) {
        unsafe {
            (get_api().godot_pool_color_array_append_array)(&mut self.0, transmute(array));
        }
    }
    // TODO(error handling)
    /// Inserts a new `Color` at a given position in the array.
    ///
    /// Note: returns `true` when the underlying Godot call *failed*
    /// (`status != GODOT_OK`); kept as-is for backwards compatibility.
    pub fn insert(&mut self, offset: i32, color: &Color) -> bool {
        unsafe {
            let status = (get_api().godot_pool_color_array_insert)(&mut self.0, offset, transmute(color));
            status != sys::godot_error::GODOT_OK
        }
    }
    /// Inverts the order of the elements in the array.
    pub fn invert(&mut self) {
        unsafe {
            (get_api().godot_pool_color_array_invert)(&mut self.0)
        }
    }
    /// Removes an element at the given offset.
    pub fn remove(&mut self, idx: i32) {
        unsafe {
            (get_api().godot_pool_color_array_remove)(&mut self.0, idx);
        }
    }
    /// Changes the size of the array, possibly removing elements or pushing default values.
    pub fn resize(&mut self, size: i32) {
        unsafe {
            (get_api().godot_pool_color_array_resize)(&mut self.0, size);
        }
    }
    /// Returns a copy of the element at the given offset.
    pub fn get(&self, idx: i32) -> Color {
        unsafe {
            transmute((get_api().godot_pool_color_array_get)(&self.0, idx))
        }
    }
    /// Sets the value of the element at the given offset.
    pub fn set(&mut self, idx: i32, color: &Color) {
        unsafe {
            (get_api().godot_pool_color_array_set)(&mut self.0, idx, transmute(color));
        }
    }
    /// Returns the number of elements in the array.
    pub fn len(&self) -> i32 {
        unsafe {
            (get_api().godot_pool_color_array_size)(&self.0)
        }
    }
    /// Returns `true` if the array contains no elements.
    ///
    /// Complements `len` (addresses clippy's `len_without_is_empty`).
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    #[doc(hidden)]
    pub fn sys(&self) -> *const sys::godot_pool_color_array {
        &self.0
    }
    #[doc(hidden)]
    pub fn from_sys(sys: sys::godot_pool_color_array) -> Self {
        ColorArray(sys)
    }
    impl_common_methods! {
        /// Creates a new reference to this array.
        pub fn new_ref(&self) -> ColorArray : godot_pool_color_array_new_copy;
    }
}
// Wires `Drop` and `Default` to the corresponding Godot pool-array API calls
// (destroy on drop; a freshly constructed empty pool array for `default()`).
impl_basic_traits!(
    for ColorArray as godot_pool_color_array {
        Drop => godot_pool_color_array_destroy;
        Default => godot_pool_color_array_new;
    }
);
// Round-trip conversion to/from `Variant` as a pool color array payload.
impl ToVariant for ColorArray {
    fn to_variant(&self) -> Variant { Variant::from_color_array(self) }
    fn from_variant(variant: &Variant) -> Option<Self> { variant.try_to_color_array() }
}
| 29.566667 | 106 | 0.611894 |
fe992169ca934552f49e08970fa94034a70e8ca6
| 34,950 |
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::env;
use std::ffi::OsString;
use std::io::prelude::*;
use std::io;
use std::path::{Path, PathBuf};
use std::panic::{self, AssertUnwindSafe};
use std::process::Command;
use std::str;
use rustc_data_structures::sync::Lrc;
use std::sync::{Arc, Mutex};
use testing;
use rustc_lint;
use rustc::hir;
use rustc::hir::intravisit;
use rustc::session::{self, CompileIncomplete, config};
use rustc::session::config::{OutputType, OutputTypes, Externs, CodegenOptions};
use rustc::session::search_paths::{SearchPaths, PathKind};
use rustc_metadata::dynamic_lib::DynamicLibrary;
use tempfile::Builder as TempFileBuilder;
use rustc_driver::{self, driver, target_features, Compilation};
use rustc_driver::driver::phase_2_configure_and_expand;
use rustc_metadata::cstore::CStore;
use rustc_resolve::MakeGlobMap;
use syntax::ast;
use syntax::codemap::CodeMap;
use syntax::edition::Edition;
use syntax::feature_gate::UnstableFeatures;
use syntax::with_globals;
use syntax_pos::{BytePos, DUMMY_SP, Pos, Span, FileName};
use errors;
use errors::emitter::ColorConfig;
use clean::Attributes;
use html::markdown;
/// Options controlling how doctests are generated, combined from crate-level
/// `#![doc(test(..))]` attributes (see `scrape_test_config`) and CLI flags.
#[derive(Clone, Default)]
pub struct TestOptions {
    /// Whether to disable the default `extern crate my_crate;` when creating doctests.
    pub no_crate_inject: bool,
    /// Whether to emit compilation warnings when compiling doctests. Setting this will suppress
    /// the default `#![allow(unused)]`.
    pub display_warnings: bool,
    /// Additional crate-level attributes to add to doctests.
    pub attrs: Vec<String>,
}
/// rustdoc's `--test` entry point for a Rust source file: parses and expands
/// the crate, walks its HIR to collect doctests, then hands them to libtest.
/// Returns 0; `test_main` handles failing-test reporting itself.
pub fn run(input_path: &Path,
           cfgs: Vec<String>,
           libs: SearchPaths,
           externs: Externs,
           mut test_args: Vec<String>,
           crate_name: Option<String>,
           maybe_sysroot: Option<PathBuf>,
           display_warnings: bool,
           linker: Option<PathBuf>,
           edition: Edition,
           cg: CodegenOptions)
           -> isize {
    let input = config::Input::File(input_path.to_owned());
    // Session options for the *host* crate being scanned (not the doctests);
    // lints are capped at Allow since we only need to reach HIR.
    let sessopts = config::Options {
        maybe_sysroot: maybe_sysroot.clone().or_else(
            || Some(env::current_exe().unwrap().parent().unwrap().parent().unwrap().to_path_buf())),
        search_paths: libs.clone(),
        crate_types: vec![config::CrateTypeDylib],
        cg: cg.clone(),
        externs: externs.clone(),
        unstable_features: UnstableFeatures::from_environment(),
        lint_cap: Some(::rustc::lint::Level::Allow),
        actually_rustdoc: true,
        debugging_opts: config::DebuggingOptions {
            ..config::basic_debugging_options()
        },
        edition,
        ..config::basic_options().clone()
    };
    driver::spawn_thread_pool(sessopts, |sessopts| {
        let codemap = Lrc::new(CodeMap::new(sessopts.file_path_mapping()));
        let handler =
            errors::Handler::with_tty_emitter(ColorConfig::Auto,
                                              true, false,
                                              Some(codemap.clone()));
        let mut sess = session::build_session_(
            sessopts, Some(input_path.to_owned()), handler, codemap.clone(),
        );
        let codegen_backend = rustc_driver::get_codegen_backend(&sess);
        let cstore = CStore::new(codegen_backend.metadata_loader());
        rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
        let mut cfg = config::build_configuration(&sess, config::parse_cfgspecs(cfgs.clone()));
        target_features::add_configuration(&mut cfg, &sess, &*codegen_backend);
        sess.parse_sess.config = cfg;
        // Parse + expand up to phase 2: enough to get a HIR forest to walk.
        let krate = panictry!(driver::phase_1_parse_input(&driver::CompileController::basic(),
                                                          &sess,
                                                          &input));
        let driver::ExpansionResult { defs, mut hir_forest, .. } = {
            phase_2_configure_and_expand(
                &sess,
                &cstore,
                krate,
                None,
                "rustdoc-test",
                None,
                MakeGlobMap::No,
                |_| Ok(()),
            ).expect("phase_2_configure_and_expand aborted in rustdoc!")
        };
        let crate_name = crate_name.unwrap_or_else(|| {
            ::rustc_codegen_utils::link::find_crate_name(None, &hir_forest.krate().attrs, &input)
        });
        // Crate-level `#![doc(test(..))]` settings, merged with the CLI flag.
        let mut opts = scrape_test_config(hir_forest.krate());
        opts.display_warnings |= display_warnings;
        let mut collector = Collector::new(
            crate_name,
            cfgs,
            libs,
            cg,
            externs,
            false,
            opts,
            maybe_sysroot,
            Some(codemap),
            None,
            linker,
            edition
        );
        // Walk the crate; `HirCollector` feeds every doctest it finds into
        // `collector.tests`.
        {
            let map = hir::map::map_crate(&sess, &cstore, &mut hir_forest, &defs);
            let krate = map.krate();
            let mut hir_collector = HirCollector {
                sess: &sess,
                collector: &mut collector,
                map: &map
            };
            hir_collector.visit_testable("".to_string(), &krate.attrs, |this| {
                intravisit::walk_crate(this, krate);
            });
        }
        test_args.insert(0, "rustdoctest".to_string());
        testing::test_main(&test_args,
                           collector.tests.into_iter().collect(),
                           testing::Options::new().display_output(display_warnings));
        0
    })
}
// Look for #![doc(test(no_crate_inject))], used by crates in the std facade
fn scrape_test_config(krate: &::rustc::hir::Crate) -> TestOptions {
    use syntax::print::pprust;
    let mut opts = TestOptions {
        no_crate_inject: false,
        display_warnings: false,
        attrs: Vec::new(),
    };
    // Narrow the crate attributes down to the list arguments inside
    // `#![doc(test(...))]`; everything else is ignored here.
    let test_attrs: Vec<_> = krate.attrs.iter()
        .filter(|a| a.check_name("doc"))
        .flat_map(|a| a.meta_item_list().unwrap_or_else(Vec::new))
        .filter(|a| a.check_name("test"))
        .collect();
    let attrs = test_attrs.iter().flat_map(|a| a.meta_item_list().unwrap_or(&[]));
    for attr in attrs {
        if attr.check_name("no_crate_inject") {
            opts.no_crate_inject = true;
        }
        // `attr(...)` items are stored pretty-printed and later re-emitted as
        // crate-level `#![...]` attributes on every doctest (see `make_test`).
        if attr.check_name("attr") {
            if let Some(l) = attr.meta_item_list() {
                for item in l {
                    opts.attrs.push(pprust::meta_list_item_to_string(item));
                }
            }
        }
    }
    opts
}
/// Compiles and (unless `no_run`) executes a single doctest.
///
/// The snippet is wrapped by `make_test`, compiled into a temp dir, and run
/// with the target dylib path prepended so the executable finds its
/// dependencies. Panics (which libtest reports as a test failure) when the
/// outcome disagrees with `should_panic` / `compile_fail` / `error_codes`.
fn run_test(test: &str, cratename: &str, filename: &FileName, line: usize,
            cfgs: Vec<String>, libs: SearchPaths,
            cg: CodegenOptions, externs: Externs,
            should_panic: bool, no_run: bool, as_test_harness: bool,
            compile_fail: bool, mut error_codes: Vec<String>, opts: &TestOptions,
            maybe_sysroot: Option<PathBuf>, linker: Option<PathBuf>, edition: Edition) {
    // the test harness wants its own `main` & top level functions, so
    // never wrap the test in `fn main() { ... }`
    let (test, line_offset) = make_test(test, Some(cratename), as_test_harness, opts);
    // FIXME(#44940): if doctests ever support path remapping, then this filename
    // needs to be the result of CodeMap::span_to_unmapped_path
    let input = config::Input::Str {
        name: filename.to_owned(),
        input: test.to_owned(),
    };
    let outputs = OutputTypes::new(&[(OutputType::Exe, None)]);
    let sessopts = config::Options {
        maybe_sysroot: maybe_sysroot.or_else(
            || Some(env::current_exe().unwrap().parent().unwrap().parent().unwrap().to_path_buf())),
        search_paths: libs,
        crate_types: vec![config::CrateTypeExecutable],
        output_types: outputs,
        externs,
        cg: config::CodegenOptions {
            prefer_dynamic: true,
            linker,
            ..cg
        },
        test: as_test_harness,
        unstable_features: UnstableFeatures::from_environment(),
        debugging_opts: config::DebuggingOptions {
            ..config::basic_debugging_options()
        },
        edition,
        ..config::basic_options().clone()
    };
    let (libdir, outdir) = driver::spawn_thread_pool(sessopts, |sessopts| {
        // Shuffle around a few input and output handles here. We're going to pass
        // an explicit handle into rustc to collect output messages, but we also
        // want to catch the error message that rustc prints when it fails.
        //
        // We take our thread-local stderr (likely set by the test runner) and replace
        // it with a sink that is also passed to rustc itself. When this function
        // returns the output of the sink is copied onto the output of our own thread.
        //
        // The basic idea is to not use a default Handler for rustc, and then also
        // not print things by default to the actual stderr.
        struct Sink(Arc<Mutex<Vec<u8>>>);
        impl Write for Sink {
            fn write(&mut self, data: &[u8]) -> io::Result<usize> {
                Write::write(&mut *self.0.lock().unwrap(), data)
            }
            fn flush(&mut self) -> io::Result<()> { Ok(()) }
        }
        // On drop, copies whatever the sink captured back to the real stderr.
        struct Bomb(Arc<Mutex<Vec<u8>>>, Box<Write+Send>);
        impl Drop for Bomb {
            fn drop(&mut self) {
                let _ = self.1.write_all(&self.0.lock().unwrap());
            }
        }
        let data = Arc::new(Mutex::new(Vec::new()));
        // Offset line numbers so diagnostics point at the original doc comment,
        // not at the generated wrapper (`line_offset` lines of prelude).
        let codemap = Lrc::new(CodeMap::new_doctest(
            sessopts.file_path_mapping(), filename.clone(), line as isize - line_offset as isize
        ));
        let emitter = errors::emitter::EmitterWriter::new(box Sink(data.clone()),
                                                          Some(codemap.clone()),
                                                          false,
                                                          false);
        let old = io::set_panic(Some(box Sink(data.clone())));
        let _bomb = Bomb(data.clone(), old.unwrap_or(box io::stdout()));
        // Compile the code
        let diagnostic_handler = errors::Handler::with_emitter(true, false, box emitter);
        let mut sess = session::build_session_(
            sessopts, None, diagnostic_handler, codemap,
        );
        let codegen_backend = rustc_driver::get_codegen_backend(&sess);
        let cstore = CStore::new(codegen_backend.metadata_loader());
        rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
        let outdir = Mutex::new(
            TempFileBuilder::new().prefix("rustdoctest").tempdir().expect("rustdoc needs a tempdir")
        );
        let libdir = sess.target_filesearch(PathKind::All).get_lib_path();
        let mut control = driver::CompileController::basic();
        let mut cfg = config::build_configuration(&sess, config::parse_cfgspecs(cfgs.clone()));
        target_features::add_configuration(&mut cfg, &sess, &*codegen_backend);
        sess.parse_sess.config = cfg;
        let out = Some(outdir.lock().unwrap().path().to_path_buf());
        if no_run {
            control.after_analysis.stop = Compilation::Stop;
        }
        // rustc may panic internally on errors; treat that like a failed compile.
        let res = panic::catch_unwind(AssertUnwindSafe(|| {
            driver::compile_input(
                codegen_backend,
                &sess,
                &cstore,
                &None,
                &input,
                &out,
                &None,
                None,
                &control
            )
        }));
        let compile_result = match res {
            Ok(Ok(())) | Ok(Err(CompileIncomplete::Stopped)) => Ok(()),
            Err(_) | Ok(Err(CompileIncomplete::Errored(_))) => Err(())
        };
        // Cross-check the compile outcome against the test's expectations.
        match (compile_result, compile_fail) {
            (Ok(()), true) => {
                panic!("test compiled while it wasn't supposed to")
            }
            (Ok(()), false) => {}
            (Err(()), true) => {
                if error_codes.len() > 0 {
                    // Drop every expected error code found in the captured output;
                    // any that remain were not emitted.
                    let out = String::from_utf8(data.lock().unwrap().to_vec()).unwrap();
                    error_codes.retain(|err| !out.contains(err));
                }
            }
            (Err(()), false) => {
                panic!("couldn't compile the test")
            }
        }
        if error_codes.len() > 0 {
            panic!("Some expected error codes were not found: {:?}", error_codes);
        }
        (libdir, outdir)
    });
    if no_run { return }
    // Run the code!
    //
    // We're careful to prepend the *target* dylib search path to the child's
    // environment to ensure that the target loads the right libraries at
    // runtime. It would be a sad day if the *host* libraries were loaded as a
    // mistake.
    let mut cmd = Command::new(&outdir.lock().unwrap().path().join("rust_out"));
    let var = DynamicLibrary::envvar();
    let newpath = {
        let path = env::var_os(var).unwrap_or(OsString::new());
        let mut path = env::split_paths(&path).collect::<Vec<_>>();
        path.insert(0, libdir.clone());
        env::join_paths(path).unwrap()
    };
    cmd.env(var, &newpath);
    match cmd.output() {
        Err(e) => panic!("couldn't run the test: {}{}", e,
                        if e.kind() == io::ErrorKind::PermissionDenied {
                            " - maybe your tempdir is mounted with noexec?"
                        } else { "" }),
        Ok(out) => {
            if should_panic && out.status.success() {
                panic!("test executable succeeded when it should have failed");
            } else if !should_panic && !out.status.success() {
                panic!("test executable failed:\n{}\n{}\n",
                       str::from_utf8(&out.stdout).unwrap_or(""),
                       str::from_utf8(&out.stderr).unwrap_or(""));
            }
        }
    }
}
/// Builds the full source for one doctest and returns it together with the
/// number of prelude lines inserted before the user's code (used to offset
/// diagnostic line numbers back onto the original doc comment).
pub fn make_test(s: &str,
                 cratename: Option<&str>,
                 dont_insert_main: bool,
                 opts: &TestOptions)
                 -> (String, usize) {
    let (crate_attrs, everything_else) = partition_source(s);
    let everything_else = everything_else.trim();
    let mut prelude_lines = 0;
    let mut prog = String::new();
    if opts.attrs.is_empty() && !opts.display_warnings {
        // Without any #![doc(test(attr(...)))] overrides, silence lints that
        // doctests commonly trip; crates that make warnings fatal use those
        // attributes deliberately, so only inject this when they are absent.
        prog.push_str("#![allow(unused)]\n");
        prelude_lines += 1;
    }
    // Re-emit the crate-level attributes collected from #![doc(test(attr(...)))].
    for attr in &opts.attrs {
        prog.push_str(&format!("#![{}]\n", attr));
        prelude_lines += 1;
    }
    // Outer attributes from the snippet itself are treated as crate attributes.
    prog.push_str(&crate_attrs);
    // `extern crate std` is injected by the compiler already, and an explicit
    // `extern crate` in the snippet means the author wants full control.
    let skip_crate_inject = s.contains("extern crate")
        || opts.no_crate_inject
        || cratename == Some("std");
    if !skip_crate_inject {
        if let Some(cratename) = cratename {
            if s.contains(cratename) {
                prog.push_str(&format!("extern crate {};\n", cratename));
                prelude_lines += 1;
            }
        }
    }
    // FIXME (#21299): a real parser would beat this best-effort scan, which
    // strips `//` comments per line before looking for a `fn main`.
    let already_has_main = s.lines()
        .map(|line| line.find("//").map_or(line, |pos| &line[..pos]))
        .any(|code| code.contains("fn main"));
    if dont_insert_main || already_has_main {
        prog.push_str(everything_else);
    } else {
        prog.push_str("fn main() {\n");
        prelude_lines += 1;
        prog.push_str(everything_else);
        prog.push_str("\n}");
    }
    info!("final test program: {}", prog);
    (prog, prelude_lines)
}
// FIXME(aburka): use a real parser to deal with multiline attributes
/// Splits a snippet into (crate-attribute header, remaining body). The header
/// is the leading run of blank lines, `#![...]` attributes, and
/// `extern crate` lines; everything from the first other line onward — even
/// later header-looking lines — belongs to the body.
fn partition_source(s: &str) -> (String, String) {
    fn is_header_line(line: &str) -> bool {
        let trimmed = line.trim();
        trimmed.chars().all(char::is_whitespace)
            || trimmed.starts_with("#![")
            || trimmed.starts_with("#[macro_use] extern crate")
            || trimmed.starts_with("extern crate")
    }
    let mut header_done = false;
    let mut before = String::new();
    let mut after = String::new();
    for line in s.lines() {
        if header_done || !is_header_line(line) {
            header_done = true;
            after.push_str(line);
            after.push('\n');
        } else {
            before.push_str(line);
            before.push('\n');
        }
    }
    (before, after)
}
/// Collects the doctests found while walking a crate (or a standalone
/// markdown file) and converts each one into a `testing::TestDescAndFn`
/// that libtest can run.
pub struct Collector {
// All tests registered so far via `add_test`.
pub tests: Vec<testing::TestDescAndFn>,
// The name of the test displayed to the user, separated by `::`.
//
// In tests from Rust source, this is the path to the item
// e.g. `["std", "vec", "Vec", "push"]`.
//
// In tests from a markdown file, this is the titles of all headers (h1~h6)
// of the sections that contain the code block, e.g. if the markdown file is
// written as:
//
// ``````markdown
// # Title
//
// ## Subtitle
//
// ```rust
// assert!(true);
// ```
// ``````
//
// the `names` vector of that test will be `["Title", "Subtitle"]`.
names: Vec<String>,
// `--cfg` values forwarded to each test's `run_test` invocation.
cfgs: Vec<String>,
// Library search paths forwarded to each test.
libs: SearchPaths,
// Codegen options forwarded to each test.
cg: CodegenOptions,
// `--extern` crate mappings forwarded to each test.
externs: Externs,
// When true (markdown mode), section headers become test-name components
// via `register_header`.
use_headers: bool,
// Name of the crate the doctests were extracted from.
cratename: String,
// Per-crate doctest options (from `#![doc(test(...))]`).
opts: TestOptions,
// Sysroot override forwarded to each test, if any.
maybe_sysroot: Option<PathBuf>,
// Span of the doctest currently being processed; consulted by `get_line`
// and `get_filename`.
position: Span,
// Codemap used to resolve `position`; `None` when testing a markdown file.
codemap: Option<Lrc<CodeMap>>,
// Input file name used when no codemap is available (markdown mode).
filename: Option<PathBuf>,
// Linker override forwarded to each test.
linker: Option<PathBuf>,
// Rust edition the tests are compiled with.
edition: Edition,
}
impl Collector {
/// Creates a new, empty `Collector` carrying the per-crate configuration
/// that every collected test will be run with.
pub fn new(cratename: String, cfgs: Vec<String>, libs: SearchPaths, cg: CodegenOptions,
externs: Externs, use_headers: bool, opts: TestOptions,
maybe_sysroot: Option<PathBuf>, codemap: Option<Lrc<CodeMap>>,
filename: Option<PathBuf>, linker: Option<PathBuf>, edition: Edition) -> Collector {
Collector {
tests: Vec::new(),
names: Vec::new(),
cfgs,
libs,
cg,
externs,
use_headers,
cratename,
opts,
maybe_sysroot,
position: DUMMY_SP,
codemap,
filename,
linker,
edition,
}
}
/// Builds the user-visible test name: `<file> - <path::to::item> (line N)`.
fn generate_name(&self, line: usize, filename: &FileName) -> String {
format!("{} - {} (line {})", filename, self.names.join("::"), line)
}
/// Registers a single doctest with libtest.
///
/// The test body is a closure that spawns `run_test` on a fresh rustc
/// thread; a panic coming out of that thread is re-raised so libtest
/// reports the test as failed.
pub fn add_test(&mut self, test: String,
should_panic: bool, no_run: bool, should_ignore: bool,
as_test_harness: bool, compile_fail: bool, error_codes: Vec<String>,
line: usize, filename: FileName, allow_fail: bool) {
let name = self.generate_name(line, &filename);
// Clone everything the closure needs, since it has to own its captures.
let cfgs = self.cfgs.clone();
let libs = self.libs.clone();
let cg = self.cg.clone();
let externs = self.externs.clone();
let cratename = self.cratename.to_string();
let opts = self.opts.clone();
let maybe_sysroot = self.maybe_sysroot.clone();
let linker = self.linker.clone();
let edition = self.edition;
debug!("Creating test {}: {}", name, test);
self.tests.push(testing::TestDescAndFn {
desc: testing::TestDesc {
name: testing::DynTestName(name),
ignore: should_ignore,
// compiler failures are test failures
should_panic: testing::ShouldPanic::No,
allow_fail,
},
testfn: testing::DynTestFn(box move || {
// Capture the current panic/print hooks so they can be restored
// inside the spawned compiler thread.
let panic = io::set_panic(None);
let print = io::set_print(None);
match {
rustc_driver::in_rustc_thread(move || with_globals(move || {
io::set_panic(panic);
io::set_print(print);
run_test(&test,
&cratename,
&filename,
line,
cfgs,
libs,
cg,
externs,
should_panic,
no_run,
as_test_harness,
compile_fail,
error_codes,
&opts,
maybe_sysroot,
linker,
edition)
}))
} {
Ok(()) => (),
// Propagate a panicking test run to libtest.
Err(err) => panic::resume_unwind(err),
}
}),
});
}
/// Returns the line number at the start of the current span, adjusted
/// down by one when positive; returns 0 when no codemap is available
/// (markdown mode).
pub fn get_line(&self) -> usize {
if let Some(ref codemap) = self.codemap {
let line = self.position.lo().to_usize();
let line = codemap.lookup_char_pos(BytePos(line as u32)).line;
if line > 0 { line - 1 } else { line }
} else {
0
}
}
/// Records the span of the doctest about to be collected.
pub fn set_position(&mut self, position: Span) {
self.position = position;
}
/// Returns the file the current span points into, relativized to the
/// current working directory when possible. Falls back to the stored
/// input filename (markdown mode) or the literal `"input"`.
pub fn get_filename(&self) -> FileName {
if let Some(ref codemap) = self.codemap {
let filename = codemap.span_to_filename(self.position);
if let FileName::Real(ref filename) = filename {
if let Ok(cur_dir) = env::current_dir() {
if let Ok(path) = filename.strip_prefix(&cur_dir) {
return path.to_owned().into();
}
}
}
filename
} else if let Some(ref filename) = self.filename {
filename.clone().into()
} else {
FileName::Custom("input".to_owned())
}
}
/// Records a markdown section header of the given `level` (1..=6) so
/// later code blocks are named after the enclosing sections. No-op unless
/// `use_headers` is set.
pub fn register_header(&mut self, name: &str, level: u32) {
if self.use_headers {
// we use these headings as test names, so it's good if
// they're valid identifiers.
let name = name.chars().enumerate().map(|(i, c)| {
if (i == 0 && c.is_xid_start()) ||
(i != 0 && c.is_xid_continue()) {
c
} else {
'_'
}
}).collect::<String>();
// Here we try to efficiently assemble the header titles into the
// test name in the form of `h1::h2::h3::h4::h5::h6`.
//
// Suppose originally `self.names` contains `[h1, h2, h3]`...
let level = level as usize;
if level <= self.names.len() {
// ... Consider `level == 2`. All headers in the lower levels
// are irrelevant in this new level. So we should reset
// `self.names` to contain headers until <h2>, and replace that
// slot with the new name: `[h1, name]`.
self.names.truncate(level);
self.names[level - 1] = name;
} else {
// ... On the other hand, consider `level == 5`. This means we
// need to extend `self.names` to contain five headers. We fill
// in the missing level (<h4>) with `_`. Thus `self.names` will
// become `[h1, h2, h3, "_", name]`.
if level - 1 > self.names.len() {
self.names.resize(level - 1, "_".to_owned());
}
self.names.push(name);
}
}
}
}
/// HIR visitor that walks a crate and feeds every documented item's doc
/// comments to the doctest `Collector`.
struct HirCollector<'a, 'hir: 'a> {
// Compiler session, used for diagnostics and cfg/feature checks.
sess: &'a session::Session,
// Destination for the doctests found during the walk.
collector: &'a mut Collector,
// HIR map used for nested visits and for pretty-printing impl types.
map: &'a hir::map::Map<'hir>
}
impl<'a, 'hir> HirCollector<'a, 'hir> {
/// Extracts doctests from `attrs` for an item named `name`, then recurses
/// into the item's children via `nested`.
///
/// Items whose `cfg` attribute doesn't match the current session are
/// skipped entirely. A non-empty `name` is pushed onto the collector's
/// name stack for the duration of the nested walk, so tests found in
/// children get a fully qualified name, and popped afterwards.
fn visit_testable<F: FnOnce(&mut Self)>(&mut self,
name: String,
attrs: &[ast::Attribute],
nested: F) {
let mut attrs = Attributes::from_ast(self.sess.diagnostic(), attrs);
if let Some(ref cfg) = attrs.cfg {
if !cfg.matches(&self.sess.parse_sess, Some(&self.sess.features_untracked())) {
return;
}
}
let has_name = !name.is_empty();
if has_name {
self.collector.names.push(name);
}
attrs.collapse_doc_comments();
attrs.unindent_doc_comments();
// the collapse-docs pass won't combine sugared/raw doc attributes, or included files with
// anything else, this will combine them for us
if let Some(doc) = attrs.collapsed_doc_value() {
markdown::find_testable_code(&doc,
self.collector,
attrs.span.unwrap_or(DUMMY_SP),
Some(self.sess));
}
nested(self);
if has_name {
self.collector.names.pop();
}
}
}
// Walks the HIR, routing every kind of item that can carry doc comments
// through `visit_testable` so its doctests are collected.
impl<'a, 'hir> intravisit::Visitor<'hir> for HirCollector<'a, 'hir> {
fn nested_visit_map<'this>(&'this mut self) -> intravisit::NestedVisitorMap<'this, 'hir> {
intravisit::NestedVisitorMap::All(&self.map)
}
fn visit_item(&mut self, item: &'hir hir::Item) {
// An `impl` block has no name of its own; use the pretty-printed type
// being implemented as the name component instead.
let name = if let hir::ItemImpl(.., ref ty, _) = item.node {
self.map.node_to_pretty_string(ty.id)
} else {
item.name.to_string()
};
self.visit_testable(name, &item.attrs, |this| {
intravisit::walk_item(this, item);
});
}
fn visit_trait_item(&mut self, item: &'hir hir::TraitItem) {
self.visit_testable(item.name.to_string(), &item.attrs, |this| {
intravisit::walk_trait_item(this, item);
});
}
fn visit_impl_item(&mut self, item: &'hir hir::ImplItem) {
self.visit_testable(item.name.to_string(), &item.attrs, |this| {
intravisit::walk_impl_item(this, item);
});
}
fn visit_foreign_item(&mut self, item: &'hir hir::ForeignItem) {
self.visit_testable(item.name.to_string(), &item.attrs, |this| {
intravisit::walk_foreign_item(this, item);
});
}
fn visit_variant(&mut self,
v: &'hir hir::Variant,
g: &'hir hir::Generics,
item_id: ast::NodeId) {
self.visit_testable(v.node.name.to_string(), &v.node.attrs, |this| {
intravisit::walk_variant(this, v, g, item_id);
});
}
fn visit_struct_field(&mut self, f: &'hir hir::StructField) {
self.visit_testable(f.ident.to_string(), &f.attrs, |this| {
intravisit::walk_struct_field(this, f);
});
}
// Macros have no nested items to walk, hence the empty closure.
fn visit_macro_def(&mut self, macro_def: &'hir hir::MacroDef) {
self.visit_testable(macro_def.name.to_string(), &macro_def.attrs, |_| ());
}
}
#[cfg(test)]
mod tests {
    use super::{TestOptions, make_test};

    // NOTE: the multi-line string literals below intentionally keep their
    // continuation lines at column 0 — `make_test` emits unindented code, so
    // indenting them would change the expected output.
    //
    // The redundant `expected.clone()` calls that used to appear in several
    // of these assertions have been dropped (clippy: `redundant_clone`);
    // `expected` is never used after the assertion.

    #[test]
    fn make_test_basic() {
        //basic use: wraps with `fn main`, adds `#![allow(unused)]`
        let opts = TestOptions::default();
        let input =
"assert_eq!(2+2, 4);";
        let expected =
"#![allow(unused)]
fn main() {
assert_eq!(2+2, 4);
}".to_string();
        let output = make_test(input, None, false, &opts);
        assert_eq!(output, (expected, 2));
    }

    #[test]
    fn make_test_crate_name_no_use() {
        //if you give a crate name but *don't* use it within the test, it won't bother inserting
        //the `extern crate` statement
        let opts = TestOptions::default();
        let input =
"assert_eq!(2+2, 4);";
        let expected =
"#![allow(unused)]
fn main() {
assert_eq!(2+2, 4);
}".to_string();
        let output = make_test(input, Some("asdf"), false, &opts);
        assert_eq!(output, (expected, 2));
    }

    #[test]
    fn make_test_crate_name() {
        //if you give a crate name and use it within the test, it will insert an `extern crate`
        //statement before `fn main`
        let opts = TestOptions::default();
        let input =
"use asdf::qwop;
assert_eq!(2+2, 4);";
        let expected =
"#![allow(unused)]
extern crate asdf;
fn main() {
use asdf::qwop;
assert_eq!(2+2, 4);
}".to_string();
        let output = make_test(input, Some("asdf"), false, &opts);
        assert_eq!(output, (expected, 3));
    }

    #[test]
    fn make_test_no_crate_inject() {
        //even if you do use the crate within the test, setting `opts.no_crate_inject` will skip
        //adding it anyway
        let opts = TestOptions {
            no_crate_inject: true,
            display_warnings: false,
            attrs: vec![],
        };
        let input =
"use asdf::qwop;
assert_eq!(2+2, 4);";
        let expected =
"#![allow(unused)]
fn main() {
use asdf::qwop;
assert_eq!(2+2, 4);
}".to_string();
        let output = make_test(input, Some("asdf"), false, &opts);
        assert_eq!(output, (expected, 2));
    }

    #[test]
    fn make_test_ignore_std() {
        //even if you include a crate name, and use it in the doctest, we still won't include an
        //`extern crate` statement if the crate is "std" - that's included already by the compiler!
        let opts = TestOptions::default();
        let input =
"use std::*;
assert_eq!(2+2, 4);";
        let expected =
"#![allow(unused)]
fn main() {
use std::*;
assert_eq!(2+2, 4);
}".to_string();
        let output = make_test(input, Some("std"), false, &opts);
        assert_eq!(output, (expected, 2));
    }

    #[test]
    fn make_test_manual_extern_crate() {
        //when you manually include an `extern crate` statement in your doctest, make_test assumes
        //you've included one for your own crate too
        let opts = TestOptions::default();
        let input =
"extern crate asdf;
use asdf::qwop;
assert_eq!(2+2, 4);";
        let expected =
"#![allow(unused)]
extern crate asdf;
fn main() {
use asdf::qwop;
assert_eq!(2+2, 4);
}".to_string();
        let output = make_test(input, Some("asdf"), false, &opts);
        assert_eq!(output, (expected, 2));
    }

    #[test]
    fn make_test_manual_extern_crate_with_macro_use() {
        let opts = TestOptions::default();
        let input =
"#[macro_use] extern crate asdf;
use asdf::qwop;
assert_eq!(2+2, 4);";
        let expected =
"#![allow(unused)]
#[macro_use] extern crate asdf;
fn main() {
use asdf::qwop;
assert_eq!(2+2, 4);
}".to_string();
        let output = make_test(input, Some("asdf"), false, &opts);
        assert_eq!(output, (expected, 2));
    }

    #[test]
    fn make_test_opts_attrs() {
        //if you supplied some doctest attributes with #![doc(test(attr(...)))], it will use those
        //instead of the stock #![allow(unused)]
        let mut opts = TestOptions::default();
        opts.attrs.push("feature(sick_rad)".to_string());
        let input =
"use asdf::qwop;
assert_eq!(2+2, 4);";
        let expected =
"#![feature(sick_rad)]
extern crate asdf;
fn main() {
use asdf::qwop;
assert_eq!(2+2, 4);
}".to_string();
        let output = make_test(input, Some("asdf"), false, &opts);
        assert_eq!(output, (expected, 3));

        //adding more will also bump the returned line offset
        opts.attrs.push("feature(hella_dope)".to_string());
        let expected =
"#![feature(sick_rad)]
#![feature(hella_dope)]
extern crate asdf;
fn main() {
use asdf::qwop;
assert_eq!(2+2, 4);
}".to_string();
        let output = make_test(input, Some("asdf"), false, &opts);
        assert_eq!(output, (expected, 4));
    }

    #[test]
    fn make_test_crate_attrs() {
        //including inner attributes in your doctest will apply them to the whole "crate", pasting
        //them outside the generated main function
        let opts = TestOptions::default();
        let input =
"#![feature(sick_rad)]
assert_eq!(2+2, 4);";
        let expected =
"#![allow(unused)]
#![feature(sick_rad)]
fn main() {
assert_eq!(2+2, 4);
}".to_string();
        let output = make_test(input, None, false, &opts);
        assert_eq!(output, (expected, 2));
    }

    #[test]
    fn make_test_with_main() {
        //including your own `fn main` wrapper lets the test use it verbatim
        let opts = TestOptions::default();
        let input =
"fn main() {
assert_eq!(2+2, 4);
}";
        let expected =
"#![allow(unused)]
fn main() {
assert_eq!(2+2, 4);
}".to_string();
        let output = make_test(input, None, false, &opts);
        assert_eq!(output, (expected, 1));
    }

    #[test]
    fn make_test_fake_main() {
        //...but putting it in a comment will still provide a wrapper
        let opts = TestOptions::default();
        let input =
"//Ceci n'est pas une `fn main`
assert_eq!(2+2, 4);";
        let expected =
"#![allow(unused)]
fn main() {
//Ceci n'est pas une `fn main`
assert_eq!(2+2, 4);
}".to_string();
        let output = make_test(input, None, false, &opts);
        assert_eq!(output, (expected, 2));
    }

    #[test]
    fn make_test_dont_insert_main() {
        //even with that, if you set `dont_insert_main`, it won't create the `fn main` wrapper
        let opts = TestOptions::default();
        let input =
"//Ceci n'est pas une `fn main`
assert_eq!(2+2, 4);";
        let expected =
"#![allow(unused)]
//Ceci n'est pas une `fn main`
assert_eq!(2+2, 4);".to_string();
        let output = make_test(input, None, true, &opts);
        assert_eq!(output, (expected, 1));
    }

    #[test]
    fn make_test_display_warnings() {
        //if the user is asking to display doctest warnings, suppress the default allow(unused)
        let mut opts = TestOptions::default();
        opts.display_warnings = true;
        let input =
"assert_eq!(2+2, 4);";
        let expected =
"fn main() {
assert_eq!(2+2, 4);
}".to_string();
        let output = make_test(input, None, false, &opts);
        assert_eq!(output, (expected, 1));
    }
}
| 34.776119 | 100 | 0.548612 |
016f714c987accedc1af3fddc54b46102eb91a9b
| 66,463 |
/// Macro to implement a [system of quantities](https://jcgm.bipm.org/vim/en/1.3.html). `@...` match
/// arms are considered private.
///
/// * `$quantities_attr`: System of quantities attributes. Generally used to set documentation
/// comments for the system of quantities.
/// * `$quantities`: Name of the system of quantities (e.g. `ISQ`).
/// * `$name_attr`: Base quantity attributes. Generally used to set documentation comments for base
/// units.
/// * `$name`: Name of the base quantities for the system of quantities (e.g. `length`, `mass`,
/// ...). Note that this name must match the module name of the quantity.
/// * `$unit`: Base unit of the quantity (e.g. `meter`, `kilogram`).
/// * `$symbol`: Dimension symbol of the quantity.
/// * `$units_attr`: System of units attributes. Generally used to set documentation comments for
/// the system of units.
/// * `$units`: Name of the system of units (e.g. `SI`).
/// * `$module`: Module name of the quantity. When prefixed by the `mod` keyword the module must
/// already be defined with the `#[macro_use]` attribute. A `#[macro_use] pub mod $module;`
/// statement is generated if this variable is not prefixed by the `mod` keyword.
/// * `$quantity`: Quantity name (e.g. `Length`, `Mass`, ...).
///
/// An example invocation is given below for a meter-kilogram-second system. The `#[macro_use]`
/// attribute must be used when including the `uom` crate to make the `system!` macro available.
///
/// ```
/// #[macro_use]
/// extern crate uom;
///
/// # fn main() { }
/// # mod mks {
/// # #[macro_use]
/// # mod length {
/// # quantity! {
/// # /// Length (base unit meter, m).
/// # quantity: Length; "length";
/// # /// Length dimension, m.
/// # dimension: Q<P1 /*length*/, Z0 /*mass*/, Z0 /*time*/>;
/// # units {
/// # @meter: 1.0E0; "m", "meter", "meters";
/// # @foot: 3.048E-1; "ft", "foot", "feet";
/// # }
/// # }
/// # }
/// # #[macro_use]
/// # mod mass {
/// # quantity! {
/// # /// Mass (base unit kilogram, kg).
/// # quantity: Mass; "mass";
/// # /// Mass dimension, kg.
/// # dimension: Q<Z0 /*length*/, P1 /*mass*/, Z0 /*time*/>;
/// # units {
/// # @kilogram: 1.0; "kg", "kilogram", "kilograms";
/// # }
/// # }
/// # }
/// # #[macro_use]
/// # mod time {
/// # quantity! {
/// # /// Time (base unit second, s).
/// # quantity: Time; "time";
/// # /// Time dimension, s.
/// # dimension: Q<Z0 /*length*/, Z0 /*mass*/, P1 /*time*/>;
/// # units {
/// # @second: 1.0; "s", "second", "seconds";
/// # }
/// # }
/// # }
/// system! {
/// /// System of quantities, Q.
/// quantities: Q {
/// length: meter, L;
/// mass: kilogram, M;
/// time: second, T;
/// }
/// /// System of units, U.
/// units: U {
/// mod length::Length,
/// mod mass::Mass,
/// mod time::Time,
/// }
/// }
/// # mod f32 {
/// # Q!(crate::mks, f32/*, (centimeter, gram, second)*/);
/// # }
/// # }
/// ```
#[macro_export]
macro_rules! system {
(
$(#[$quantities_attr:meta])* quantities: $quantities:ident {
$($(#[$name_attr:meta])* $name:ident: $unit:ident, $symbol:ident;)+
}
$(#[$units_attr:meta])* units: $units:ident {
$($module:ident::$quantity:ident,)+
}
) => {
$(#[macro_use]
pub mod $module;)+
system! {
$(#[$quantities_attr])*
quantities: $quantities {
$($(#[$name_attr])* $name: $unit, $symbol;)+
}
$(#[$units_attr])*
units: $units {
$(mod $module::$quantity,)+
}
}
};
(
$(#[$quantities_attr:meta])* quantities: $quantities:ident {
$($(#[$name_attr:meta])* $name:ident: $unit:ident, $symbol:ident;)+
}
$(#[$units_attr:meta])* units: $units:ident {
$(mod $module:ident::$quantity:ident,)+
}
) => {
/// Marker trait to express the dependence of a [quantity][quantity] on the
/// [base quantities][base] of a [system of quantities][quantities] as a product of powers
/// of factors corresponding to the base quantities, omitting any numerical factor.
///
/// * <https://jcgm.bipm.org/vim/en/1.7.html>
///
/// [quantity]: https://jcgm.bipm.org/vim/en/1.1.html
/// [base]: https://jcgm.bipm.org/vim/en/1.4.html
/// [quantities]: https://jcgm.bipm.org/vim/en/1.3.html
pub trait Dimension:
Send
+ Sync
+ Unpin
+ $crate::lib::panic::RefUnwindSafe
+ $crate::lib::panic::UnwindSafe
{
$($(#[$name_attr])*
///
/// Quantity dimension.
type $symbol: $crate::typenum::Integer;)+
/// [Kind](https://jcgm.bipm.org/vim/en/1.2.html) of the quantity. Quantities of the
/// same dimension but differing kinds are not comparable.
type Kind: ?Sized;
}
/// Marker trait to identify a [system of units][units] based on a set of [base units][base]
/// of a [system of quantities][quantities].
///
/// ## Generic Parameters
/// * `V`: Underlying storage type trait is implemented for.
///
/// [units]: https://jcgm.bipm.org/vim/en/1.13.html
/// [base]: https://jcgm.bipm.org/vim/en/1.10.html
/// [quantities]: https://jcgm.bipm.org/vim/en/1.3.html
pub trait Units<V>:
Send
+ Sync
+ Unpin
+ $crate::lib::panic::RefUnwindSafe
+ $crate::lib::panic::UnwindSafe
where
V: $crate::Conversion<V>,
{
$($(#[$name_attr])*
///
/// Base unit.
#[allow(non_camel_case_types)]
type $name: Unit + $crate::Conversion<V, T = V::T>;)+
}
/// Trait to identify [measurement units][measurement] of individual
/// [quantities][quantity].
///
/// [measurement]: https://jcgm.bipm.org/vim/en/1.9.html
/// [quantity]: https://jcgm.bipm.org/vim/en/1.1.html
pub trait Unit: Copy {
/// Unit abbreviation.
fn abbreviation() -> &'static str;
/// Unit singular description.
fn singular() -> &'static str;
/// Unit plural description.
fn plural() -> &'static str;
}
/// Property of a phenomenon, body or substance, where the property has a magnitude that
/// can be expressed as a number and a reference.
///
/// The preferred method of creating a `Quantity` instance is to use the `new` constructor
/// which is generic over the input unit and accepts the input value as it's only
/// parameter.
///
#[cfg_attr(all(feature = "si", feature = "f32"), doc = " ```rust")]
#[cfg_attr(not(all(feature = "si", feature = "f32")), doc = " ```rust,ignore")]
/// # use uom::si::f32::*;
/// # use uom::si::length::meter;
/// // Create a length of 1 meter.
/// let l = Length::new::<meter>(1.0);
/// ```
///
/// `Quantity` fields are public to allow for the creation of `const` values and instances
/// of non-named `Quantity`s. This functionality will be deprecated and subsequently removed
/// once the [`const fn`](https://github.com/rust-lang/rust/issues/24111) feature is
/// stabilized.
///
#[cfg_attr(all(feature = "si", feature = "f32"), doc = " ```rust")]
#[cfg_attr(not(all(feature = "si", feature = "f32")), doc = " ```rust,ignore")]
/// # use uom::si::{Quantity, ISQ, SI};
/// # use uom::si::f32::*;
/// # use uom::lib::marker::PhantomData;
/// # use uom::typenum::{P2, Z0};
/// // Create a `const` length of 1 meter.
/// const L: Length = Length { dimension: PhantomData, units: PhantomData, value: 1.0, };
/// // Create a `const` area of 1 square meter explicitly without using the `Area` alias.
/// const A: Quantity<ISQ<P2, Z0, Z0, Z0, Z0, Z0, Z0>, SI<f32>, f32> =
/// Quantity { dimension: PhantomData, units: PhantomData, value: 1.0, };
/// ```
///
/// Using units for the wrong quantity will cause a compile error:
///
#[cfg_attr(all(feature = "si", feature = "f32"), doc = " ```rust,compile_fail")]
#[cfg_attr(not(all(feature = "si", feature = "f32")), doc = " ```rust,ignore")]
/// # use uom::si::f32::*;
/// # use uom::si::time::second;
/// // error[E0277]: the trait bound `second: length::Unit` is not satisfied
/// let l = Length::new::<second>(1.0);
/// ```
///
/// Mixing quantities will also cause a compile error:
///
#[cfg_attr(all(feature = "si", feature = "f32"), doc = " ```rust,compile_fail")]
#[cfg_attr(not(all(feature = "si", feature = "f32")), doc = " ```rust,ignore")]
/// # use uom::si::f32::*;
/// # use uom::si::length::meter;
/// # use uom::si::time::second;
/// // error[E0308]: mismatched types
/// let r = Length::new::<meter>(1.0) + Time::new::<second>(1.0);
/// ```
///
#[cfg_attr(all(feature = "si", feature = "f32"), doc = " ```rust,compile_fail")]
#[cfg_attr(not(all(feature = "si", feature = "f32")), doc = " ```rust,ignore")]
/// # use uom::si::f32::*;
/// # use uom::si::length::meter;
/// # use uom::si::time::second;
/// // error[E0308]: mismatched types
/// let v: Velocity = Length::new::<meter>(1.0) * Time::new::<second>(1.0);
/// ```
///
/// * <https://jcgm.bipm.org/vim/en/1.1.html>
///
/// ## Generic Parameters
/// * `D`: Quantity dimension. See [`Dimension`](./trait.Dimension.html).
/// * `U`: Quantity base units. See [`Units`](./trait.Units.html).
/// * `V`: Quantity value underlying storage type.
#[repr(transparent)]
pub struct Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V>,
{
/// Quantity dimension. See [`Dimension`](./trait.Dimension.html).
pub dimension: $crate::lib::marker::PhantomData<D>,
/// Quantity base units. See [`Units`](./trait.Units.html).
pub units: $crate::lib::marker::PhantomData<U>,
/// Quantity value stored in the base units for the quantity.
pub value: V,
}
// Type alias for dimensions where all exponents of the factors are the given value.
type DN<N> = dyn Dimension<$($symbol = system!(@replace $symbol N),)+
Kind = dyn $crate::Kind>;
/// Type alias for [dimension one][one] for which all the exponents of the factors
/// corresponding to the [base quantities][base] are zero.
///
/// [one]: https://jcgm.bipm.org/vim/en/1.8.html
/// [base]: https://jcgm.bipm.org/vim/en/1.4.html
#[allow(dead_code)]
pub type DimensionOne = DN<$crate::typenum::Z0>;
$(#[$quantities_attr])*
pub type $quantities<$($symbol,)+ K = dyn $crate::Kind> =
dyn Dimension<$($symbol = $symbol,)+ Kind = K>;
$(#[$units_attr])*
///
/// ## Generic Parameters
/// * `V`: Underlying storage type.
#[allow(unused_qualifications)]
pub type $units<V> = dyn Units<V, $($name = $name::$unit),+>;
/// Convert a value from base units to the given unit.
///
/// ## Generic Parameters
/// * `D`: Dimension.
/// * `U`: Base units.
/// * `V`: Value underlying storage type.
/// * `N`: Unit.
#[inline(always)]
fn from_base<D, U, V, N>(v: &V) -> V
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::Conversion<V>,
N: $crate::Conversion<V, T = V::T>,
{
use $crate::typenum::Integer;
use $crate::{Conversion, ConversionFactor};
let v = v.conversion();
let n_coef = N::coefficient();
let f = V::coefficient() $(* U::$name::coefficient().powi(D::$symbol::to_i32()))+;
let n_cons = N::constant($crate::ConstantOp::Sub);
if n_coef < f {
(v * (f / n_coef) - n_cons).value()
}
else {
(v / (n_coef / f) - n_cons).value()
}
}
/// Convert a value from the given unit to base units.
///
/// ## Generic Parameters
/// * `D`: Dimension.
/// * `U`: Base units.
/// * `V`: Value underlying storage type.
/// * `N`: Unit.
#[inline(always)]
fn to_base<D, U, V, N>(v: &V) -> V
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::Conversion<V>,
N: $crate::Conversion<V, T = V::T>,
{
use $crate::typenum::Integer;
use $crate::{Conversion, ConversionFactor};
let v = v.conversion();
let n_coef = N::coefficient();
let f = V::coefficient() $(* U::$name::coefficient().powi(D::$symbol::to_i32()))+;
let n_cons = N::constant($crate::ConstantOp::Add);
if n_coef >= f {
((v + n_cons) * (n_coef / f)).value()
}
else {
(((v + n_cons) * n_coef) / f).value()
}
}
autoconvert_test! {
/// Convert a value from one set of base units to a second.
///
/// ## Generic Parameters
/// * `D`: Dimension.
/// * `Ul`: Base units for left quantity.
/// * `Ur`: Base units for right quantity.
/// * `V`: Value underlying storage type.
#[allow(dead_code)]
#[inline(always)]
fn change_base<D, Ul, Ur, V>(v: &V) -> V
where
D: Dimension + ?Sized,
Ul: Units<V> + ?Sized,
Ur: Units<V> + ?Sized,
V: $crate::Conversion<V> + $crate::lib::ops::Mul<V, Output = V>,
{
use $crate::typenum::Integer;
use $crate::{Conversion, ConversionFactor};
(v.conversion() $(* Ur::$name::coefficient().powi(D::$symbol::to_i32())
/ Ul::$name::coefficient().powi(D::$symbol::to_i32()))+)
.value()
}}
#[doc(hidden)]
macro_rules! impl_ops {
(
$AddSubTrait:ident, $addsub_fun:ident, $addsub_op:tt,
$AddSubAssignTrait:ident, $addsubassign_fun:ident, $addsubassign_op:tt,
$AddSubAlias:ident,
$MulDivTrait:ident, $muldiv_fun:ident, $muldiv_op:tt,
$MulDivAssignTrait:ident, $muldivassign_fun:ident, $muldivassign_op:tt,
$Mod:ident
) => {
autoconvert! {
impl<D, Ul, Ur, V> $crate::lib::ops::$AddSubTrait<Quantity<D, Ur, V>>
for Quantity<D, Ul, V>
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::$AddSubTrait,
Ul: Units<V> + ?Sized,
Ur: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V>,
{
type Output = Quantity<D, Ul, V>;
#[inline(always)]
fn $addsub_fun(self, rhs: Quantity<D, Ur, V>) -> Self::Output {
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value $addsub_op change_base::<D, Ul, Ur, V>(&rhs.value),
}
}
}}
not_autoconvert! {
impl<D, U, V> $crate::lib::ops::$AddSubTrait for Quantity<D, U, V>
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::$AddSubTrait,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V>,
{
type Output = Self;
#[inline(always)]
fn $addsub_fun(self, rhs: Self) -> Self::Output {
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value $addsub_op rhs.value,
}
}
}}
autoconvert! {
impl<D, Ul, Ur, V> $crate::lib::ops::$AddSubAssignTrait<Quantity<D, Ur, V>>
for Quantity<D, Ul, V>
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::$AddSubAssignTrait,
Ul: Units<V> + ?Sized,
Ur: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V>
+ $crate::lib::ops::$AddSubAssignTrait<V>,
{
#[inline(always)]
fn $addsubassign_fun(&mut self, rhs: Quantity<D, Ur, V>) {
self.value $addsubassign_op change_base::<D, Ul, Ur, V>(&rhs.value);
}
}}
not_autoconvert! {
impl<D, U, V> $crate::lib::ops::$AddSubAssignTrait for Quantity<D, U, V>
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::$AddSubAssignTrait,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V>
+ $crate::lib::ops::$AddSubAssignTrait<V>,
{
#[inline(always)]
fn $addsubassign_fun(&mut self, rhs: Self) {
self.value $addsubassign_op rhs.value;
}
}}
autoconvert! {
impl<Dl, Dr, Ul, Ur, V> $crate::lib::ops::$MulDivTrait<Quantity<Dr, Ur, V>>
for Quantity<Dl, Ul, V>
where
Dl: Dimension + ?Sized,
$(Dl::$symbol: $crate::lib::ops::$AddSubTrait<Dr::$symbol>,
<Dl::$symbol as $crate::lib::ops::$AddSubTrait<Dr::$symbol>>::Output: $crate::typenum::Integer,)+
Dl::Kind: $crate::marker::$MulDivTrait,
Dr: Dimension + ?Sized,
Dr::Kind: $crate::marker::$MulDivTrait,
Ul: Units<V> + ?Sized,
Ur: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::ops::$MulDivTrait<V>,
{
type Output = Quantity<
$quantities<$($crate::typenum::$AddSubAlias<Dl::$symbol, Dr::$symbol>,)+>,
Ul, V>;
#[inline(always)]
fn $muldiv_fun(self, rhs: Quantity<Dr, Ur, V>) -> Self::Output {
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value $muldiv_op change_base::<Dr, Ul, Ur, V>(&rhs.value),
}
}
}}
not_autoconvert! {
impl<Dl, Dr, U, V> $crate::lib::ops::$MulDivTrait<Quantity<Dr, U, V>>
for Quantity<Dl, U, V>
where
Dl: Dimension + ?Sized,
$(Dl::$symbol: $crate::lib::ops::$AddSubTrait<Dr::$symbol>,
<Dl::$symbol as $crate::lib::ops::$AddSubTrait<Dr::$symbol>>::Output: $crate::typenum::Integer,)+
Dl::Kind: $crate::marker::$MulDivTrait,
Dr: Dimension + ?Sized,
Dr::Kind: $crate::marker::$MulDivTrait,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::ops::$MulDivTrait<V>,
{
type Output = Quantity<
$quantities<$($crate::typenum::$AddSubAlias<Dl::$symbol, Dr::$symbol>,)+>,
U, V>;
#[inline(always)]
fn $muldiv_fun(self, rhs: Quantity<Dr, U, V>) -> Self::Output {
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value $muldiv_op rhs.value,
}
}
}}
impl<D, U, V> $crate::lib::ops::$MulDivTrait<V> for Quantity<D, U, V>
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::$MulDivTrait,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V>,
{
type Output = Quantity<D, U, V>;
#[inline(always)]
fn $muldiv_fun(self, rhs: V) -> Self::Output {
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value $muldiv_op rhs,
}
}
}
impl<D, U, V> $crate::lib::ops::$MulDivAssignTrait<V> for Quantity<D, U, V>
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::$MulDivAssignTrait,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V>
+ $crate::lib::ops::$MulDivAssignTrait<V>,
{
#[inline(always)]
fn $muldivassign_fun(&mut self, rhs: V) {
self.value $muldivassign_op rhs;
}
}
#[doc(hidden)]
mod $Mod {
storage_types! {
use super::super::*;
impl<D, U> $crate::lib::ops::$MulDivTrait<Quantity<D, U, V>> for V
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::$MulDivTrait,
U: Units<V> + ?Sized,
$($crate::typenum::Z0: $crate::lib::ops::$AddSubTrait<D::$symbol>,
<$crate::typenum::Z0 as $crate::lib::ops::$AddSubTrait<D::$symbol>>::Output: $crate::typenum::Integer,)+
{
type Output = Quantity<
$quantities<
$($crate::typenum::$AddSubAlias<
$crate::typenum::Z0,
D::$symbol>,)+
D::Kind>,
U, V>;
#[inline(always)]
fn $muldiv_fun(self, rhs: Quantity<D, U, V>) -> Self::Output {
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self $muldiv_op rhs.value,
}
}
}
}
}
};
}
impl_ops!(Add, add, +, AddAssign, add_assign, +=, Sum,
Mul, mul, *, MulAssign, mul_assign, *=, add_mul);
impl_ops!(Sub, sub, -, SubAssign, sub_assign, -=, Diff,
Div, div, /, DivAssign, div_assign, /=, sub_div);
impl<D, U, V> Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V>,
{
/// Returns the floating point category of the number. If only one property is
/// going to be tested, it is generally faster to use the specific predicate
/// instead.
#[inline(always)]
pub fn classify(self) -> $crate::lib::num::FpCategory
where
V: $crate::num::Float,
{
self.value.classify()
}
std! {
autoconvert! {
/// Calculates the length of the hypotenuse of a right-angle triangle given the legs.
#[inline(always)]
pub fn hypot<Ur>(self, other: Quantity<D, Ur, V>) -> Self
where
V: $crate::num::Float,
Ur: Units<V> + ?Sized,
{
Self {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value.hypot(change_base::<D, U, Ur, V>(&other.value)),
}
}}
not_autoconvert! {
/// Calculates the length of the hypotenuse of a right-angle triangle given the legs.
#[inline(always)]
pub fn hypot(self, other: Self) -> Self
where
V: $crate::num::Float,
{
Self {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value.hypot(other.value),
}
}}}
/// Computes the absolute value of `self`. Returns `NAN` if the quantity is
/// `NAN`.
#[inline(always)]
pub fn abs(self) -> Self
where
V: $crate::num::Signed,
{
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value.abs(),
}
}
/// Returns a quantity that represents the sign of `self`.
///
/// * `1.0` of the base unit if the number is positive, `+0.0`, or `INFINITY`.
/// * `-1.0` of the base unit if the number is negative, `-0.0`, or
/// `NEG_INFINITY`.
/// * `NAN` if the number is `NAN`.
#[inline(always)]
pub fn signum(self) -> Self
where
V: $crate::num::Signed,
{
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value.signum(),
}
}
/// Returns `true` if `self`'s sign bit is positive, including `+0.0` and
/// `INFINITY`.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::wrong_self_convention))]
#[inline(always)]
pub fn is_sign_positive(self) -> bool
where
V: $crate::num::Float,
{
self.value.is_sign_positive()
}
/// Returns `true` if `self`'s sign is negative, including `-0.0` and
/// `NEG_INFINITY`.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::wrong_self_convention))]
#[inline(always)]
pub fn is_sign_negative(self) -> bool
where
V: $crate::num::Float,
{
self.value.is_sign_negative()
}
/// Takes the reciprocal (inverse) of a number, `1/x`.
///
#[cfg_attr(all(feature = "si", feature = "f32"), doc = " ```rust")]
#[cfg_attr(not(all(feature = "si", feature = "f32")), doc = " ```rust,ignore")]
/// # use uom::si::f32::*;
/// # use uom::si::time::second;
/// let f: Frequency = Time::new::<second>(1.0).recip();
/// ```
#[inline(always)]
pub fn recip(
self
) -> Quantity<$quantities<$($crate::typenum::Negate<D::$symbol>),+>, U, V>
where
$(D::$symbol: $crate::lib::ops::Neg,
<D::$symbol as $crate::lib::ops::Neg>::Output: $crate::typenum::Integer,)+
D::Kind: $crate::marker::Div,
V: $crate::num::Float,
{
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value.recip(),
}
}
/// Returns the maximum of the two quantities.
#[inline(always)]
pub fn max(self, other: Self) -> Self
where
V: $crate::num::Float,
{
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value.max(other.value),
}
}
/// Returns the minimum of the two quantities.
#[inline(always)]
pub fn min(self, other: Self) -> Self
where
V: $crate::num::Float,
{
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value.min(other.value),
}
}
}
// Explicitly definte floating point methods for float and complex storage types.
// `Complex<T>` doesn't implement `Float`/`FloatCore`, but it does implement these methods
// when the underlying type, `T`, implements `FloatCore`.
mod float {
storage_types! {
types: Float, Complex;
use super::super::*;
impl<D, U> Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
{
/// Returns `true` if this value is `NAN` and `false` otherwise.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::wrong_self_convention))]
#[inline(always)]
pub fn is_nan(self) -> bool
{
self.value.is_nan()
}
/// Returns `true` if this value is positive infinity or negative infinity and
/// `false` otherwise.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::wrong_self_convention))]
#[inline(always)]
pub fn is_infinite(self) -> bool
{
self.value.is_infinite()
}
/// Returns `true` if this number is neither infinite nor `NAN`.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::wrong_self_convention))]
#[inline(always)]
pub fn is_finite(self) -> bool
{
self.value.is_finite()
}
/// Returns `true` if the number is neither zero, infinite, subnormal, or `NAN`.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::wrong_self_convention))]
#[inline(always)]
pub fn is_normal(self) -> bool
{
self.value.is_normal()
}
std! {
/// Takes the cubic root of a number.
///
#[cfg_attr(all(feature = "si", feature = "f32"), doc = " ```rust")]
#[cfg_attr(not(all(feature = "si", feature = "f32")), doc = " ```rust,ignore")]
/// # use uom::si::f32::*;
/// # use uom::si::volume::cubic_meter;
/// let l: Length = Volume::new::<cubic_meter>(8.0).cbrt();
/// ```
///
/// The input type must have dimensions divisible by three:
///
#[cfg_attr(all(feature = "si", feature = "f32"), doc = " ```rust,compile_fail")]
#[cfg_attr(not(all(feature = "si", feature = "f32")), doc = " ```rust,ignore")]
/// # use uom::si::f32::*;
/// # use uom::si::area::square_meter;
/// // error[E0271]: type mismatch resolving ...
/// let r = Area::new::<square_meter>(8.0).cbrt();
/// ```
#[inline(always)]
pub fn cbrt(
self
) -> Quantity<
$quantities<$($crate::typenum::PartialQuot<D::$symbol, $crate::typenum::P3>),+>,
U, V>
where
$(D::$symbol: $crate::lib::ops::PartialDiv<$crate::typenum::P3>,
<D::$symbol as $crate::lib::ops::PartialDiv<$crate::typenum::P3>>::Output: $crate::typenum::Integer,)+
D::Kind: $crate::marker::Div,
{
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value.cbrt(),
}
}
/// Fused multiply-add. Computes `(self * a) + b` with only one rounding error.
/// This produces a more accurate result with better performance than a separate
/// multiplication operation followed by an add.
///
/// ## Generic Parameters
/// * `Da`: Dimension for parameter `a`.
/// * `Ua`: Base units for parameter `a`.
/// * `Ub`: Base units for parameter `b`.
#[inline(always)]
pub fn mul_add<Da, Ua, Ub>(
self,
a: Quantity<Da, Ua, V>,
b: Quantity<$quantities<$($crate::typenum::Sum<D::$symbol, Da::$symbol>),+>, Ub, V>,
) -> Quantity<$quantities<$($crate::typenum::Sum<D::$symbol, Da::$symbol>),+>, U, V>
where
$(D::$symbol: $crate::lib::ops::Add<Da::$symbol>,
<D::$symbol as $crate::lib::ops::Add<Da::$symbol>>::Output: $crate::typenum::Integer,)+
D::Kind: $crate::marker::Mul,
Da: Dimension + ?Sized,
Da::Kind: $crate::marker::Mul,
Ua: Units<V> + ?Sized,
Ub: Units<V> + ?Sized,
{
#[allow(unused_imports)]
use $crate::num_traits::MulAdd;
// (self * a) + b
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value.mul_add(a.value, b.value),
}
}
/// Raises a quantity to an integer power.
///
#[cfg_attr(all(feature = "si", feature = "f32"), doc = " ```rust")]
#[cfg_attr(not(all(feature = "si", feature = "f32")), doc = " ```rust,ignore")]
/// # use uom::si::f32::*;
/// # use uom::si::length::meter;
/// use uom::typenum::P2;
///
/// let a: Area = Length::new::<meter>(3.0).powi(P2::new());
/// ```
///
/// ## Generic Parameters
/// * `E`: `typenum::Integer` power.
#[inline(always)]
pub fn powi<E>(
self, _e: E
) -> Quantity<$quantities<$($crate::typenum::Prod<D::$symbol, E>),+>, U, V>
where
$(D::$symbol: $crate::lib::ops::Mul<E>,
<D::$symbol as $crate::lib::ops::Mul<E>>::Output: $crate::typenum::Integer,)+
D::Kind: $crate::marker::Mul,
E: $crate::typenum::Integer,
{
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value.powi(E::to_i32()),
}
}
/// Takes the square root of a number. Returns `NAN` if `self` is a negative
/// number.
///
#[cfg_attr(all(feature = "si", feature = "f32"), doc = " ```rust")]
#[cfg_attr(not(all(feature = "si", feature = "f32")), doc = " ```rust,ignore")]
/// # use uom::si::f32::*;
/// # use uom::si::area::square_meter;
/// let l: Length = Area::new::<square_meter>(4.0).sqrt();
/// ```
///
/// The input type must have dimensions divisible by two:
///
#[cfg_attr(all(feature = "si", feature = "f32"), doc = " ```rust,compile_fail")]
#[cfg_attr(not(all(feature = "si", feature = "f32")), doc = " ```rust,ignore")]
/// # use uom::si::f32::*;
/// # use uom::si::length::meter;
/// // error[E0271]: type mismatch resolving ...
/// let r = Length::new::<meter>(4.0).sqrt();
/// ```
#[inline(always)]
pub fn sqrt(
self
) -> Quantity<
$quantities<$($crate::typenum::PartialQuot<D::$symbol, $crate::typenum::P2>),+>,
U, V>
where
$(D::$symbol: $crate::typenum::PartialDiv<$crate::typenum::P2>,
<D::$symbol as $crate::typenum::PartialDiv<$crate::typenum::P2>>::Output: $crate::typenum::Integer,)+
D::Kind: $crate::marker::Div,
{
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value.sqrt(),
}
}}
}
}
}
impl<D, U, V> $crate::lib::clone::Clone for Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::clone::Clone,
{
#[inline(always)]
fn clone(&self) -> Self {
match *self {
Quantity { ref value, .. } => {
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: $crate::lib::clone::Clone::clone(&(*value)),
}
}
}
}
}
impl<D, U, V> $crate::lib::marker::Copy for Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::marker::Copy,
{
}
#[allow(non_camel_case_types)]
impl<D, U, V> $crate::lib::fmt::Debug for Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::fmt::Debug,
{
fn fmt<'a>(&self, f: &mut $crate::lib::fmt::Formatter<'a>) -> $crate::lib::fmt::Result {
self.value.fmt(f)
$(.and_then(|_| {
let d = <D::$symbol as $crate::typenum::Integer>::to_i32();
if 0 != d {
write!(f, " {}^{}", U::$name::abbreviation(), d)
}
else {
Ok(())
}
}))+
}
}
impl<D, U, V> $crate::lib::default::Default for Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::default::Default,
{
fn default() -> Self {
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: V::default(),
}
}
}
impl<D, U, V> $crate::lib::cmp::Eq for Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::cmp::Eq,
{
}
impl<D, U, V> $crate::lib::hash::Hash for Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::hash::Hash,
{
fn hash<H: $crate::lib::hash::Hasher>(&self, state: &mut H) {
self.value.hash(state);
}
}
impl<D, U, V> $crate::lib::ops::Neg for Quantity<D, U, V>
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::Neg,
U: Units<V> + ?Sized,
V: $crate::num::Signed + $crate::Conversion<V>,
{
type Output = Quantity<D, U, V>;
#[inline(always)]
fn neg(self) -> Self::Output {
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: -self.value,
}
}
}
impl<D, U, V> $crate::lib::cmp::Ord for Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::cmp::Ord,
{
#[inline(always)]
fn cmp(&self, other: &Self) -> $crate::lib::cmp::Ordering {
self.value.cmp(&other.value)
}
#[inline(always)]
fn max(self, other: Self) -> Self {
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value.max(other.value),
}
}
#[inline(always)]
fn min(self, other: Self) -> Self {
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value.min(other.value),
}
}
}
autoconvert! {
impl<D, Ul, Ur, V> $crate::lib::cmp::PartialEq<Quantity<D, Ur, V>> for Quantity<D, Ul, V>
where
D: Dimension + ?Sized,
Ul: Units<V> + ?Sized,
Ur: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V>,
{
#[inline(always)]
fn eq(&self, other: &Quantity<D, Ur, V>) -> bool {
self.value == change_base::<D, Ul, Ur, V>(&other.value)
}
}}
not_autoconvert! {
impl<D, U, V> $crate::lib::cmp::PartialEq for Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V>,
{
#[inline(always)]
fn eq(&self, other: &Self) -> bool {
self.value == other.value
}
}}
autoconvert! {
impl<D, Ul, Ur, V> $crate::lib::cmp::PartialOrd<Quantity<D, Ur, V>> for Quantity<D, Ul, V>
where
D: Dimension + ?Sized,
Ul: Units<V> + ?Sized,
Ur: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::cmp::PartialOrd,
{
#[inline(always)]
fn partial_cmp(
&self, other: &Quantity<D, Ur, V>
) -> Option<$crate::lib::cmp::Ordering>
{
self.value.partial_cmp(&change_base::<D, Ul, Ur, V>(&other.value))
}
#[inline(always)]
fn lt(&self, other: &Quantity<D, Ur, V>) -> bool {
self.value.lt(&change_base::<D, Ul, Ur, V>(&other.value))
}
#[inline(always)]
fn le(&self, other: &Quantity<D, Ur, V>) -> bool {
self.value.le(&change_base::<D, Ul, Ur, V>(&other.value))
}
#[inline(always)]
fn gt(&self, other: &Quantity<D, Ur, V>) -> bool {
self.value.gt(&change_base::<D, Ul, Ur, V>(&other.value))
}
#[inline(always)]
fn ge(&self, other: &Quantity<D, Ur, V>) -> bool {
self.value.ge(&change_base::<D, Ul, Ur, V>(&other.value))
}
}}
not_autoconvert! {
impl<D, U, V> $crate::lib::cmp::PartialOrd for Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::cmp::PartialOrd,
{
#[inline(always)]
fn partial_cmp(&self, other: &Self) -> Option<$crate::lib::cmp::Ordering> {
self.value.partial_cmp(&other.value)
}
#[inline(always)]
fn lt(&self, other: &Self) -> bool {
self.value.lt(&other.value)
}
#[inline(always)]
fn le(&self, other: &Self) -> bool {
self.value.le(&other.value)
}
#[inline(always)]
fn gt(&self, other: &Self) -> bool {
self.value.gt(&other.value)
}
#[inline(always)]
fn ge(&self, other: &Self) -> bool {
self.value.ge(&other.value)
}
}}
autoconvert! {
impl<D, Ul, Ur, V> $crate::lib::ops::Rem<Quantity<D, Ur, V>> for Quantity<D, Ul, V>
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::Rem,
Ul: Units<V> + ?Sized,
Ur: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V>,
{
type Output = Quantity<D, Ul, V>;
#[inline(always)]
fn rem(self, rhs: Quantity<D, Ur, V>) -> Self::Output {
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value % change_base::<D, Ul, Ur, V>(&rhs.value)
}
}
}}
not_autoconvert! {
impl<D, U, V> $crate::lib::ops::Rem for Quantity<D, U, V>
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::Rem,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V>,
{
type Output = Self;
#[inline(always)]
fn rem(self, rhs: Self) -> Self::Output {
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: self.value % rhs.value
}
}
}}
autoconvert! {
impl<D, Ul, Ur, V> $crate::lib::ops::RemAssign<Quantity<D, Ur, V>> for Quantity<D, Ul, V>
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::RemAssign,
Ul: Units<V> + ?Sized,
Ur: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::ops::RemAssign,
{
#[inline(always)]
fn rem_assign(&mut self, rhs: Quantity<D, Ur, V>) {
self.value %= change_base::<D, Ul, Ur, V>(&rhs.value)
}
}}
not_autoconvert! {
impl<D, U, V> $crate::lib::ops::RemAssign for Quantity<D, U, V>
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::RemAssign,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::ops::RemAssign,
{
#[inline(always)]
fn rem_assign(&mut self, rhs: Self) {
self.value %= rhs.value
}
}}
impl<D, U, V> $crate::num::Saturating for Quantity<D, U, V>
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::Saturating,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::num::Saturating,
{
fn saturating_add(self, v: Self) -> Self {
Quantity { value: self.value.saturating_add(v.value), ..self }
}
fn saturating_sub(self, v: Self) -> Self {
Quantity { value: self.value.saturating_sub(v.value), ..self }
}
}
impl<D, U, V> $crate::lib::iter::Sum for Quantity<D, U, V>
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::Add,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::iter::Sum,
{
fn sum<I>(iter: I) -> Self
where
I: Iterator<Item = Self>,
{
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: iter.map(|v| { v.value }).sum(),
}
}
}
test! {
impl<D, U, V> $crate::tests::Test for Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::tests::Test,
{
fn assert_eq(lhs: &Self, rhs: &Self) {
$crate::tests::Test::assert_eq(&lhs.value, &rhs.value);
}
fn assert_approx_eq(lhs: &Self, rhs: &Self) {
$crate::tests::Test::assert_approx_eq(&lhs.value, &rhs.value);
}
fn eq(lhs: &Self, rhs: &Self) -> bool {
$crate::tests::Test::eq(&lhs.value, &rhs.value)
}
fn approx_eq(lhs: &Self, rhs: &Self) -> bool {
$crate::tests::Test::approx_eq(&lhs.value, &rhs.value)
}
}}
impl<D, U, V> $crate::num::Zero for Quantity<D, U, V>
where
D: Dimension + ?Sized,
D::Kind: $crate::marker::Add,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V>,
{
fn zero() -> Self {
Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: V::zero(),
}
}
fn is_zero(&self) -> bool {
self.value.is_zero()
}
}
impl<D, U, V> $crate::ConstZero for Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::ConstZero,
{
const ZERO: Self = Self {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value: V::ZERO,
};
}
serde! {
impl<D, U, V> $crate::serde::Serialize for Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::serde::Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: $crate::serde::Serializer
{
self.value.serialize(serializer)
}
}
impl<'de, D, U, V> $crate::serde::Deserialize<'de> for Quantity<D, U, V>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::serde::Deserialize<'de>,
{
fn deserialize<De>(deserializer: De) -> Result<Self, De::Error>
where
De: $crate::serde::Deserializer<'de>,
{
let value: V = $crate::serde::Deserialize::deserialize(deserializer)?;
Ok(Quantity {
dimension: $crate::lib::marker::PhantomData,
units: $crate::lib::marker::PhantomData,
value,
})
}
}}
/// Utilities for formatting and printing quantities.
pub mod fmt {
use $crate::lib::fmt;
use super::{Dimension, Quantity, Unit, Units, from_base};
use $crate::num::Num;
use $crate::Conversion;
use $crate::fmt::DisplayStyle;
/// A struct to specify a display style and unit.
///
/// # Usage
/// ## Indirect style
#[cfg_attr(all(feature = "si", feature = "f32"), doc = " ```rust")]
#[cfg_attr(not(all(feature = "si", feature = "f32")), doc = " ```rust,ignore")]
/// # use uom::si::f32::*;
/// # use uom::si::length::{centimeter, meter};
/// # use uom::si::fmt::Arguments;
/// # use uom::fmt::DisplayStyle::*;
/// let l = Length::new::<meter>(1.0);
/// let a = Length::format_args(centimeter, Description);
///
/// assert_eq!("100 centimeters", format!("{}", a.with(l)));
/// ```
///
/// ## Direct style
#[cfg_attr(all(feature = "si", feature = "f32"), doc = " ```rust")]
#[cfg_attr(not(all(feature = "si", feature = "f32")), doc = " ```rust,ignore")]
/// # use uom::si::f32::*;
/// # use uom::si::length::{centimeter, meter};
/// # use uom::si::fmt::Arguments;
/// # use uom::fmt::DisplayStyle::*;
/// let l = Length::new::<meter>(1.0);
/// let a = l.into_format_args(centimeter, Description);
///
/// assert_eq!("100 centimeters", format!("{}", a));
/// ```
///
/// ## Generic Parameters
/// * `D`: Dimension.
/// * `N`: Unit.
#[allow(missing_debug_implementations)] // Prevent accidental direct use.
pub struct Arguments<D, N>
where
D: Dimension + ?Sized,
N: Unit,
{
pub(super) dimension: $crate::lib::marker::PhantomData<D>,
pub(super) unit: N,
pub(super) style: DisplayStyle,
}
/// A struct to specify a display style and unit for a given quantity.
///
#[cfg_attr(all(feature = "si", feature = "f32"), doc = " ```rust")]
#[cfg_attr(not(all(feature = "si", feature = "f32")), doc = " ```rust,ignore")]
/// # use uom::si::f32::*;
/// # use uom::si::length::{centimeter, meter};
/// # use uom::si::fmt::Arguments;
/// # use uom::fmt::DisplayStyle::*;
/// let l = Length::new::<meter>(1.0);
/// let a = l.into_format_args(centimeter, Description);
///
/// assert_eq!("100 centimeters", format!("{}", a));
/// ```
///
/// ## Generic Parameters
/// * `D`: Dimension.
/// * `U`: Base units.
/// * `V`: Value underlying storage type.
/// * `N`: Unit.
pub struct QuantityArguments<D, U, V, N>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: Num + Conversion<V>,
N: Unit,
{
pub(super) arguments: Arguments<D, N>,
pub(super) quantity: Quantity<D, U, V>,
}
impl<D, N> $crate::lib::clone::Clone for Arguments<D, N>
where
D: Dimension + ?Sized,
N: Unit,
{
fn clone(&self) -> Self {
Self {
dimension: $crate::lib::marker::PhantomData,
unit: self.unit.clone(),
style: self.style.clone(),
}
}
}
impl<D, N> $crate::lib::marker::Copy for Arguments<D, N>
where
D: Dimension + ?Sized,
N: Unit,
{
}
impl<D, U, V, N> $crate::lib::clone::Clone for QuantityArguments<D, U, V, N>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::clone::Clone,
N: Unit,
{
fn clone(&self) -> Self {
Self {
arguments: self.arguments.clone(),
quantity: self.quantity.clone(),
}
}
}
impl<D, U, V, N> $crate::lib::marker::Copy for QuantityArguments<D, U, V, N>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: $crate::num::Num + $crate::Conversion<V> + $crate::lib::marker::Copy,
N: Unit,
{
}
macro_rules! format_arguments {
($style:ident) => {
impl<D, U, V, N> fmt::$style for QuantityArguments<D, U, V, N>
where
D: Dimension + ?Sized,
U: Units<V> + ?Sized,
V: Num + Conversion<V> + fmt::$style,
N: Unit + Conversion<V, T = V::T>,
{
fn fmt<'a>(&self, f: &mut fmt::Formatter<'a>) -> fmt::Result {
let value = from_base::<D, U, V, N>(&self.quantity.value);
value.fmt(f)?;
write!(f, " {}",
match self.arguments.style {
DisplayStyle::Abbreviation => N::abbreviation(),
DisplayStyle::Description => {
if value.is_one() { N::singular() } else { N::plural() }
},
})
}
}
};
}
format_arguments!(Binary);
format_arguments!(Debug);
format_arguments!(Display);
format_arguments!(LowerExp);
format_arguments!(LowerHex);
format_arguments!(Octal);
format_arguments!(UpperExp);
format_arguments!(UpperHex);
}
/// Macro to implement [`quantity`](si/struct.Quantity.html) type aliases for a specific
/// [system of units][units] and value storage type.
///
/// * `$system`: Path to the module where the [`system!`](macro.system.html) macro was run
/// (e.g. `uom::si`).
/// * `$V`: Underlying value storage type (e.g. `f32`).
/// * `$U`: Optional. Base units. Pass as a tuple with the desired units: `(meter, kilogram,
/// second, ampere, kelvin, mole, candela)`. The system's base units will be used if no
/// value is provided. Note that a unit with a non-zero constant factor is not currently
/// supported as a base unit.
///
/// An example invocation is given below for a meter-kilogram-second system setup in the
/// module `mks` with a system of quantities name `Q`. The `#[macro_use]` attribute must be
/// used when including the `uom` crate to make macros for predefined systems available.
/// The optional units parameter to change the base units is included commented out.
///
/// ```
/// #[macro_use]
/// extern crate uom;
///
/// # fn main() { }
/// # mod mks {
/// # #[macro_use]
/// # mod length {
/// # quantity! {
/// # /// Length (base unit meter, m).
/// # quantity: Length; "length";
/// # /// Length dimension, m.
/// # dimension: Q<P1 /*length*/, Z0 /*mass*/, Z0 /*time*/>;
/// # units {
/// # @meter: 1.0E0; "m", "meter", "meters";
/// # @foot: 3.048E-1; "ft", "foot", "feet";
/// # }
/// # }
/// # }
/// # #[macro_use]
/// # mod mass {
/// # quantity! {
/// # /// Mass (base unit kilogram, kg).
/// # quantity: Mass; "mass";
/// # /// Mass dimension, kg.
/// # dimension: Q<Z0 /*length*/, P1 /*mass*/, Z0 /*time*/>;
/// # units {
/// # @kilogram: 1.0; "kg", "kilogram", "kilograms";
/// # }
/// # }
/// # }
/// # #[macro_use]
/// # mod time {
/// # quantity! {
/// # /// Time (base unit second, s).
/// # quantity: Time; "time";
/// # /// Time dimension, s.
/// # dimension: Q<Z0 /*length*/, Z0 /*mass*/, P1 /*time*/>;
/// # units {
/// # @second: 1.0; "s", "second", "seconds";
/// # }
/// # }
/// # }
/// # system! {
/// # /// System of quantities, Q.
/// # quantities: Q {
/// # length: meter, L;
/// # mass: kilogram, M;
/// # time: second, T;
/// # }
/// # /// System of units, U.
/// # units: U {
/// # mod length::Length,
/// # mod mass::Mass,
/// # mod time::Time,
/// # }
/// # }
/// mod f32 {
/// Q!(crate::mks, f32/*, (centimeter, gram, second)*/);
/// }
/// # }
/// ```
///
/// [units]: https://jcgm.bipm.org/vim/en/1.13.html
#[macro_export]
macro_rules! $quantities {
($path:path) => {
use $path as __system;
$(/// [`Quantity`](struct.Quantity.html) type alias using the default base units
/// parameterized on the underlying storage type.
///
/// ## Generic Parameters
/// * `V`: Underlying storage type.
#[allow(dead_code)]
#[allow(unused_qualifications)]
pub type $quantity<V> = __system::$module::$quantity<__system::$units<V>, V>;)+
};
($path:path, $V:ty) => {
use $path as __system;
$(/// [`Quantity`](struct.Quantity.html) type alias using the default base units.
#[allow(dead_code)]
#[allow(unused_qualifications)]
pub type $quantity = __system::$module::$quantity<__system::$units<$V>, $V>;)+
};
($path:path, $V:ty, $U:tt) => {
system!(@quantities $path, $V; $($name),+; $U; $($module::$quantity),+);
};
}
};
(
@quantities $path:path,
$V:ty;
$($name:ident),+;
($($U:ident),+);
$($module:ident::$quantity:ident),+
) => {
use $path as __system;
type Units = dyn __system::Units<$V, $($name = __system::$name::$U,)+>;
$(/// [`Quantity`](struct.Quantity.html) type alias using the given base units.
#[allow(dead_code)]
#[allow(unused_qualifications)]
pub type $quantity = __system::$module::$quantity<Units, $V>;)+
};
(@replace $_t:tt $sub:ty) => { $sub };
}
| 40.353977 | 132 | 0.427682 |
613664a08ef7aa684de8d4043e5cd8836f2c8ff3
| 1,321 |
use std::env;
use futures::executor::block_on;
use userapi::apis::configuration;
use userapi::apis::configuration::ApiKey;
use userapi::apis::profile_api::{get_user_profile};
#[tokio::main]
async fn main() {
    // Read the access token from the first command-line argument.
    let args: Vec<String> = env::args().collect();
    if args.len() < 2 {
        panic!("Needs a token")
    }

    // Wrap the raw token in the API-key structure the generated client expects.
    let api_key = ApiKey{
        prefix: None,
        key: String::from(&args[1]),
    };

    // Build a default configuration and attach the API key to it.
    let mut config = configuration::Configuration::new();
    config.api_key = Some(api_key);

    // Fetch the profile. `main` is already async (via #[tokio::main]), so the
    // future is awaited directly. The previous code used
    // `futures::executor::block_on`, which blocks the tokio runtime's worker
    // thread and is unnecessary inside an async context.
    match get_user_profile(&config).await {
        Ok(resp) => {
            println!("Profile");
            println!("=======");
            println!("User ID: {}", resp.user_id.unwrap_or_default());
            println!("Name: {}", resp.name.unwrap_or_default());
            println!("Email: {}", resp.email.unwrap_or_default());
            println!("Avatar: {}", resp.avatar_url.unwrap_or_default());
        },
        Err(error) => {
            println!("error: {:?}", error);
            panic!("Query failed!")
        }
    }
}
| 30.022727 | 73 | 0.572294 |
4bbd65bc86b11d77f0a745b8e4ad0155998f1140
| 41 |
net.sf.jasperreports.summary.SummaryTest
| 20.5 | 40 | 0.878049 |
095899e30608e35650d2c23522fe755d9a9acb59
| 9,794 |
use crate::{
compilation::{context::CompilationContext, JSONSchema},
error::{error, no_error, ErrorIterator, ValidationError},
keywords::{helpers, CompilationResult},
validator::Validate,
};
use serde_json::{Map, Number, Value};
use std::f64::EPSILON;
use crate::paths::{InstancePath, JSONPointer};
struct ConstArrayValidator {
value: Vec<Value>,
schema_path: JSONPointer,
}
impl ConstArrayValidator {
#[inline]
pub(crate) fn compile(value: &[Value], schema_path: JSONPointer) -> CompilationResult {
Ok(Box::new(ConstArrayValidator {
value: value.to_vec(),
schema_path,
}))
}
}
impl Validate for ConstArrayValidator {
#[inline]
fn validate<'a, 'b>(
&self,
schema: &'a JSONSchema,
instance: &'b Value,
instance_path: &InstancePath,
) -> ErrorIterator<'b> {
if self.is_valid(schema, instance) {
no_error()
} else {
error(ValidationError::constant_array(
self.schema_path.clone(),
instance_path.into(),
instance,
&self.value,
))
}
}
#[inline]
fn is_valid(&self, _: &JSONSchema, instance: &Value) -> bool {
if let Value::Array(instance_value) = instance {
helpers::equal_arrays(&self.value, instance_value)
} else {
false
}
}
}
impl core::fmt::Display for ConstArrayValidator {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"const: [{}]",
self.value
.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join(", ")
)
}
}
struct ConstBooleanValidator {
value: bool,
schema_path: JSONPointer,
}
impl ConstBooleanValidator {
#[inline]
pub(crate) fn compile<'a>(value: bool, schema_path: JSONPointer) -> CompilationResult<'a> {
Ok(Box::new(ConstBooleanValidator { value, schema_path }))
}
}
impl Validate for ConstBooleanValidator {
#[inline]
fn validate<'a, 'b>(
&self,
schema: &'a JSONSchema,
instance: &'b Value,
instance_path: &InstancePath,
) -> ErrorIterator<'b> {
if self.is_valid(schema, instance) {
no_error()
} else {
error(ValidationError::constant_boolean(
self.schema_path.clone(),
instance_path.into(),
instance,
self.value,
))
}
}
#[inline]
fn is_valid(&self, _: &JSONSchema, instance: &Value) -> bool {
if let Value::Bool(instance_value) = instance {
&self.value == instance_value
} else {
false
}
}
}
impl core::fmt::Display for ConstBooleanValidator {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "const: {}", self.value)
}
}
struct ConstNullValidator {
schema_path: JSONPointer,
}
impl ConstNullValidator {
#[inline]
pub(crate) fn compile<'a>(schema_path: JSONPointer) -> CompilationResult<'a> {
Ok(Box::new(ConstNullValidator { schema_path }))
}
}
impl Validate for ConstNullValidator {
#[inline]
fn validate<'a, 'b>(
&self,
schema: &'a JSONSchema,
instance: &'b Value,
instance_path: &InstancePath,
) -> ErrorIterator<'b> {
if self.is_valid(schema, instance) {
no_error()
} else {
error(ValidationError::constant_null(
self.schema_path.clone(),
instance_path.into(),
instance,
))
}
}
#[inline]
fn is_valid(&self, _: &JSONSchema, instance: &Value) -> bool {
instance.is_null()
}
}
impl core::fmt::Display for ConstNullValidator {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "const: {}", Value::Null)
}
}
struct ConstNumberValidator {
// This is saved in order to ensure that the error message is not altered by precision loss
original_value: Number,
value: f64,
schema_path: JSONPointer,
}
impl ConstNumberValidator {
#[inline]
pub(crate) fn compile(original_value: &Number, schema_path: JSONPointer) -> CompilationResult {
Ok(Box::new(ConstNumberValidator {
original_value: original_value.clone(),
value: original_value
.as_f64()
.expect("A JSON number will always be representable as f64"),
schema_path,
}))
}
}
impl Validate for ConstNumberValidator {
fn validate<'a, 'b>(
&self,
schema: &'a JSONSchema,
instance: &'b Value,
instance_path: &InstancePath,
) -> ErrorIterator<'b> {
if self.is_valid(schema, instance) {
no_error()
} else {
error(ValidationError::constant_number(
self.schema_path.clone(),
instance_path.into(),
instance,
&self.original_value,
))
}
}
fn is_valid(&self, _: &JSONSchema, instance: &Value) -> bool {
if let Value::Number(item) = instance {
(self.value - item.as_f64().expect("Always representable as f64")).abs() < EPSILON
} else {
false
}
}
}
impl core::fmt::Display for ConstNumberValidator {
    /// Renders the original, precision-preserving number in error messages.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("const: {}", self.original_value))
    }
}
/// Validator for object-valued `const` schemas: the instance must be an object
/// deeply equal to the expected one.
pub(crate) struct ConstObjectValidator {
    // Owned copy of the expected object from the schema.
    value: Map<String, Value>,
    // Location of the `const` keyword within the schema, used in error reporting.
    schema_path: JSONPointer,
}
impl ConstObjectValidator {
    /// Builds a boxed validator holding an owned copy of the expected object.
    #[inline]
    pub(crate) fn compile(
        value: &Map<String, Value>,
        schema_path: JSONPointer,
    ) -> CompilationResult {
        let validator = ConstObjectValidator {
            value: value.clone(),
            schema_path,
        };
        Ok(Box::new(validator))
    }
}
impl Validate for ConstObjectValidator {
fn validate<'a, 'b>(
&self,
schema: &'a JSONSchema,
instance: &'b Value,
instance_path: &InstancePath,
) -> ErrorIterator<'b> {
if self.is_valid(schema, instance) {
no_error()
} else {
error(ValidationError::constant_object(
self.schema_path.clone(),
instance_path.into(),
instance,
&self.value,
))
}
}
fn is_valid(&self, _: &JSONSchema, instance: &Value) -> bool {
if let Value::Object(item) = instance {
helpers::equal_objects(&self.value, item)
} else {
false
}
}
}
impl core::fmt::Display for ConstObjectValidator {
    /// Renders as `const: {"key":value, ...}` in error messages.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let members: Vec<String> = self
            .value
            .iter()
            .map(|(key, value)| format!(r#""{}":{}"#, key, value))
            .collect();
        write!(f, "const: {{{}}}", members.join(", "))
    }
}
/// Validator for string-valued `const` schemas: the instance must be a string
/// exactly equal to the expected one.
pub(crate) struct ConstStringValidator {
    // Owned copy of the expected string from the schema.
    value: String,
    // Location of the `const` keyword within the schema, used in error reporting.
    schema_path: JSONPointer,
}
impl ConstStringValidator {
    /// Builds a boxed validator owning a copy of the expected string.
    #[inline]
    pub(crate) fn compile(value: &str, schema_path: JSONPointer) -> CompilationResult {
        let validator = ConstStringValidator {
            value: value.to_owned(),
            schema_path,
        };
        Ok(Box::new(validator))
    }
}
impl Validate for ConstStringValidator {
fn validate<'a, 'b>(
&self,
schema: &'a JSONSchema,
instance: &'b Value,
instance_path: &InstancePath,
) -> ErrorIterator<'b> {
if self.is_valid(schema, instance) {
no_error()
} else {
error(ValidationError::constant_string(
self.schema_path.clone(),
instance_path.into(),
instance,
&self.value,
))
}
}
fn is_valid(&self, _: &JSONSchema, instance: &Value) -> bool {
if let Value::String(item) = instance {
&self.value == item
} else {
false
}
}
}
impl core::fmt::Display for ConstStringValidator {
    /// Renders as `const: <value>` (unquoted) in error messages.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("const: {}", self.value))
    }
}
#[inline]
pub(crate) fn compile<'a>(
    _: &'a Map<String, Value>,
    schema: &'a Value,
    context: &CompilationContext,
) -> Option<CompilationResult<'a>> {
    let schema_path = context.as_pointer_with("const");
    // Every JSON type has a dedicated validator, so a `const` keyword always
    // produces a compilation result.
    Some(match schema {
        Value::Array(items) => ConstArrayValidator::compile(items, schema_path),
        Value::Bool(item) => ConstBooleanValidator::compile(*item, schema_path),
        Value::Null => ConstNullValidator::compile(schema_path),
        Value::Number(item) => ConstNumberValidator::compile(item, schema_path),
        Value::Object(map) => ConstObjectValidator::compile(map, schema_path),
        Value::String(string) => ConstStringValidator::compile(string, schema_path),
    })
}
#[cfg(test)]
mod tests {
    use crate::tests_util;
    use serde_json::{json, Value};
    use test_case::test_case;
    // Each case checks that a failing `const` validation reports its error at
    // the "/const" schema path, covering every JSON type the keyword supports.
    #[test_case(&json!({"const": 1}), &json!(2), "/const")]
    #[test_case(&json!({"const": null}), &json!(3), "/const")]
    #[test_case(&json!({"const": false}), &json!(4), "/const")]
    #[test_case(&json!({"const": []}), &json!(5), "/const")]
    #[test_case(&json!({"const": {}}), &json!(6), "/const")]
    #[test_case(&json!({"const": ""}), &json!(7), "/const")]
    fn schema_path(schema: &Value, instance: &Value, expected: &str) {
        tests_util::assert_schema_path(schema, instance, expected)
    }
}
| 28.143678 | 99 | 0.550541 |
d6c2e2168cc9a5efc2799ddb45b99b7a575a2426
| 3,543 |
use crate::util::{print_part_1, print_part_2};
use std::fs::read_to_string;
use std::time::Instant;
/// Sums every number on `board` that has not yet been drawn.
///
/// `numbers` is the slice of numbers drawn so far; membership is checked
/// linearly, which is fine for 5x5 boards and short draw lists.
fn sum_unmarked(numbers: &[usize], board: &[[usize; 5]; 5]) -> usize {
    // Iterator form of the original nested mutate-and-accumulate loops.
    board
        .iter()
        .flatten()
        .filter(|&num| !numbers.contains(num))
        .sum()
}
/// Returns `true` if `board` has a fully-marked row or column given the
/// numbers drawn so far. Diagonals do not count.
fn board_wins(numbers: &[usize], board: &[[usize; 5]; 5]) -> bool {
    // Any complete row?
    if board
        .iter()
        .any(|line| line.iter().all(|num| numbers.contains(num)))
    {
        return true;
    }
    // Any complete column? (Iterator `any` replaces the manual indexed loop.)
    (0..board[0].len()).any(|col| board.iter().all(|line| numbers.contains(&line[col])))
}
/// Plays bingo with the given draw order across all boards.
///
/// With `find_first == true` returns the score of the first board to win;
/// otherwise the score of the last board to win. The score is the sum of the
/// winning board's unmarked numbers times the last drawn number.
/// Panics (`unreachable!`) if the draws never finish the requested board.
fn play_bingo(numbers: &[usize], boards: &[[[usize; 5]; 5]], find_first: bool) -> usize {
    let mut finished_boards: Vec<usize> = Vec::new();
    for draw_index in 0..numbers.len() {
        let drawn_numbers = &numbers[..=draw_index];
        for (board_index, board) in boards.iter().enumerate() {
            // Boards that already won are skipped on later draws.
            if finished_boards.contains(&board_index) {
                continue;
            }
            if !board_wins(drawn_numbers, board) {
                continue;
            }
            let is_last_winner = finished_boards.len() == boards.len() - 1;
            if find_first || is_last_winner {
                return sum_unmarked(drawn_numbers, board) * numbers[draw_index];
            }
            finished_boards.push(board_index);
        }
    }
    unreachable!();
}
/// Parses puzzle input: a first line of comma-separated draw numbers, then
/// blank-line-separated 5x5 boards of whitespace-separated cells.
///
/// Panics with "Cannot parse input" on a missing first line or malformed
/// numeric fields; board rows must be preceded by a blank line.
fn parse_input(input: &str) -> (Vec<usize>, Vec<[[usize; 5]; 5]>) {
    let mut input_lines = input.lines();
    let numbers: Vec<usize> = input_lines
        .next()
        .expect("Cannot parse input")
        .split(',') // char split is the idiomatic form for a single separator
        .map(|num| num.parse().expect("Cannot parse input"))
        .collect();
    let mut boards: Vec<[[usize; 5]; 5]> = vec![];
    let mut board_row_index = 0;
    // Each blank line starts a fresh (zeroed) board; subsequent lines fill its
    // rows top to bottom. The `for` loop replaces the manual `loop`/`is_some`.
    for line in input_lines {
        if line.is_empty() {
            boards.push([[0; 5]; 5]);
            board_row_index = 0;
            continue;
        }
        let current_board = boards
            .last_mut()
            .expect("Board rows must follow a blank line");
        for (col, num) in line
            .split_whitespace()
            .map(|num| num.parse().expect("Cannot parse input"))
            .enumerate()
        {
            current_board[board_row_index][col] = num;
        }
        board_row_index += 1;
    }
    (numbers, boards)
}
/// Entry point for day 4: reads the puzzle input from disk, then solves and
/// times both parts, printing each alongside its previously verified answer.
pub fn main() {
    let (numbers, boards) =
        parse_input(&read_to_string("inputs/day4.txt").expect("Input not found.."));
    // PART 1
    let start = Instant::now();
    // Previously verified result, used by the printer as a regression check.
    let known_answer = "51034";
    let part_1: usize = play_bingo(&numbers, &boards, true);
    let duration = start.elapsed();
    print_part_1(&part_1.to_string(), &known_answer, duration);
    // PART 2
    let start = Instant::now();
    let known_answer = "5434";
    let part_2: usize = play_bingo(&numbers, &boards, false);
    let duration = start.elapsed();
    print_part_2(&part_2.to_string(), &known_answer, duration);
}
#[cfg(test)]
mod tests {
    use super::*;
    // Both tests read the shared worked example from disk and check the
    // documented answers (first winner = 4512, last winner = 1924).
    #[test]
    fn test_example_1() {
        let (numbers, boards) =
            parse_input(&read_to_string("inputs/day4_test.txt").expect("Input not found.."));
        assert_eq!(play_bingo(&numbers, &boards, true), 4512);
    }
    #[test]
    fn test_example_2() {
        let (numbers, boards) =
            parse_input(&read_to_string("inputs/day4_test.txt").expect("Input not found.."));
        assert_eq!(play_bingo(&numbers, &boards, false), 1924);
    }
}
| 27.465116 | 93 | 0.542478 |
f5ca7e1c3d322eb43ca561625f96cddfdce0f5ea
| 35,341 |
//! Traits, utilities, and a macro for easy database connection pooling.
//!
//! # Overview
//!
//! This module provides traits, utilities, and a procedural macro that allows
//! you to easily connect your Rocket application to databases through
//! connection pools. A _database connection pool_ is a data structure that
//! maintains active database connections for later use in the application.
//! This implementation of connection pooling support is based on
//! [`r2d2`] and exposes connections through [request guards]. Databases are
//! individually configured through Rocket's regular configuration mechanisms: a
//! `Rocket.toml` file, environment variables, or procedurally.
//!
//! Connecting your Rocket application to a database using this library occurs
//! in three simple steps:
//!
//! 1. Configure your databases in `Rocket.toml`.
//! (see [Configuration](#configuration))
//! 2. Associate a request guard type and fairing with each database.
//! (see [Guard Types](#guard-types))
//! 3. Use the request guard to retrieve a connection in a handler.
//! (see [Handlers](#handlers))
//!
//! For a list of supported databases, see [Provided Databases](#provided). This
//! support can be easily extended by implementing the [`Poolable`] trait. See
//! [Extending](#extending) for more.
//!
//! ## Example
//!
//! Before using this library, the feature corresponding to your database type
//! in `rocket_contrib` must be enabled:
//!
//! ```toml
//! [dependencies.rocket_contrib]
//! version = "0.5.0-dev"
//! default-features = false
//! features = ["diesel_sqlite_pool"]
//! ```
//!
//! See [Provided](#provided) for a list of supported database and their
//! associated feature name.
//!
//! In `Rocket.toml` or the equivalent via environment variables:
//!
//! ```toml
//! [global.databases]
//! sqlite_logs = { url = "/path/to/database.sqlite" }
//! ```
//!
//! In your application's source code, one-time:
//!
//! ```rust
//! #[macro_use] extern crate rocket;
//! #[macro_use] extern crate rocket_contrib;
//!
//! # #[cfg(feature = "diesel_sqlite_pool")]
//! # mod test {
//! use rocket_contrib::databases::diesel;
//!
//! #[database("sqlite_logs")]
//! struct LogsDbConn(diesel::SqliteConnection);
//!
//! #[rocket::launch]
//! fn rocket() -> rocket::Rocket {
//! rocket::ignite().attach(LogsDbConn::fairing())
//! }
//! # } fn main() {}
//! ```
//!
//! Whenever a connection to the database is needed:
//!
//! ```rust
//! # #[macro_use] extern crate rocket;
//! # #[macro_use] extern crate rocket_contrib;
//! #
//! # #[cfg(feature = "diesel_sqlite_pool")]
//! # mod test {
//! # use rocket_contrib::databases::diesel;
//! #
//! # #[database("sqlite_logs")]
//! # struct LogsDbConn(diesel::SqliteConnection);
//! #
//! # type Logs = ();
//! # type Result<T> = std::result::Result<T, ()>;
//! #
//! #[get("/logs/<id>")]
//! fn get_logs(conn: LogsDbConn, id: usize) -> Result<Logs> {
//! # /*
//! Logs::by_id(&*conn, id)
//! # */
//! # Ok(())
//! }
//! # } fn main() {}
//! ```
//!
//! # Usage
//!
//! ## Configuration
//!
//! Databases can be configured via various mechanisms: `Rocket.toml`,
//! procedurally via `rocket::custom()`, or via environment variables.
//!
//! ### `Rocket.toml`
//!
//! To configure a database via `Rocket.toml`, add a table for each database
//! to the `databases` table where the key is a name of your choice. The table
//! should have a `url` key and, optionally, a `pool_size` key. This looks as
//! follows:
//!
//! ```toml
//! # Option 1:
//! [global.databases]
//! sqlite_db = { url = "db.sqlite" }
//!
//! # Option 2:
//! [global.databases.my_db]
//! url = "mysql://root:root@localhost/my_db"
//!
//! # With a `pool_size` key:
//! [global.databases]
//! sqlite_db = { url = "db.sqlite", pool_size = 20 }
//! ```
//!
//! The table _requires_ one key:
//!
//! * `url` - the URL to the database
//!
//! Additionally, all configurations accept the following _optional_ keys:
//!
//! * `pool_size` - the size of the pool, i.e., the number of connections to
//! pool (defaults to the configured number of workers)
//!
//! Additional options may be required or supported by other adapters.
//!
//! ### Procedurally
//!
//! Databases can also be configured procedurally via `rocket::custom()`.
//! The example below does just this:
//!
//! ```rust
//! extern crate rocket;
//!
//! # #[cfg(feature = "diesel_sqlite_pool")]
//! # mod test {
//! use std::collections::HashMap;
//! use rocket::config::{Config, Environment, Value};
//!
//! #[rocket::launch]
//! fn rocket() -> rocket::Rocket {
//! let mut database_config = HashMap::new();
//! let mut databases = HashMap::new();
//!
//! // This is the same as the following TOML:
//! // my_db = { url = "database.sqlite" }
//! database_config.insert("url", Value::from("database.sqlite"));
//! databases.insert("my_db", Value::from(database_config));
//!
//! let config = Config::build(Environment::Development)
//! .extra("databases", databases)
//! .finalize()
//! .unwrap();
//!
//! rocket::custom(config)
//! }
//! # } fn main() {}
//! ```
//!
//! ### Environment Variables
//!
//! Lastly, databases can be configured via environment variables by specifying
//! the `databases` table as detailed in the [Environment Variables
//! configuration
//! guide](https://rocket.rs/v0.5/guide/configuration/#environment-variables):
//!
//! ```bash
//! ROCKET_DATABASES='{my_db={url="db.sqlite"}}'
//! ```
//!
//! Multiple databases can be specified in the `ROCKET_DATABASES` environment variable
//! as well by comma separating them:
//!
//! ```bash
//! ROCKET_DATABASES='{my_db={url="db.sqlite"},my_pg_db={url="postgres://root:root@localhost/my_pg_db"}}'
//! ```
//!
//! ## Guard Types
//!
//! Once a database has been configured, the `#[database]` attribute can be used
//! to tie a type in your application to a configured database. The database
//! attributes accepts a single string parameter that indicates the name of the
//! database. This corresponds to the database name set as the database's
//! configuration key.
//!
//! The attribute can only be applied to unit-like structs with one type. The
//! internal type of the structure must implement [`Poolable`].
//!
//! ```rust
//! # extern crate rocket;
//! # #[macro_use] extern crate rocket_contrib;
//! # #[cfg(feature = "diesel_sqlite_pool")]
//! # mod test {
//! use rocket_contrib::databases::diesel;
//!
//! #[database("my_db")]
//! struct MyDatabase(diesel::SqliteConnection);
//! # }
//! ```
//!
//! Other databases can be used by specifying their respective [`Poolable`]
//! type:
//!
//! ```rust
//! # extern crate rocket;
//! # #[macro_use] extern crate rocket_contrib;
//! # #[cfg(feature = "postgres_pool")]
//! # mod test {
//! use rocket_contrib::databases::postgres;
//!
//! #[database("my_pg_db")]
//! struct MyPgDatabase(postgres::Client);
//! # }
//! ```
//!
//! The macro generates a [`FromRequest`] implementation for the decorated type,
//! allowing the type to be used as a request guard. This implementation
//! retrieves a connection from the database pool or fails with a
//! `Status::ServiceUnavailable` if no connections are available. The macro also
//! generates an implementation of the [`Deref`](std::ops::Deref) trait with
//! the internal `Poolable` type as the target.
//!
//! The macro will also generate two inherent methods on the decorated type:
//!
//! * `fn fairing() -> impl Fairing`
//!
//! Returns a fairing that initializes the associated database connection
//! pool.
//!
//! * `fn get_one(&Cargo) -> Option<Self>`
//!
//! Retrieves a connection from the configured pool. Returns `Some` as long
//! as `Self::fairing()` has been attached and there is at least one
//! connection in the pool.
//!
//! The fairing returned from the generated `fairing()` method _must_ be
//! attached for the request guard implementation to succeed. Putting the pieces
//! together, a use of the `#[database]` attribute looks as follows:
//!
//! ```rust
//! # extern crate rocket;
//! # #[macro_use] extern crate rocket_contrib;
//! #
//! # #[cfg(feature = "diesel_sqlite_pool")]
//! # mod test {
//! # use std::collections::HashMap;
//! # use rocket::config::{Config, Environment, Value};
//! #
//! use rocket_contrib::databases::diesel;
//!
//! #[database("my_db")]
//! struct MyDatabase(diesel::SqliteConnection);
//!
//! #[rocket::launch]
//! fn rocket() -> rocket::Rocket {
//! # let mut db_config = HashMap::new();
//! # let mut databases = HashMap::new();
//! #
//! # db_config.insert("url", Value::from("database.sqlite"));
//! # db_config.insert("pool_size", Value::from(10));
//! # databases.insert("my_db", Value::from(db_config));
//! #
//! # let config = Config::build(Environment::Development)
//! # .extra("databases", databases)
//! # .finalize()
//! # .unwrap();
//! #
//! rocket::custom(config).attach(MyDatabase::fairing())
//! }
//! # } fn main() {}
//! ```
//!
//! ## Handlers
//!
//! Finally, simply use your type as a request guard in a handler to retrieve a
//! connection to a given database:
//!
//! ```rust
//! # #[macro_use] extern crate rocket;
//! # #[macro_use] extern crate rocket_contrib;
//! #
//! # #[cfg(feature = "diesel_sqlite_pool")]
//! # mod test {
//! # use rocket_contrib::databases::diesel;
//! #[database("my_db")]
//! struct MyDatabase(diesel::SqliteConnection);
//!
//! #[get("/")]
//! fn my_handler(conn: MyDatabase) {
//! // ...
//! }
//! # }
//! ```
//!
//! The generated `Deref` implementation allows easy access to the inner
//! connection type:
//!
//! ```rust
//! # #[macro_use] extern crate rocket;
//! # #[macro_use] extern crate rocket_contrib;
//! #
//! # #[cfg(feature = "diesel_sqlite_pool")]
//! # mod test {
//! # use rocket_contrib::databases::diesel;
//! # type Data = ();
//! #[database("my_db")]
//! struct MyDatabase(diesel::SqliteConnection);
//!
//! fn load_from_db(conn: &diesel::SqliteConnection) -> Data {
//! // Do something with connection, return some data.
//! # ()
//! }
//!
//! #[get("/")]
//! fn my_handler(conn: MyDatabase) -> Data {
//! load_from_db(&*conn)
//! }
//! # }
//! ```
//!
//! # Database Support
//!
//! Built-in support is provided for many popular databases and drivers. Support
//! can be easily extended by [`Poolable`] implementations.
//!
//! ## Provided
//!
//! The list below includes all presently supported database adapters and their
//! corresponding [`Poolable`] type.
//!
// Note: Keep this table in sync with site/guite/6-state.md
//! | Kind | Driver | Version | `Poolable` Type | Feature |
//! |----------|-----------------------|-----------|--------------------------------|------------------------|
//! | MySQL | [Diesel] | `1` | [`diesel::MysqlConnection`] | `diesel_mysql_pool` |
//! | MySQL | [`rust-mysql-simple`] | `18` | [`mysql::Conn`] | `mysql_pool` |
//! | Postgres | [Diesel] | `1` | [`diesel::PgConnection`] | `diesel_postgres_pool` |
//! | Postgres | [Rust-Postgres] | `0.17` | [`postgres::Client`] | `postgres_pool` |
//! | Sqlite | [Diesel] | `1` | [`diesel::SqliteConnection`] | `diesel_sqlite_pool` |
//! | Sqlite | [`Rusqlite`] | `0.23` | [`rusqlite::Connection`] | `sqlite_pool` |
//! | Memcache | [`memcache`] | `0.14` | [`memcache::Client`] | `memcache_pool` |
//!
//! [Diesel]: https://diesel.rs
//! [`rusqlite::Connection`]: https://docs.rs/rusqlite/0.23.0/rusqlite/struct.Connection.html
//! [`diesel::SqliteConnection`]: http://docs.diesel.rs/diesel/prelude/struct.SqliteConnection.html
//! [`postgres::Client`]: https://docs.rs/postgres/0.17/postgres/struct.Client.html
//! [`diesel::PgConnection`]: http://docs.diesel.rs/diesel/pg/struct.PgConnection.html
//! [`mysql::Conn`]: https://docs.rs/mysql/18/mysql/struct.Conn.html
//! [`diesel::MysqlConnection`]: http://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
//! [`Rusqlite`]: https://github.com/jgallagher/rusqlite
//! [Rust-Postgres]: https://github.com/sfackler/rust-postgres
//! [`rust-mysql-simple`]: https://github.com/blackbeam/rust-mysql-simple
//! [`diesel::PgConnection`]: http://docs.diesel.rs/diesel/pg/struct.PgConnection.html
//! [`memcache`]: https://github.com/aisk/rust-memcache
//! [`memcache::Client`]: https://docs.rs/memcache/0.14/memcache/struct.Client.html
//!
//! The above table lists all the supported database adapters in this library.
//! In order to use particular `Poolable` type that's included in this library,
//! you must first enable the feature listed in the "Feature" column. The
//! interior type of your decorated database type should match the type in the
//! "`Poolable` Type" column.
//!
//! ## Extending
//!
//! Extending Rocket's support to your own custom database adapter (or other
//! database-like struct that can be pooled by `r2d2`) is as easy as
//! implementing the [`Poolable`] trait. See the documentation for [`Poolable`]
//! for more details on how to implement it.
//!
//! [`FromRequest`]: rocket::request::FromRequest
//! [request guards]: rocket::request::FromRequest
//! [`Poolable`]: crate::databases::Poolable
pub extern crate r2d2;
#[doc(hidden)]
pub use tokio::task::spawn_blocking;
#[cfg(any(feature = "diesel_sqlite_pool",
feature = "diesel_postgres_pool",
feature = "diesel_mysql_pool"))]
pub extern crate diesel;
use std::fmt::{self, Display, Formatter};
use std::marker::{Send, Sized};
use rocket::config::{self, Value};
use self::r2d2::ManageConnection;
#[doc(hidden)] pub use rocket_contrib_codegen::*;
#[cfg(feature = "postgres_pool")] pub extern crate postgres;
#[cfg(feature = "postgres_pool")] pub extern crate r2d2_postgres;
#[cfg(feature = "mysql_pool")] pub extern crate mysql;
#[cfg(feature = "mysql_pool")] pub extern crate r2d2_mysql;
#[cfg(feature = "sqlite_pool")] pub extern crate rusqlite;
#[cfg(feature = "sqlite_pool")] pub extern crate r2d2_sqlite;
#[cfg(feature = "memcache_pool")] pub extern crate memcache;
#[cfg(feature = "memcache_pool")] pub extern crate r2d2_memcache;
/// A structure representing a particular database configuration.
///
/// For the following configuration:
///
/// ```toml
/// [global.databases.my_database]
/// url = "postgres://root:root@localhost/my_database"
/// pool_size = 10
/// certs = "sample_cert.pem"
/// key = "key.pem"
/// ```
///
/// The following structure would be generated after calling
/// [`database_config`]`("my_database", &config)`:
///
/// ```rust,ignore
/// DatabaseConfig {
/// url: "dummy_db.sqlite",
/// pool_size: 10,
/// extras: {
/// "certs": String("certs.pem"),
/// "key": String("key.pem"),
/// },
/// }
/// ```
#[derive(Debug, Clone, PartialEq)]
pub struct DatabaseConfig<'a> {
    /// The connection URL specified in the Rocket configuration.
    ///
    /// Borrowed from the [`rocket::config::Config`] it was read from.
    pub url: &'a str,
    /// The size of the pool to be initialized. Defaults to the number of
    /// Rocket workers.
    pub pool_size: u32,
    /// Any extra options that are included in the configuration, **excluding**
    /// the url and pool_size. Adapter `Poolable` implementations may consult
    /// these values.
    pub extras: rocket::config::Map<String, Value>,
}
/// A wrapper around `r2d2::Error`s or a custom database error type.
///
/// This type is only relevant to implementors of the [`Poolable`] trait. See
/// the [`Poolable`] documentation for more information on how to use this type.
#[derive(Debug)]
pub enum DbError<T> {
    /// The custom error type to wrap alongside `r2d2::Error`.
    ///
    /// Typically the adapter's own connection/configuration error type.
    Custom(T),
    /// The error returned by an r2d2 pool.
    PoolError(r2d2::Error),
}
/// Error returned on invalid database configurations.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ConfigError {
    /// The `databases` configuration key is missing or is empty.
    MissingTable,
    /// The requested database configuration key is missing from the active
    /// configuration.
    MissingKey,
    /// The configuration associated with the key isn't a
    /// [`Table`](rocket::config::Table).
    MalformedConfiguration,
    /// The required `url` key is missing.
    MissingUrl,
    /// The value for `url` isn't a string.
    MalformedUrl,
    /// The `pool_size` exceeds `u32::max_value()` or is negative.
    /// Carries the rejected value as it appeared in the configuration.
    InvalidPoolSize(i64),
}
/// Retrieves the database configuration for the database named `name`.
///
/// This function is primarily used by the code generated by the `#[database]`
/// attribute.
///
/// # Example
///
/// Consider the following configuration:
///
/// ```toml
/// [global.databases]
/// my_db = { url = "db/db.sqlite", pool_size = 25 }
/// my_other_db = { url = "mysql://root:root@localhost/database" }
/// ```
///
/// The following example uses `database_config` to retrieve the configurations
/// for the `my_db` and `my_other_db` databases:
///
/// ```rust
/// # extern crate rocket;
/// # extern crate rocket_contrib;
/// #
/// # use std::{collections::BTreeMap, mem::drop};
/// # use rocket::{fairing::AdHoc, config::{Config, Environment, Value}};
/// use rocket_contrib::databases::{database_config, ConfigError};
///
/// # let mut databases = BTreeMap::new();
/// #
/// # let mut my_db = BTreeMap::new();
/// # my_db.insert("url".to_string(), Value::from("db/db.sqlite"));
/// # my_db.insert("pool_size".to_string(), Value::from(25));
/// #
/// # let mut my_other_db = BTreeMap::new();
/// # my_other_db.insert("url".to_string(),
/// # Value::from("mysql://root:root@localhost/database"));
/// #
/// # databases.insert("my_db".to_string(), Value::from(my_db));
/// # databases.insert("my_other_db".to_string(), Value::from(my_other_db));
/// #
/// # let config = Config::build(Environment::Development)
/// # .extra("databases", databases)
/// # .expect("custom config okay");
/// #
/// # rocket::custom(config).attach(AdHoc::on_attach("Testing", |mut rocket| async {
/// # {
/// let rocket_config = rocket.config().await;
/// let config = database_config("my_db", rocket_config).unwrap();
/// assert_eq!(config.url, "db/db.sqlite");
/// assert_eq!(config.pool_size, 25);
///
/// let other_config = database_config("my_other_db", rocket_config).unwrap();
/// assert_eq!(other_config.url, "mysql://root:root@localhost/database");
///
/// let error = database_config("invalid_db", rocket_config).unwrap_err();
/// assert_eq!(error, ConfigError::MissingKey);
/// # }
/// #
/// # Ok(rocket)
/// # }));
/// ```
pub fn database_config<'a>(
    name: &str,
    from: &'a config::Config
) -> Result<DatabaseConfig<'a>, ConfigError> {
    // Find the `databases` table, then the entry named `name`; both must exist
    // and the entry must itself be a table.
    let connection_config = from.get_table("databases")
        .map_err(|_| ConfigError::MissingTable)?
        .get(name)
        .ok_or(ConfigError::MissingKey)?
        .as_table()
        .ok_or(ConfigError::MalformedConfiguration)?;
    let maybe_url = connection_config.get("url")
        .ok_or(ConfigError::MissingUrl)?;
    let url = maybe_url.as_str().ok_or(ConfigError::MalformedUrl)?;
    // `pool_size` falls back to the number of configured Rocket workers.
    let pool_size = connection_config.get("pool_size")
        .and_then(Value::as_integer)
        .unwrap_or(from.workers as i64);
    if pool_size < 1 || pool_size > u32::max_value() as i64 {
        return Err(ConfigError::InvalidPoolSize(pool_size));
    }
    // Everything other than `url` and `pool_size` is surfaced via `extras`.
    let mut extras = connection_config.clone();
    extras.remove("url");
    extras.remove("pool_size");
    Ok(DatabaseConfig { url, pool_size: pool_size as u32, extras })
}
impl Display for ConfigError {
    // Note: the previous `impl<'a>` declared a lifetime parameter that was
    // never used by the impl; it has been removed.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            ConfigError::MissingTable => {
                write!(f, "A table named `databases` was not found for this configuration")
            },
            ConfigError::MissingKey => {
                write!(f, "An entry in the `databases` table was not found for this key")
            },
            ConfigError::MalformedConfiguration => {
                write!(f, "The configuration for this database is malformed")
            }
            ConfigError::MissingUrl => {
                write!(f, "The connection URL is missing for this database")
            },
            ConfigError::MalformedUrl => {
                write!(f, "The specified connection URL is malformed")
            },
            ConfigError::InvalidPoolSize(invalid_size) => {
                write!(f, "'{}' is not a valid value for `pool_size`", invalid_size)
            },
        }
    }
}
/// Trait implemented by `r2d2`-based database adapters.
///
/// # Provided Implementations
///
/// Implementations of `Poolable` are provided for the following types:
///
/// * `diesel::MysqlConnection`
/// * `diesel::PgConnection`
/// * `diesel::SqliteConnection`
/// * `postgres::Connection`
/// * `mysql::Conn`
/// * `rusqlite::Connection`
///
/// # Implementation Guide
///
/// As a r2d2-compatible database (or other resource) adapter provider,
/// implementing `Poolable` in your own library will enable Rocket users to
/// consume your adapter with its built-in connection pooling support.
///
/// ## Example
///
/// Consider a library `foo` with the following types:
///
/// * `foo::ConnectionManager`, which implements [`r2d2::ManageConnection`]
/// * `foo::Connection`, the `Connection` associated type of
/// `foo::ConnectionManager`
/// * `foo::Error`, errors resulting from manager instantiation
///
/// In order for Rocket to generate the required code to automatically provision
/// a r2d2 connection pool into application state, the `Poolable` trait needs to
/// be implemented for the connection type. The following example implements
/// `Poolable` for `foo::Connection`:
///
/// ```rust
/// use rocket_contrib::databases::{r2d2, DbError, DatabaseConfig, Poolable};
/// # mod foo {
/// # use std::fmt;
/// # use rocket_contrib::databases::r2d2;
/// # #[derive(Debug)] pub struct Error;
/// # impl std::error::Error for Error { }
/// # impl fmt::Display for Error {
/// # fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Ok(()) }
/// # }
/// #
/// # pub struct Connection;
/// # pub struct ConnectionManager;
/// #
/// # type Result<T> = std::result::Result<T, Error>;
/// #
/// # impl ConnectionManager {
/// # pub fn new(url: &str) -> Result<Self> { Err(Error) }
/// # }
/// #
/// # impl self::r2d2::ManageConnection for ConnectionManager {
/// # type Connection = Connection;
/// # type Error = Error;
/// # fn connect(&self) -> Result<Connection> { panic!(()) }
/// # fn is_valid(&self, _: &mut Connection) -> Result<()> { panic!() }
/// # fn has_broken(&self, _: &mut Connection) -> bool { panic!() }
/// # }
/// # }
/// #
/// impl Poolable for foo::Connection {
/// type Manager = foo::ConnectionManager;
/// type Error = DbError<foo::Error>;
///
/// fn pool(config: DatabaseConfig) -> Result<r2d2::Pool<Self::Manager>, Self::Error> {
/// let manager = foo::ConnectionManager::new(config.url)
/// .map_err(DbError::Custom)?;
///
/// r2d2::Pool::builder()
/// .max_size(config.pool_size)
/// .build(manager)
/// .map_err(DbError::PoolError)
/// }
/// }
/// ```
///
/// In this example, `ConnectionManager::new()` method returns a `foo::Error` on
/// failure. For convenience, the [`DbError`] enum is used to consolidate this
/// error type and the `r2d2::Error` type that can result from
/// `r2d2::Pool::builder()`.
///
/// In the event that a connection manager isn't fallible (as is the case with
/// Diesel's r2d2 connection manager, for instance), the associated error type
/// for the `Poolable` implementation can simply be `r2d2::Error` as this is the
/// only error that can result. For a more concrete example, consult Rocket's
/// existing implementations of [`Poolable`].
pub trait Poolable: Send + Sized + 'static {
    /// The associated connection manager for the given connection type.
    type Manager: ManageConnection<Connection=Self>;
    /// The associated error type in the event that constructing the connection
    /// manager and/or the connection pool fails.
    ///
    /// Infallible managers can use `r2d2::Error` directly; fallible ones can
    /// wrap their own error together with it via [`DbError`].
    type Error;
    /// Creates an `r2d2` connection pool for `Manager::Connection`, returning
    /// the pool on success.
    fn pool(config: DatabaseConfig<'_>) -> Result<r2d2::Pool<Self::Manager>, Self::Error>;
}
#[cfg(feature = "diesel_sqlite_pool")]
impl Poolable for diesel::SqliteConnection {
type Manager = diesel::r2d2::ConnectionManager<diesel::SqliteConnection>;
type Error = r2d2::Error;
fn pool(config: DatabaseConfig<'_>) -> Result<r2d2::Pool<Self::Manager>, Self::Error> {
let manager = diesel::r2d2::ConnectionManager::new(config.url);
r2d2::Pool::builder().max_size(config.pool_size).build(manager)
}
}
#[cfg(feature = "diesel_postgres_pool")]
impl Poolable for diesel::PgConnection {
type Manager = diesel::r2d2::ConnectionManager<diesel::PgConnection>;
type Error = r2d2::Error;
fn pool(config: DatabaseConfig<'_>) -> Result<r2d2::Pool<Self::Manager>, Self::Error> {
let manager = diesel::r2d2::ConnectionManager::new(config.url);
r2d2::Pool::builder().max_size(config.pool_size).build(manager)
}
}
#[cfg(feature = "diesel_mysql_pool")]
impl Poolable for diesel::MysqlConnection {
type Manager = diesel::r2d2::ConnectionManager<diesel::MysqlConnection>;
type Error = r2d2::Error;
fn pool(config: DatabaseConfig<'_>) -> Result<r2d2::Pool<Self::Manager>, Self::Error> {
let manager = diesel::r2d2::ConnectionManager::new(config.url);
r2d2::Pool::builder().max_size(config.pool_size).build(manager)
}
}
// TODO: Come up with a way to handle TLS
#[cfg(feature = "postgres_pool")]
impl Poolable for postgres::Client {
    type Manager = r2d2_postgres::PostgresConnectionManager<postgres::tls::NoTls>;
    type Error = DbError<postgres::Error>;
    /// Parses the URL into a postgres configuration (a parse failure becomes
    /// `DbError::Custom`), then builds a pool with TLS disabled.
    fn pool(config: DatabaseConfig<'_>) -> Result<r2d2::Pool<Self::Manager>, Self::Error> {
        let pg_config = config.url.parse().map_err(DbError::Custom)?;
        let manager =
            r2d2_postgres::PostgresConnectionManager::new(pg_config, postgres::tls::NoTls);
        r2d2::Pool::builder()
            .max_size(config.pool_size)
            .build(manager)
            .map_err(DbError::PoolError)
    }
}
#[cfg(feature = "mysql_pool")]
impl Poolable for mysql::Conn {
    type Manager = r2d2_mysql::MysqlConnectionManager;
    type Error = r2d2::Error;
    /// Builds an r2d2 pool of raw `mysql` connections; the URL is first
    /// turned into `mysql` options via `OptsBuilder::from_opts`.
    fn pool(config: DatabaseConfig<'_>) -> Result<r2d2::Pool<Self::Manager>, Self::Error> {
        let opts = mysql::OptsBuilder::from_opts(config.url);
        let manager = r2d2_mysql::MysqlConnectionManager::new(opts);
        r2d2::Pool::builder().max_size(config.pool_size).build(manager)
    }
}
#[cfg(feature = "sqlite_pool")]
impl Poolable for rusqlite::Connection {
    type Manager = r2d2_sqlite::SqliteConnectionManager;
    type Error = r2d2::Error;
    /// Builds an r2d2 pool of plain `rusqlite` connections backed by the
    /// database file named by `config.url`.
    fn pool(config: DatabaseConfig<'_>) -> Result<r2d2::Pool<Self::Manager>, Self::Error> {
        r2d2::Pool::builder()
            .max_size(config.pool_size)
            .build(r2d2_sqlite::SqliteConnectionManager::file(config.url))
    }
}
#[cfg(feature = "memcache_pool")]
impl Poolable for memcache::Client {
    type Manager = r2d2_memcache::MemcacheConnectionManager;
    // Unlike the diesel impls, pool build errors are wrapped in `DbError`.
    type Error = DbError<memcache::MemcacheError>;
    /// Builds an r2d2 pool of memcache clients from the configured URL.
    fn pool(config: DatabaseConfig<'_>) -> Result<r2d2::Pool<Self::Manager>, Self::Error> {
        let manager = r2d2_memcache::MemcacheConnectionManager::new(config.url);
        r2d2::Pool::builder().max_size(config.pool_size).build(manager).map_err(DbError::PoolError)
    }
}
#[cfg(test)]
mod tests {
    // Unit tests for `database_config`: parsing the `databases` extra out
    // of a Rocket `Config` and reporting each malformation distinctly.
    use std::collections::BTreeMap;
    use rocket::{Config, config::{Environment, Value}};
    use super::{ConfigError::*, database_config};
    #[test]
    fn no_database_entry_in_config_returns_error() {
        // No `databases` extra at all -> MissingTable.
        let config = Config::build(Environment::Development)
            .finalize()
            .unwrap();
        let database_config_result = database_config("dummy_db", &config);
        assert_eq!(Err(MissingTable), database_config_result);
    }
    #[test]
    fn no_matching_connection_returns_error() {
        // Laboriously setup the config extras
        let mut database_extra = BTreeMap::new();
        let mut connection_config = BTreeMap::new();
        connection_config.insert("url".to_string(), Value::from("dummy_db.sqlite"));
        connection_config.insert("pool_size".to_string(), Value::from(10));
        database_extra.insert("dummy_db".to_string(), Value::from(connection_config));
        let config = Config::build(Environment::Development)
            .extra("databases", database_extra)
            .finalize()
            .unwrap();
        // Asking for a database name that was never configured.
        let database_config_result = database_config("real_db", &config);
        assert_eq!(Err(MissingKey), database_config_result);
    }
    #[test]
    fn incorrectly_structured_config_returns_error() {
        let mut database_extra = BTreeMap::new();
        // An array where a table (map) is expected.
        let connection_config = vec!["url", "dummy_db.slqite"];
        database_extra.insert("dummy_db".to_string(), Value::from(connection_config));
        let config = Config::build(Environment::Development)
            .extra("databases", database_extra)
            .finalize()
            .unwrap();
        let database_config_result = database_config("dummy_db", &config);
        assert_eq!(Err(MalformedConfiguration), database_config_result);
    }
    #[test]
    fn missing_connection_string_returns_error() {
        let mut database_extra = BTreeMap::new();
        // A table with no `url` key at all.
        let connection_config: BTreeMap<String, Value> = BTreeMap::new();
        database_extra.insert("dummy_db", connection_config);
        let config = Config::build(Environment::Development)
            .extra("databases", database_extra)
            .finalize()
            .unwrap();
        let database_config_result = database_config("dummy_db", &config);
        assert_eq!(Err(MissingUrl), database_config_result);
    }
    #[test]
    fn invalid_connection_string_returns_error() {
        let mut database_extra = BTreeMap::new();
        let mut connection_config = BTreeMap::new();
        // `url` must be a string; an integer is rejected.
        connection_config.insert("url".to_string(), Value::from(42));
        database_extra.insert("dummy_db", connection_config);
        let config = Config::build(Environment::Development)
            .extra("databases", database_extra)
            .finalize()
            .unwrap();
        let database_config_result = database_config("dummy_db", &config);
        assert_eq!(Err(MalformedUrl), database_config_result);
    }
    #[test]
    fn negative_pool_size_returns_error() {
        let mut database_extra = BTreeMap::new();
        let mut connection_config = BTreeMap::new();
        connection_config.insert("url".to_string(), Value::from("dummy_db.sqlite"));
        connection_config.insert("pool_size".to_string(), Value::from(-1));
        database_extra.insert("dummy_db", connection_config);
        let config = Config::build(Environment::Development)
            .extra("databases", database_extra)
            .finalize()
            .unwrap();
        let database_config_result = database_config("dummy_db", &config);
        assert_eq!(Err(InvalidPoolSize(-1)), database_config_result);
    }
    #[test]
    fn pool_size_beyond_u32_max_returns_error() {
        let mut database_extra = BTreeMap::new();
        let mut connection_config = BTreeMap::new();
        let over_max = (u32::max_value()) as i64 + 1;
        connection_config.insert("url".to_string(), Value::from("dummy_db.sqlite"));
        connection_config.insert("pool_size".to_string(), Value::from(over_max));
        database_extra.insert("dummy_db", connection_config);
        let config = Config::build(Environment::Development)
            .extra("databases", database_extra)
            .finalize()
            .unwrap();
        let database_config_result = database_config("dummy_db", &config);
        // The out-of-range value is expected to be echoed back in the
        // error, not wrapped around to 0.
        assert_eq!(Err(InvalidPoolSize(over_max)), database_config_result);
    }
    #[test]
    fn happy_path_database_config() {
        let url = "dummy_db.sqlite";
        let pool_size = 10;
        let mut database_extra = BTreeMap::new();
        let mut connection_config = BTreeMap::new();
        connection_config.insert("url".to_string(), Value::from(url));
        connection_config.insert("pool_size".to_string(), Value::from(pool_size));
        database_extra.insert("dummy_db", connection_config);
        let config = Config::build(Environment::Development)
            .extra("databases", database_extra)
            .finalize()
            .unwrap();
        let database_config = database_config("dummy_db", &config).unwrap();
        assert_eq!(url, database_config.url);
        assert_eq!(pool_size, database_config.pool_size);
        assert_eq!(0, database_config.extras.len());
    }
    #[test]
    fn extras_do_not_contain_required_keys() {
        let url = "dummy_db.sqlite";
        let pool_size = 10;
        let mut database_extra = BTreeMap::new();
        let mut connection_config = BTreeMap::new();
        connection_config.insert("url".to_string(), Value::from(url));
        connection_config.insert("pool_size".to_string(), Value::from(pool_size));
        database_extra.insert("dummy_db", connection_config);
        let config = Config::build(Environment::Development)
            .extra("databases", database_extra)
            .finalize()
            .unwrap();
        let database_config = database_config("dummy_db", &config).unwrap();
        assert_eq!(url, database_config.url);
        assert_eq!(pool_size, database_config.pool_size);
        // Expected: the reserved keys are consumed, not duplicated into
        // the extras map.
        assert_eq!(false, database_config.extras.contains_key("url"));
        assert_eq!(false, database_config.extras.contains_key("pool_size"));
    }
    #[test]
    fn extra_values_are_placed_in_extras_map() {
        let url = "dummy_db.sqlite";
        let pool_size = 10;
        let tls_cert = "certs.pem";
        let tls_key = "key.pem";
        let mut database_extra = BTreeMap::new();
        let mut connection_config = BTreeMap::new();
        connection_config.insert("url".to_string(), Value::from(url));
        connection_config.insert("pool_size".to_string(), Value::from(pool_size));
        connection_config.insert("certs".to_string(), Value::from(tls_cert));
        connection_config.insert("key".to_string(), Value::from(tls_key));
        database_extra.insert("dummy_db", connection_config);
        let config = Config::build(Environment::Development)
            .extra("databases", database_extra)
            .finalize()
            .unwrap();
        let database_config = database_config("dummy_db", &config).unwrap();
        assert_eq!(url, database_config.url);
        assert_eq!(pool_size, database_config.pool_size);
        assert_eq!(true, database_config.extras.contains_key("certs"));
        assert_eq!(true, database_config.extras.contains_key("key"));
        println!("{:#?}", database_config);
    }
}
| 35.988798 | 110 | 0.636824 |
5d27ae0bea4f0c923e277e31cfd4ade8c34a6dfe
| 2,837 |
use super::{BuilderChain, DrunkardsWalkBuilder, XStart, YStart, AreaStartingPosition,
CullUnreachable, VoronoiSpawning, MetaMapBuilder, BuilderMap, TileType, DistantExit};
use rltk::RandomNumberGenerator;
use crate::map;
/// Assembles the builder chain for the "Limestone Caverns" level: a
/// winding drunkard's-walk cave that is culled to its reachable area,
/// spawned via Voronoi regions, given a distant exit, and finally
/// decorated by `CaveDecorator`.
pub fn limestone_cavern_builder(new_depth: i32, _rng: &mut rltk::RandomNumberGenerator, width: i32, height: i32) -> BuilderChain {
    let mut chain = BuilderChain::new(new_depth, width, height, "Limestone Caverns");
    chain.start_with(DrunkardsWalkBuilder::winding_passages());
    // Start is first placed at the center — presumably so CullUnreachable
    // keeps the central cavern (TODO confirm) — then moved to the left edge.
    chain.with(AreaStartingPosition::new(XStart::CENTER, YStart::CENTER));
    chain.with(CullUnreachable::new());
    chain.with(AreaStartingPosition::new(XStart::LEFT, YStart::CENTER));
    chain.with(VoronoiSpawning::new());
    chain.with(DistantExit::new());
    chain.with(CaveDecorator::new());
    chain
}
/// Meta map builder that dresses a finished cave with gravel, water and
/// stalactites/stalagmites.
pub struct CaveDecorator {}
impl MetaMapBuilder for CaveDecorator {
    fn build_map(&mut self, rng: &mut rltk::RandomNumberGenerator, build_data : &mut BuilderMap) {
        self.build(rng, build_data);
    }
}
impl CaveDecorator {
    #[allow(dead_code)]
    pub fn new() -> Box<CaveDecorator> {
        Box::new(CaveDecorator{})
    }
    /// Decorates the map in one pass: floor tiles may become gravel
    /// (1-in-6) or shallow water (1-in-10); wall tiles may become deep
    /// water or stalactites/stalagmites depending on how many orthogonal
    /// wall neighbours they have in the pre-decoration map.
    fn build(&mut self, rng : &mut RandomNumberGenerator, build_data : &mut BuilderMap) {
        // Neighbour counts are taken from an unmodified copy so edits made
        // earlier in this pass don't influence later decisions.
        let old_map = build_data.map.clone();
        for (idx,tt) in build_data.map.tiles.iter_mut().enumerate() {
            // Gravel Spawning
            if *tt == TileType::Floor && rng.roll_dice(1, 6)==1 {
                *tt = TileType::Gravel;
            } else if *tt == TileType::Floor && rng.roll_dice(1, 10)==1 {
                // Spawn passable pools
                *tt = TileType::ShallowWater;
            } else if *tt == TileType::Wall {
                // Spawn deep pools and stalactites
                // Count orthogonal wall neighbours, skipping edges.
                // NOTE(review): the `- 2` bounds exclude the last in-bounds
                // column/row from the +1 checks — confirm `- 1` wasn't
                // intended.
                let mut neighbors = 0;
                let x = idx as i32 % old_map.width;
                let y = idx as i32 / old_map.width;
                if x > 0 && old_map.tiles[idx-1] == TileType::Wall { neighbors += 1; }
                if x < old_map.width - 2 && old_map.tiles[idx+1] == TileType::Wall { neighbors += 1; }
                if y > 0 && old_map.tiles[idx-old_map.width as usize] == TileType::Wall { neighbors += 1; }
                if y < old_map.height - 2 && old_map.tiles[idx+old_map.width as usize] == TileType::Wall { neighbors += 1; }
                if neighbors == 2 {
                    *tt = TileType::DeepWater;
                } else if neighbors == 1 {
                    // 1-in-4 each for a stalactite or stalagmite; otherwise
                    // the wall is left as-is.
                    let roll = rng.roll_dice(1, 4);
                    match roll {
                        1 => *tt = TileType::Stalactite,
                        2 => *tt = TileType::Stalagmite,
                        _ => {}
                    }
                }
            }
        }
        build_data.take_snapshot();
        build_data.map.outdoors = false;
    }
}
| 42.984848 | 130 | 0.573141 |
1c97628654dd30ca2c3df5020a48053f212a2cea
| 8,901 |
//! Establish connection with FUSE kernel driver.
#![allow(
clippy::cast_possible_wrap,
clippy::cast_possible_truncation,
clippy::cast_sign_loss
)]
use futures::{
io::{AsyncRead, AsyncWrite},
ready,
task::{self, Poll},
};
use libc::{c_int, c_void, iovec};
use mio::{
unix::{EventedFd, UnixReady},
Evented, PollOpt, Ready, Token,
};
use polyfuse::io::Writer;
use std::{
cmp,
ffi::OsStr,
io::{self, IoSlice, IoSliceMut},
mem::{self, MaybeUninit},
os::unix::{
io::{AsRawFd, IntoRawFd, RawFd},
net::UnixDatagram,
process::CommandExt,
},
path::{Path, PathBuf},
pin::Pin,
process::Command,
ptr,
};
use tokio::io::PollEvented;
const FUSERMOUNT_PROG: &str = "fusermount";
const FUSE_COMMFD_ENV: &str = "_FUSE_COMMFD";
/// Invokes a libc function and converts the C convention of returning
/// `-1` on failure into an early `return Err(io::Error::last_os_error())`.
/// NOTE(review): expands to a `return`, so it is only usable inside
/// functions returning a compatible `io::Result`.
macro_rules! syscall {
    ($fn:ident ( $($arg:expr),* $(,)* ) ) => {{
        let res = unsafe { libc::$fn($($arg),*) };
        if res == -1 {
            return Err(io::Error::last_os_error());
        }
        res
    }};
}
/// A connection with the FUSE kernel driver.
#[derive(Debug)]
struct Connection {
    // Raw descriptor received from `fusermount`.
    fd: RawFd,
    // Mountpoint to unmount on drop; `None` for clones so only the
    // original connection triggers the unmount.
    mountpoint: Option<PathBuf>,
}
impl Connection {
    /// Duplicates the underlying descriptor with `dup(2)`.
    ///
    /// The clone carries no mountpoint, so dropping it closes only its
    /// own descriptor and never unmounts the filesystem.
    fn try_clone(&self) -> io::Result<Self> {
        let clonefd = syscall! { dup(self.fd) };
        Ok(Self {
            fd: clonefd,
            mountpoint: None,
        })
    }
    /// Lazily unmounts by spawning `fusermount -u -q -z`.  Idempotent:
    /// the mountpoint is `take`n, so a second call is a no-op.
    fn unmount(&mut self) -> io::Result<()> {
        if let Some(mountpoint) = self.mountpoint.take() {
            Command::new(FUSERMOUNT_PROG)
                .args(&["-u", "-q", "-z", "--"])
                .arg(&mountpoint)
                .status()?;
        }
        Ok(())
    }
}
impl Drop for Connection {
    fn drop(&mut self) {
        // Best-effort unmount; errors cannot be reported from `drop`.
        let _e = self.unmount();
        unsafe {
            libc::close(self.fd);
        }
    }
}
impl AsRawFd for Connection {
    // Exposes the raw FUSE descriptor for registration and syscalls.
    fn as_raw_fd(&self) -> RawFd {
        self.fd
    }
}
// Readiness registration simply delegates to mio's `EventedFd` wrapper
// around the raw descriptor.
impl Evented for Connection {
    fn register(
        &self,
        poll: &mio::Poll,
        token: Token,
        interest: Ready,
        opts: PollOpt,
    ) -> io::Result<()> {
        EventedFd(&self.fd).register(poll, token, interest, opts)
    }
    fn reregister(
        &self,
        poll: &mio::Poll,
        token: Token,
        interest: Ready,
        opts: PollOpt,
    ) -> io::Result<()> {
        EventedFd(&self.fd).reregister(poll, token, interest, opts)
    }
    fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
        EventedFd(&self.fd).deregister(poll)
    }
}
/// Switches `fd` into nonblocking mode by OR-ing `O_NONBLOCK` into its
/// current status flags.
fn set_nonblocking(fd: RawFd) -> io::Result<()> {
    let flags = syscall! { fcntl(fd, libc::F_GETFL, 0) };
    syscall! { fcntl(fd, libc::F_SETFL, flags | libc::O_NONBLOCK) };
    Ok(())
}
/// Forks and `exec`s `fusermount`, which performs the privileged mount
/// and sends the opened FUSE descriptor back over a Unix socket pair.
///
/// Returns the child's pid and the receiving end of the pair.
fn exec_fusermount(mountpoint: &Path, mountopts: &[&OsStr]) -> io::Result<(c_int, UnixDatagram)> {
    let (reader, writer) = UnixDatagram::pair()?;
    let pid = syscall! { fork() };
    if pid == 0 {
        // Child process: clear CLOEXEC on the writer so it survives exec,
        // then hand its fd number to fusermount via _FUSE_COMMFD.
        drop(reader);
        let writer = writer.into_raw_fd();
        unsafe { libc::fcntl(writer, libc::F_SETFD, 0) };
        let mut fusermount = Command::new(FUSERMOUNT_PROG);
        fusermount.env(FUSE_COMMFD_ENV, writer.to_string());
        fusermount.args(mountopts);
        fusermount.arg("--").arg(mountpoint);
        // `exec` only returns on failure.
        return Err(fusermount.exec());
    }
    Ok((pid, reader))
}
/// Receives the file descriptor sent by `fusermount` over the Unix
/// socket, transferred as an `SCM_RIGHTS` control message.
fn receive_fd(reader: &mut UnixDatagram) -> io::Result<RawFd> {
    // One dummy payload byte; the fd rides in the control message.
    let mut buf = [0u8; 1];
    let mut iov = libc::iovec {
        iov_base: buf.as_mut_ptr() as *mut c_void,
        iov_len: 1,
    };
    // Control-message buffer: header followed by exactly one fd.
    #[repr(C)]
    struct Cmsg {
        header: libc::cmsghdr,
        fd: c_int,
    }
    let mut cmsg = MaybeUninit::<Cmsg>::uninit();
    let mut msg = libc::msghdr {
        msg_name: ptr::null_mut(),
        msg_namelen: 0,
        msg_iov: &mut iov,
        msg_iovlen: 1,
        msg_control: cmsg.as_mut_ptr() as *mut c_void,
        msg_controllen: mem::size_of_val(&cmsg),
        msg_flags: 0,
    };
    syscall! { recvmsg(reader.as_raw_fd(), &mut msg, 0) };
    // Reject truncated control messages before touching `cmsg`.
    if msg.msg_controllen < mem::size_of_val(&cmsg) {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            "too short control message length",
        ));
    }
    let cmsg = unsafe { cmsg.assume_init() };
    // Only SCM_RIGHTS messages carry a file descriptor.
    if cmsg.header.cmsg_type != libc::SCM_RIGHTS {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            "got control message with unknown type",
        ));
    }
    Ok(cmsg.fd)
}
// ==== Channel ====
/// Asynchronous I/O object that communicates with the FUSE kernel driver.
///
/// Wraps the `Connection` in tokio's `PollEvented` for readiness-based
/// nonblocking I/O.
#[derive(Debug)]
pub struct Channel(PollEvented<Connection>);
impl Channel {
    /// Establish a connection with the FUSE kernel driver.
    pub fn open(mountpoint: &Path, mountopts: &[&OsStr]) -> io::Result<Self> {
        let (_pid, mut reader) = exec_fusermount(mountpoint, mountopts)?;
        let fd = receive_fd(&mut reader)?;
        set_nonblocking(fd)?;
        // Unmounting is executed when `reader` is dropped and the connection
        // with `fusermount` is closed.
        let _ = reader.into_raw_fd();
        let conn = PollEvented::new(Connection {
            fd,
            mountpoint: Some(mountpoint.into()),
        })?;
        Ok(Self(conn))
    }
    /// Runs `f` once the descriptor is read-ready (or in an error state);
    /// a `WouldBlock` result clears readiness and returns `Pending`.
    fn poll_read_with<F, R>(&mut self, cx: &mut task::Context<'_>, f: F) -> Poll<io::Result<R>>
    where
        F: FnOnce(&mut Connection) -> io::Result<R>,
    {
        let mut ready = Ready::readable();
        ready.insert(UnixReady::error());
        ready!(self.0.poll_read_ready(cx, ready))?;
        match f(self.0.get_mut()) {
            Ok(ret) => Poll::Ready(Ok(ret)),
            Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
                self.0.clear_read_ready(cx, ready)?;
                Poll::Pending
            }
            Err(e) => Poll::Ready(Err(e)),
        }
    }
    /// Write-side analogue of `poll_read_with`.
    fn poll_write_with<F, R>(&mut self, cx: &mut task::Context<'_>, f: F) -> Poll<io::Result<R>>
    where
        F: FnOnce(&mut Connection) -> io::Result<R>,
    {
        ready!(self.0.poll_write_ready(cx))?;
        match f(self.0.get_mut()) {
            Ok(ret) => Poll::Ready(Ok(ret)),
            Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
                self.0.clear_write_ready(cx)?;
                Poll::Pending
            }
            Err(e) => {
                tracing::debug!("write error: {}", e);
                Poll::Ready(Err(e))
            }
        }
    }
    /// Attempt to create a clone of this channel.
    pub fn try_clone(&self) -> io::Result<Self> {
        let conn = self.0.get_ref().try_clone()?;
        Ok(Self(PollEvented::new(conn)?))
    }
}
impl AsyncRead for Channel {
    /// Reads from the FUSE descriptor into `dst` with a single `read(2)`.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
        dst: &mut [u8],
    ) -> Poll<io::Result<usize>> {
        self.poll_read_with(cx, |conn| {
            let len = syscall! {
                read(
                    conn.as_raw_fd(), //
                    dst.as_mut_ptr() as *mut c_void,
                    dst.len(),
                )
            };
            Ok(len as usize)
        })
    }
    /// Vectored variant using `readv(2)`; the iovec count is clamped to
    /// `c_int::MAX` to fit the syscall's argument type.
    fn poll_read_vectored(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
        dst: &mut [IoSliceMut],
    ) -> Poll<io::Result<usize>> {
        self.poll_read_with(cx, |conn| {
            let len = syscall! {
                readv(
                    conn.as_raw_fd(), //
                    dst.as_mut_ptr() as *mut iovec,
                    cmp::min(dst.len(), c_int::max_value() as usize) as c_int,
                )
            };
            Ok(len as usize)
        })
    }
}
impl AsyncWrite for Channel {
    /// Writes `src` to the FUSE descriptor with a single `write(2)`.
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
        src: &[u8],
    ) -> Poll<io::Result<usize>> {
        self.poll_write_with(cx, |conn| {
            let res = syscall! {
                write(
                    conn.as_raw_fd(), //
                    src.as_ptr() as *const c_void,
                    src.len(),
                )
            };
            Ok(res as usize)
        })
    }
    /// Vectored variant using `writev(2)`; iovec count clamped as above.
    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
        src: &[IoSlice],
    ) -> Poll<io::Result<usize>> {
        self.poll_write_with(cx, |conn| {
            let res = syscall! {
                writev(
                    conn.as_raw_fd(), //
                    src.as_ptr() as *const iovec,
                    cmp::min(src.len(), c_int::max_value() as usize) as c_int,
                )
            };
            Ok(res as usize)
        })
    }
    // No buffering happens in this type, so flush/close complete at once.
    fn poll_flush(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(Ok(()))
    }
    fn poll_close(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(Ok(()))
    }
}
impl Writer for Channel {}
| 26.179412 | 98 | 0.510729 |
4b17de12f75498b43738ac0660801f286901a6a6
| 4,969 |
mod utils;
use crypto_market_type::MarketType;
use crypto_pair::{get_market_type, normalize_currency, normalize_pair};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use utils::http_get;
const EXCHANGE_NAME: &'static str = "binance";
/// Generic envelope for Binance REST responses carrying a `symbols` array.
#[derive(Serialize, Deserialize)]
struct BinanceResponse<T: Sized> {
    symbols: Vec<T>,
}
// Spot, Future and Swap markets
#[derive(Serialize, Deserialize)]
#[allow(non_snake_case)]
struct Market {
    // Field names mirror the Binance JSON payload, hence the camelCase.
    symbol: String,
    baseAsset: String,
    quoteAsset: String,
    // Catch-all for payload fields this test suite does not inspect.
    #[serde(flatten)]
    extra: HashMap<String, Value>,
}
/// An option contract from the Binance vanilla options API.
#[derive(Serialize, Deserialize)]
#[allow(non_snake_case)]
struct OptionMarket {
    symbol: String,
    quoteAsset: String,
    // Underlying symbol; ends with `quoteAsset` (the tests strip that
    // suffix to recover the base asset).
    underlying: String,
    #[serde(flatten)]
    extra: HashMap<String, Value>,
}
// see <https://binance-docs.github.io/apidocs/spot/en/#exchange-information>
/// Downloads and deserializes the spot exchange-info symbol list.
fn fetch_spot_markets_raw() -> Vec<Market> {
    let body = http_get("https://api.binance.com/api/v3/exchangeInfo").unwrap();
    serde_json::from_str::<BinanceResponse<Market>>(&body)
        .unwrap()
        .symbols
}
// see <https://binance-docs.github.io/apidocs/delivery/en/#exchange-information>
/// Downloads and deserializes the coin-margined (inverse) symbol list.
fn fetch_inverse_markets_raw() -> Vec<Market> {
    let body = http_get("https://dapi.binance.com/dapi/v1/exchangeInfo").unwrap();
    serde_json::from_str::<BinanceResponse<Market>>(&body)
        .unwrap()
        .symbols
}
// see <https://binance-docs.github.io/apidocs/futures/en/#exchange-information>
/// Downloads and deserializes the USDT-margined (linear) symbol list.
fn fetch_linear_markets_raw() -> Vec<Market> {
    let body = http_get("https://fapi.binance.com/fapi/v1/exchangeInfo").unwrap();
    serde_json::from_str::<BinanceResponse<Market>>(&body)
        .unwrap()
        .symbols
}
/// Downloads the vanilla-options symbol list.  The options endpoint wraps
/// its payload in a `{code, msg, data}` envelope, unlike the other APIs.
fn fetch_option_markets_raw() -> Vec<OptionMarket> {
    #[derive(Serialize, Deserialize)]
    #[allow(non_snake_case)]
    struct OptionData {
        timezone: String,
        serverTime: i64,
        optionContracts: Vec<Value>,
        optionAssets: Vec<Value>,
        optionSymbols: Vec<OptionMarket>,
    }
    #[derive(Serialize, Deserialize)]
    #[allow(non_snake_case)]
    struct BinanceOptionResponse {
        code: i64,
        msg: String,
        data: OptionData,
    }
    let txt =
        http_get("https://voptions.binance.com/options-api/v1/public/exchange/symbols").unwrap();
    let resp = serde_json::from_str::<BinanceOptionResponse>(&txt).unwrap();
    resp.data.optionSymbols
}
#[test]
fn verify_spot_symbols() {
    // Every spot symbol must normalize to "BASE/QUOTE" and be classified
    // as a spot market.
    for market in fetch_spot_markets_raw().iter() {
        let actual = normalize_pair(&market.symbol, EXCHANGE_NAME).unwrap();
        let base = normalize_currency(&market.baseAsset, EXCHANGE_NAME);
        let quote = normalize_currency(&market.quoteAsset, EXCHANGE_NAME);
        assert_eq!(actual, format!("{}/{}", base, quote));
        assert_eq!(
            MarketType::Spot,
            get_market_type(&market.symbol, EXCHANGE_NAME, Some(true))
        );
    }
}
#[test]
fn verify_inverse_symbols() {
    // Every coin-margined symbol must normalize to "BASE/QUOTE" and be
    // classified as either an inverse swap or an inverse future.
    let markets = fetch_inverse_markets_raw();
    for market in markets.iter() {
        let pair = normalize_pair(&market.symbol, EXCHANGE_NAME).unwrap();
        let pair_expected = format!(
            "{}/{}",
            normalize_currency(&market.baseAsset, EXCHANGE_NAME),
            normalize_currency(&market.quoteAsset, EXCHANGE_NAME)
        );
        assert_eq!(pair, pair_expected);
        let market_type = get_market_type(&market.symbol, EXCHANGE_NAME, None);
        assert!(market_type == MarketType::InverseSwap || market_type == MarketType::InverseFuture);
    }
}
#[test]
fn verify_linear_symbols() {
    // Every USDT-margined symbol must normalize to "BASE/QUOTE" and be
    // classified as either a linear swap or a linear future.
    for market in fetch_linear_markets_raw().iter() {
        let actual = normalize_pair(&market.symbol, EXCHANGE_NAME).unwrap();
        let base = normalize_currency(&market.baseAsset, EXCHANGE_NAME);
        let quote = normalize_currency(&market.quoteAsset, EXCHANGE_NAME);
        assert_eq!(actual, format!("{}/{}", base, quote));
        let market_type = get_market_type(&market.symbol, EXCHANGE_NAME, None);
        assert!(market_type == MarketType::LinearSwap || market_type == MarketType::LinearFuture);
    }
}
#[test]
fn verify_option_symbols() {
    // Option symbols recover their base asset by stripping the quote
    // suffix off `underlying`; classification must be EuropeanOption.
    let markets = fetch_option_markets_raw();
    for market in markets.iter() {
        let pair = normalize_pair(&market.symbol, EXCHANGE_NAME).unwrap();
        let base = market
            .underlying
            .strip_suffix(market.quoteAsset.as_str())
            .unwrap();
        let pair_expected = format!(
            "{}/{}",
            normalize_currency(base, EXCHANGE_NAME),
            normalize_currency(&market.quoteAsset, EXCHANGE_NAME)
        );
        assert_eq!(pair, pair_expected);
        assert_eq!(
            MarketType::EuropeanOption,
            get_market_type(&market.symbol, EXCHANGE_NAME, None)
        );
    }
}
| 31.251572 | 100 | 0.65486 |
ef3482faeff84494dfcb5b08e21be7f106b1f6d5
| 4,571 |
// Copyright (c) 2015 Alcatel-Lucent, (c) 2016 Nokia
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use bambou::{Error, RestEntity, Session};
use reqwest::Response;
use std::collections::BTreeMap;
use serde_json;
pub use metadata::Metadata;
pub use globalmetadata::GlobalMetadata;
/// REST entity for a Nuage avatar; fields map to the JSON wire names via
/// serde renames.
#[derive(Serialize, Deserialize, Default, Debug)]
pub struct Avatar<'a> {
    // The session is runtime-only state, never (de)serialized.
    #[serde(skip_serializing)]
    #[serde(skip_deserializing)]
    _session: Option<&'a Session>,
    #[serde(rename="ID")]
    id: Option<String>,
    #[serde(rename="parentID")]
    parent_id: Option<String>,
    #[serde(rename="parentType")]
    parent_type: Option<String>,
    owner: Option<String>,
    #[serde(rename="lastUpdatedBy")]
    pub last_updated_by: Option<String>,
    #[serde(rename="entityScope")]
    pub entity_scope: Option<String>,
    #[serde(rename="externalID")]
    pub external_id: Option<String>,
    #[serde(rename="type")]
    pub type_: Option<String>,
}
impl<'a> RestEntity<'a> for Avatar<'a> {
    /// Reloads this avatar's attributes from the server.
    fn fetch(&mut self) -> Result<Response, Error> {
        match self._session {
            Some(session) => session.fetch_entity(self),
            None => Err(Error::NoSession),
        }
    }
    /// Persists local changes to the server.
    fn save(&mut self) -> Result<Response, Error> {
        match self._session {
            Some(session) => session.save(self),
            None => Err(Error::NoSession),
        }
    }
    /// Deletes this avatar on the server, consuming it.
    fn delete(self) -> Result<Response, Error> {
        match self._session {
            Some(session) => session.delete(self),
            None => Err(Error::NoSession),
        }
    }
    /// Creates `child` under this avatar.
    fn create_child<C>(&self, child: &mut C) -> Result<Response, Error>
        where C: RestEntity<'a>
    {
        match self._session {
            Some(session) => session.create_child(self, child),
            None => Err(Error::NoSession),
        }
    }
    /// URL path segment for a single avatar.
    fn path() -> &'static str {
        "avatar"
    }
    /// URL path segment for avatar collections.
    fn group_path() -> &'static str {
        "avatars"
    }
    fn is_root(&self) -> bool {
        false
    }
    fn id(&self) -> Option<&str> {
        // Borrow the inner `String` as `&str`; the former
        // `and_then(|id| Some(id.as_str()))` was a roundabout `map`
        // (clippy: bind_instead_of_map).
        self.id.as_ref().map(|id| id.as_str())
    }
    /// Fetches all children of type `R` into `children`.
    fn fetch_children<R>(&self, children: &mut Vec<R>) -> Result<Response, Error>
        where R: RestEntity<'a>
    {
        match self._session {
            Some(session) => session.fetch_children(self, children),
            None => Err(Error::NoSession),
        }
    }
    fn get_session(&self) -> Option<&Session> {
        self._session
    }
    fn set_session(&mut self, session: &'a Session) {
        self._session = Some(session);
    }
}
impl<'a> Avatar<'a> {
    /// Fetches the `Metadata` children of this avatar.
    pub fn fetch_metadatas(&self) -> Result<Vec<Metadata>, Error> {
        let mut children: Vec<Metadata> = Vec::new();
        self.fetch_children(&mut children)?;
        Ok(children)
    }
    /// Fetches the `GlobalMetadata` children of this avatar.
    pub fn fetch_globalmetadatas(&self) -> Result<Vec<GlobalMetadata>, Error> {
        let mut children: Vec<GlobalMetadata> = Vec::new();
        self.fetch_children(&mut children)?;
        Ok(children)
    }
}
| 30.885135 | 87 | 0.640123 |
eb47fa780f871187a1b9d211408805fb99ac483f
| 8,440 |
/*
Rule 110 is a simple cellular automaton that is universal.
For more information, see https://en.wikipedia.org/wiki/Rule_110
This solver reconstructs a space-time of cell states from known states.
It works remarkably well, considering the simple strategy of picking
the most constrained cell.
*/
extern crate quickbacktrack;
use quickbacktrack::{BackTrackSolver, Puzzle, SolveSettings};
#[derive(Clone)]
pub struct Rule110 {
    // Space-time grid, one row per generation.  Cell values: 2 = "on"
    // (printed 'o'), 1 = "off" (printed '.'), 0 = unknown.
    pub cells: Vec<Vec<u8>>,
}
impl Rule110 {
    /// Computes the next generation from the last row of `cells`.
    ///
    /// Rows are treated as rings: neighbour lookups wrap at both ends.
    pub fn next(&mut self) -> Vec<u8> {
        let last = self.cells.last().unwrap();
        let n = last.len();
        let mut new_row = vec![0; n];
        for i in 0..n {
            let input = (
                last[(i + n - 1) % n],
                last[i],
                last[(i + 1) % n]
            );
            new_row[i] = rule(input);
        }
        new_row
    }
    /// Returns `false` when assigning `val` at `pos` contradicts a known
    /// cell: each of the three affected cells in the next row is checked,
    /// as is the cell value implied by the previous row.
    pub fn is_satisfied(&self, pos: [usize; 2], val: u8) -> bool {
        let row = pos[0];
        let col = pos[1] as isize;
        let n = self.cells[row].len() as isize;
        if row + 1 < self.cells.len() {
            // Lookup that substitutes the candidate value at `col`.
            let f = |ind: isize| {
                let map_ind = ((col + ind + n) % n) as usize;
                if map_ind == col as usize { val }
                else { self.cells[row][map_ind] }
            };
            // The cell at `col` feeds three cells of the next row:
            // [o o x] [o x o] [x o o]
            for i in -1..2 {
                let input = (
                    f(i - 1),
                    f(i),
                    f(i + 1),
                );
                let col_next = ((col + n + i) % n) as usize;
                let new_value = rule(input);
                let old_value = self.cells[row + 1][col_next];
                match (new_value, old_value) {
                    // Either side unknown: no contradiction (yet).
                    (_, 0) => {}
                    (0, _) => {}
                    (a, b) if a == b => {}
                    (_, _) => return false,
                }
            }
        }
        // Check that the previous row actually yields `val`.
        if row > 0 {
            let f = |ind: isize| {
                let map_ind = ((col + ind + n) % n) as usize;
                self.cells[row - 1][map_ind]
            };
            let input = (
                f(-1),
                f(0),
                f(1),
            );
            match (rule(input), val) {
                (_, 0) => {}
                (0, _) => {}
                (a, b) if a == b => {}
                (_, _) => return false,
            }
        }
        true
    }
    /// Lists the candidate values (1 or 2) consistent with the known cells.
    pub fn possible(&self, pos: [usize; 2]) -> Vec<u8> {
        let mut res = vec![];
        for v in 1..3 {
            if self.is_satisfied(pos, v) {
                res.push(v);
            }
        }
        res
    }
    /// Finds an unknown cell with the fewest candidates — the most
    /// constrained cell, a standard backtracking branch heuristic.
    pub fn find_min_empty(&self) -> Option<[usize; 2]> {
        let mut min = None;
        let mut min_pos = None;
        for i in 0..self.cells.len() {
            for j in 0..self.cells[i].len() {
                if self.cells[i][j] == 0 {
                    let possible = self.possible([i, j]);
                    // `>=` means later cells win ties.
                    if min.is_none() || min.unwrap() >= possible.len() {
                        min = Some(possible.len());
                        min_pos = Some([i, j]);
                    }
                }
            }
        }
        // Idiomatic tail expression instead of an explicit `return`.
        min_pos
    }
}
/// Rule 110 transition extended to three-valued cells: 2 = "on",
/// 1 = "off", 0 = unknown.  Fully known triples follow the classic
/// Rule 110 table; a triple containing unknowns yields a definite output
/// only when every completion of the unknowns agrees, and 0 otherwise.
fn rule(state: (u8, u8, u8)) -> u8 {
    match state {
        // Triples whose every completion turns the cell off (1).
        (2, 2, 2) | (2, 1, 1) | (1, 1, 1) | (0, 1, 1) => 1,
        // Triples whose every completion turns the cell on (2).
        (2, 2, 1)
        | (2, 1, 2)
        | (1, 2, 2)
        | (1, 2, 1)
        | (1, 1, 2)
        | (0, 2, 1)
        | (0, 1, 2)
        | (1, 2, 0)
        | (1, 0, 2) => 2,
        // Everything else — including any triple with two or more
        // unknowns — stays undetermined.
        _ => 0,
    }
}
impl Puzzle for Rule110 {
    type Pos = [usize; 2];
    type Val = u8;
    /// Constraint propagation: repeatedly fills every cell that has
    /// exactly one possible value, until a full pass makes no progress.
    fn solve_simple<F: FnMut(&mut Self, Self::Pos, Self::Val)>(&mut self, mut f: F) {
        loop {
            let mut found_any = false;
            for i in 0..self.cells.len() {
                for j in 0..self.cells[i].len() {
                    if self.cells[i][j] != 0 { continue; }
                    let possible = self.possible([i, j]);
                    if possible.len() == 1 {
                        f(self, [i, j], possible[0]);
                        found_any = true;
                    }
                }
            }
            if !found_any { break; }
        }
    }
    fn set(&mut self, pos: [usize; 2], val: u8) {
        self.cells[pos[0]][pos[1]] = val;
    }
    fn get(&self, pos: [usize; 2]) -> u8 {
        self.cells[pos[0]][pos[1]]
    }
    /// Solved when every cell is known and every cell satisfies the
    /// local Rule 110 constraints.
    fn is_solved(&self) -> bool {
        // All cells must be non-empty.
        for row in &self.cells {
            for col in row {
                if *col == 0 { return false; }
            }
        }
        // All cells must satisfy the constraints.
        for i in 0..self.cells.len() {
            for j in 0..self.cells[i].len() {
                if !self.is_satisfied([i, j], self.cells[i][j]) {
                    return false;
                }
            }
        }
        true
    }
    /// Erases from `self` every cell that is known in `other` (used to
    /// display the difference between two states).
    fn remove(&mut self, other: &Rule110) {
        for i in 0..self.cells.len() {
            for j in 0..self.cells[i].len() {
                if other.cells[i][j] != 0 {
                    self.cells[i][j] = 0;
                }
            }
        }
    }
    /// Renders the grid: 'o' for 2 (on), '.' for 1 (off), ' ' for unknown.
    fn print(&self) {
        println!("");
        for row in &self.cells {
            for cell in row {
                if *cell == 2 { print!("o"); }
                else if *cell == 1 { print!("."); }
                else { print!(" "); }
            }
            println!("")
        }
        println!("");
    }
}
fn main() {
    // Partially known space-time grid: 0 = unknown, 1 = off, 2 = on.
    // The solver must reconstruct all unknown cells consistently with
    // Rule 110.
    let x = Rule110 {
        cells: vec![
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            vec![1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0, 2, 0],
        ]
    };
    let settings = SolveSettings::new()
        .solve_simple(true)
        .debug(false)
        .difference(false)
        .sleep_ms(50)
        ;
    let solver = BackTrackSolver::new(x, settings);
    // Branch on the most constrained cell, trying its possible values.
    let difference = solver.solve(|s| s.find_min_empty(), |s, p| s.possible(p))
        .expect("Expected solution").puzzle;
    println!("Solution:");
    difference.print();
}
/// Small hand-made instance: first and last rows fully known, the two
/// middle rows partially unknown.
pub fn example1() -> Rule110 {
    Rule110 {
        cells: vec![
            vec![1, 1, 1, 2, 1],
            vec![1, 0, 0, 0, 1],
            vec![1, 0, 0, 0, 1],
            vec![2, 2, 1, 2, 1],
        ]
    }
}
/// Harder hand-made instance: only a fragment of the first row and the
/// entire last row are known.
pub fn example2() -> Rule110 {
    Rule110 {
        cells: vec![
            vec![1, 1, 0, 0, 0],
            vec![0, 0, 0, 0, 0],
            vec![0, 0, 0, 0, 0],
            vec![2, 2, 1, 2, 1],
        ]
    }
}
| 28.133333 | 85 | 0.386493 |
39b1fe74b2559ef630e65fd8b03c627970590dfa
| 220 |
// functions2.rs
// Make me compile! Execute `rustlings hint functions2` for hints :)
fn main() {
    // Ring the phone three times.
    call_me(3);
}
/// Prints one "Ring!" line per call, numbered 1 through `num`.
fn call_me(num: i64) {
    for ring in 1..=num {
        println!("Ring! Call number {}", ring);
    }
}
| 18.333333 | 69 | 0.568182 |
1ee79832d3d3f45ee8aa613fefc6d4f88af355d2
| 6,918 |
/// Defines the fields required to initiate a STREAM payment.
/// Next field: 4
// NOTE(review): prost-generated message — hand edits here will be lost
// on the next codegen run; change the .proto instead.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SendPaymentRequest {
    /// The destination PP to pay.
    #[prost(string, tag = "1")]
    pub destination_payment_pointer: std::string::String,
    /// The amount of this payment in the units and scale of the account.
    #[prost(uint64, tag = "2")]
    pub amount: u64,
    /// TODO: Should the client be able to specify this?
    /// The number of seconds to wait for this payment to complete.
    #[prost(uint64, tag = "3")]
    pub timeout_seconds: u64,
    /// Account Id of the sender
    #[prost(string, tag = "4")]
    pub account_id: std::string::String,
}
/// Defines the fields that are returned after a SendPayment RPC has completed.
/// Next field: 5
// NOTE(review): generated by prost-build from a .proto file; the "Next
// field" counter above was stale (tags 1-4 are all in use below), so it
// now reads 5 — regenerate from the .proto rather than hand-editing.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SendPaymentResponse {
    /// The original amount that was requested to be sent.
    #[prost(uint64, tag = "1")]
    pub original_amount: u64,
    /// The actual amount, in the receivers units, that was delivered to the receiver
    #[prost(uint64, tag = "2")]
    pub amount_delivered: u64,
    /// The actual amount, in the senders units, that was sent to the receiver
    #[prost(uint64, tag = "3")]
    pub amount_sent: u64,
    /// Indicates if the payment was completed successfully.
    #[prost(bool, tag = "4")]
    pub successful_payment: bool,
}
#[doc = r" Generated client implementations."]
// NOTE(review): this module is tonic-build generated code; do not edit by
// hand — regenerate from the service's .proto definition instead.
pub mod ilp_over_http_service_client {
    #![allow(unused_variables, dead_code, missing_docs)]
    use tonic::codegen::*;
    #[doc = " RPCs available to interact with Hermes."]
    pub struct IlpOverHttpServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl IlpOverHttpServiceClient<tonic::transport::Channel> {
        #[doc = r" Attempt to create a new client by connecting to a given endpoint."]
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: std::convert::TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> IlpOverHttpServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::BoxBody>,
        T::ResponseBody: Body + HttpBody + Send + 'static,
        T::Error: Into<StdError>,
        <T::ResponseBody as HttpBody>::Error: Into<StdError> + Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_interceptor(inner: T, interceptor: impl Into<tonic::Interceptor>) -> Self {
            let inner = tonic::client::Grpc::with_interceptor(inner, interceptor);
            Self { inner }
        }
        #[doc = " Send a payment through Hermes to a given payment pointer"]
        pub async fn send_money(
            &mut self,
            request: impl tonic::IntoRequest<super::SendPaymentRequest>,
        ) -> Result<tonic::Response<super::SendPaymentResponse>, tonic::Status> {
            // Wait until the underlying transport is ready before issuing
            // the unary call; readiness failures surface as Unknown status.
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(
                    tonic::Code::Unknown,
                    format!("Service was not ready: {}", e.into()),
                )
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/org.interledger.stream.proto.IlpOverHttpService/SendMoney",
            );
            self.inner.unary(request.into_request(), path, codec).await
        }
    }
    impl<T: Clone> Clone for IlpOverHttpServiceClient<T> {
        fn clone(&self) -> Self {
            Self {
                inner: self.inner.clone(),
            }
        }
    }
}
/// Return fields of a balance request
// NOTE(review): prost-build generated message; regenerate from the .proto
// rather than hand-editing. Balance fields are signed 64-bit integers in
// the account's asset units at `asset_scale`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBalanceResponse {
    #[prost(string, tag = "1")]
    pub account_id: std::string::String,
    #[prost(string, tag = "2")]
    pub asset_code: std::string::String,
    #[prost(int32, tag = "3")]
    pub asset_scale: i32,
    #[prost(int64, tag = "4")]
    pub net_balance: i64,
    #[prost(int64, tag = "5")]
    pub prepaid_amount: i64,
    #[prost(int64, tag = "6")]
    pub clearing_balance: i64,
}
/// Request message for the BalanceService `GetBalance` RPC; identifies the
/// account whose balance is being queried.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBalanceRequest {
    #[prost(string, tag = "1")]
    pub account_id: std::string::String,
}
#[doc = r" Generated client implementations."]
// NOTE(review): this module is tonic-build generated code; do not edit by
// hand — regenerate from the service's .proto definition instead.
pub mod balance_service_client {
    #![allow(unused_variables, dead_code, missing_docs)]
    use tonic::codegen::*;
    #[doc = " RPCs available to interact with Hermes."]
    pub struct BalanceServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl BalanceServiceClient<tonic::transport::Channel> {
        #[doc = r" Attempt to create a new client by connecting to a given endpoint."]
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: std::convert::TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> BalanceServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::BoxBody>,
        T::ResponseBody: Body + HttpBody + Send + 'static,
        T::Error: Into<StdError>,
        <T::ResponseBody as HttpBody>::Error: Into<StdError> + Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_interceptor(inner: T, interceptor: impl Into<tonic::Interceptor>) -> Self {
            let inner = tonic::client::Grpc::with_interceptor(inner, interceptor);
            Self { inner }
        }
        #[doc = " Get the balance of a certain account on a connector"]
        pub async fn get_balance(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBalanceRequest>,
        ) -> Result<tonic::Response<super::GetBalanceResponse>, tonic::Status> {
            // Wait until the underlying transport is ready before issuing
            // the unary call; readiness failures surface as Unknown status.
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(
                    tonic::Code::Unknown,
                    format!("Service was not ready: {}", e.into()),
                )
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/org.interledger.stream.proto.BalanceService/GetBalance",
            );
            self.inner.unary(request.into_request(), path, codec).await
        }
    }
    impl<T: Clone> Clone for BalanceServiceClient<T> {
        fn clone(&self) -> Self {
            Self {
                inner: self.inner.clone(),
            }
        }
    }
}
| 39.084746 | 95 | 0.587887 |
89d159f97782129632add8479fd234d8528dcebe
| 2,437 |
use super::{rocket, TemplateContext};
use rocket::local::{Client, LocalResponse};
use rocket::http::Method::*;
use rocket::http::Status;
use rocket_contrib::templates::Template;
// Builds a fresh Rocket client, dispatches a single request with the given
// method and path, and hands both the client and the response to the
// supplied test closure.
macro_rules! dispatch {
    ($method:expr, $path:expr, $test_fn:expr) => ({
        let client = Client::new(rocket()).unwrap();
        $test_fn(&client, client.req($method, $path).dispatch());
    })
}
#[test]
fn test_root() {
    // Check that the redirect works.
    // GET and HEAD on "/" should 303 to the default name route with an
    // empty body and a single Location header.
    for method in &[Get, Head] {
        dispatch!(*method, "/", |_: &Client, mut response: LocalResponse<'_>| {
            assert_eq!(response.status(), Status::SeeOther);
            assert!(response.body().is_none());
            let location: Vec<_> = response.headers().get("Location").collect();
            assert_eq!(location, vec!["/hello/Unknown"]);
        });
    }
    // Check that other request methods are not accepted (and instead caught).
    // Each one should render the 404 template with the request path in context.
    for method in &[Post, Put, Delete, Options, Trace, Connect, Patch] {
        dispatch!(*method, "/", |client: &Client, mut response: LocalResponse<'_>| {
            let mut map = std::collections::HashMap::new();
            map.insert("path", "/");
            let expected = Template::show(client.rocket(), "error/404", &map).unwrap();
            assert_eq!(response.status(), Status::NotFound);
            assert_eq!(response.body_string(), Some(expected));
        });
    }
}
#[test]
fn test_name() {
    // Check that the /hello/<name> route works.
    // The URL-encoded segment should be decoded ("Jack%20Daniels" ->
    // "Jack Daniels") and rendered through the "index" template.
    dispatch!(Get, "/hello/Jack%20Daniels", |client: &Client, mut response: LocalResponse<'_>| {
        let context = TemplateContext {
            title: "Hello",
            name: Some("Jack Daniels".into()),
            items: vec!["One", "Two", "Three"],
            parent: "layout",
        };
        let expected = Template::show(client.rocket(), "index", &context).unwrap();
        assert_eq!(response.status(), Status::Ok);
        assert_eq!(response.body_string(), Some(expected));
    });
}
#[test]
fn test_404() {
    // Check that the error catcher works.
    // "/hello/" (empty name segment) matches no route, so the 404 catcher
    // should render the error template with the request path in context.
    dispatch!(Get, "/hello/", |client: &Client, mut response: LocalResponse<'_>| {
        let mut map = std::collections::HashMap::new();
        map.insert("path", "/hello/");
        let expected = Template::show(client.rocket(), "error/404", &map).unwrap();
        assert_eq!(response.status(), Status::NotFound);
        assert_eq!(response.body_string(), Some(expected));
    });
}
| 34.814286 | 96 | 0.587197 |
8ab9ca451871b930d2bf7496650d2e50736ebaf0
| 32,494 |
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::env;
use std::ffi::OsString;
use std::io::prelude::*;
use std::io;
use std::path::{Path, PathBuf};
use std::panic::{self, AssertUnwindSafe};
use std::process::Command;
use std::str;
use rustc_data_structures::sync::Lrc;
use std::sync::{Arc, Mutex};
use testing;
use rustc_lint;
use rustc::hir;
use rustc::hir::intravisit;
use rustc::session::{self, CompileIncomplete, config};
use rustc::session::config::{OutputType, OutputTypes, Externs};
use rustc::session::search_paths::{SearchPaths, PathKind};
use rustc_metadata::dynamic_lib::DynamicLibrary;
use tempdir::TempDir;
use rustc_driver::{self, driver, Compilation};
use rustc_driver::driver::phase_2_configure_and_expand;
use rustc_metadata::cstore::CStore;
use rustc_resolve::MakeGlobMap;
use syntax::ast;
use syntax::codemap::CodeMap;
use syntax::feature_gate::UnstableFeatures;
use syntax::with_globals;
use syntax_pos::{BytePos, DUMMY_SP, Pos, Span, FileName};
use errors;
use errors::emitter::ColorConfig;
use clean::Attributes;
use html::markdown;
/// Crate-level options that influence how doctests are assembled,
/// scraped from `#![doc(test(..))]` attributes by `scrape_test_config`.
#[derive(Clone, Default)]
pub struct TestOptions {
    // When true, no `extern crate <name>;` line is injected into doctests.
    pub no_crate_inject: bool,
    // Extra crate attributes (without the `#![...]` shell) prepended to
    // every doctest.
    pub attrs: Vec<String>,
}
/// Parses and expands the crate at `input_path`, walks its HIR to collect
/// every doctest, and hands the resulting test list to libtest's
/// `test_main`. Returns 0 (the process exit code) on completion.
pub fn run(input_path: &Path,
           cfgs: Vec<String>,
           libs: SearchPaths,
           externs: Externs,
           mut test_args: Vec<String>,
           crate_name: Option<String>,
           maybe_sysroot: Option<PathBuf>,
           display_warnings: bool,
           linker: Option<PathBuf>)
           -> isize {
    let input = config::Input::File(input_path.to_owned());
    // Build a dylib-flavored session; lints are capped at Allow because we
    // only need to *find* doctests here, not diagnose the crate.
    let sessopts = config::Options {
        maybe_sysroot: maybe_sysroot.clone().or_else(
            || Some(env::current_exe().unwrap().parent().unwrap().parent().unwrap().to_path_buf())),
        search_paths: libs.clone(),
        crate_types: vec![config::CrateTypeDylib],
        externs: externs.clone(),
        unstable_features: UnstableFeatures::from_environment(),
        lint_cap: Some(::rustc::lint::Level::Allow),
        actually_rustdoc: true,
        ..config::basic_options().clone()
    };
    let codemap = Lrc::new(CodeMap::new(sessopts.file_path_mapping()));
    let handler =
        errors::Handler::with_tty_emitter(ColorConfig::Auto,
                                          true, false,
                                          Some(codemap.clone()));
    let mut sess = session::build_session_(
        sessopts, Some(input_path.to_owned()), handler, codemap.clone(),
    );
    let trans = rustc_driver::get_trans(&sess);
    let cstore = CStore::new(trans.metadata_loader());
    rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
    sess.parse_sess.config =
        config::build_configuration(&sess, config::parse_cfgspecs(cfgs.clone()))
;
    let krate = panictry!(driver::phase_1_parse_input(&driver::CompileController::basic(),
                                                      &sess,
                                                      &input));
    let driver::ExpansionResult { defs, mut hir_forest, .. } = {
        phase_2_configure_and_expand(
            &sess,
            &cstore,
            krate,
            None,
            "rustdoc-test",
            None,
            MakeGlobMap::No,
            |_| Ok(()),
        ).expect("phase_2_configure_and_expand aborted in rustdoc!")
    };
    let crate_name = crate_name.unwrap_or_else(|| {
        ::rustc_trans_utils::link::find_crate_name(None, &hir_forest.krate().attrs, &input)
    });
    // Honor crate-level #![doc(test(..))] options when assembling doctests.
    let opts = scrape_test_config(hir_forest.krate());
    let mut collector = Collector::new(crate_name,
                                       cfgs,
                                       libs,
                                       externs,
                                       false,
                                       opts,
                                       maybe_sysroot,
                                       Some(codemap),
                                       None,
                                       linker);
    {
        // Walk the whole HIR; the collector records one libtest test per
        // testable code block found in doc comments.
        let map = hir::map::map_crate(&sess, &cstore, &mut hir_forest, &defs);
        let krate = map.krate();
        let mut hir_collector = HirCollector {
            sess: &sess,
            collector: &mut collector,
            map: &map
        };
        hir_collector.visit_testable("".to_string(), &krate.attrs, |this| {
            intravisit::walk_crate(this, krate);
        });
    }
    test_args.insert(0, "rustdoctest".to_string());
    testing::test_main(&test_args,
                       collector.tests.into_iter().collect(),
                       testing::Options::new().display_output(display_warnings));
    0
}
// Look for #![doc(test(no_crate_inject))], used by crates in the std facade
/// Scans the crate-level attributes for `#![doc(test(..))]` lists and
/// returns the doctest options they request: `no_crate_inject`, and any
/// `attr(...)` entries (kept as pretty-printed source text) to prepend to
/// every doctest.
fn scrape_test_config(krate: &::rustc::hir::Crate) -> TestOptions {
    use syntax::print::pprust;
    let mut opts = TestOptions {
        no_crate_inject: false,
        attrs: Vec::new(),
    };
    // All `test(...)` sub-lists found inside crate-level `#[doc(...)]`.
    let test_attrs: Vec<_> = krate.attrs.iter()
        .filter(|a| a.check_name("doc"))
        .flat_map(|a| a.meta_item_list().unwrap_or_else(Vec::new))
        .filter(|a| a.check_name("test"))
        .collect();
    let attrs = test_attrs.iter().flat_map(|a| a.meta_item_list().unwrap_or(&[]));
    for attr in attrs {
        if attr.check_name("no_crate_inject") {
            opts.no_crate_inject = true;
        }
        if attr.check_name("attr") {
            if let Some(l) = attr.meta_item_list() {
                for item in l {
                    opts.attrs.push(pprust::meta_list_item_to_string(item));
                }
            }
        }
    }
    opts
}
/// Compiles a single doctest (and runs the produced executable unless
/// `no_run`), panicking — which libtest reports as a test failure — on any
/// mismatch with the expectations (`should_panic`, `compile_fail`,
/// `error_codes`).
fn run_test(test: &str, cratename: &str, filename: &FileName, line: usize,
            cfgs: Vec<String>, libs: SearchPaths,
            externs: Externs,
            should_panic: bool, no_run: bool, as_test_harness: bool,
            compile_fail: bool, mut error_codes: Vec<String>, opts: &TestOptions,
            maybe_sysroot: Option<PathBuf>,
            linker: Option<PathBuf>) {
    // the test harness wants its own `main` & top level functions, so
    // never wrap the test in `fn main() { ... }`
    let (test, line_offset) = make_test(test, Some(cratename), as_test_harness, opts);
    // FIXME(#44940): if doctests ever support path remapping, then this filename
    // needs to be the result of CodeMap::span_to_unmapped_path
    let input = config::Input::Str {
        name: filename.to_owned(),
        input: test.to_owned(),
    };
    let outputs = OutputTypes::new(&[(OutputType::Exe, None)]);
    let sessopts = config::Options {
        maybe_sysroot: maybe_sysroot.or_else(
            || Some(env::current_exe().unwrap().parent().unwrap().parent().unwrap().to_path_buf())),
        search_paths: libs,
        crate_types: vec![config::CrateTypeExecutable],
        output_types: outputs,
        externs,
        cg: config::CodegenOptions {
            prefer_dynamic: true,
            linker,
            .. config::basic_codegen_options()
        },
        test: as_test_harness,
        unstable_features: UnstableFeatures::from_environment(),
        ..config::basic_options().clone()
    };
    // Shuffle around a few input and output handles here. We're going to pass
    // an explicit handle into rustc to collect output messages, but we also
    // want to catch the error message that rustc prints when it fails.
    //
    // We take our thread-local stderr (likely set by the test runner) and replace
    // it with a sink that is also passed to rustc itself. When this function
    // returns the output of the sink is copied onto the output of our own thread.
    //
    // The basic idea is to not use a default Handler for rustc, and then also
    // not print things by default to the actual stderr.
    struct Sink(Arc<Mutex<Vec<u8>>>);
    impl Write for Sink {
        fn write(&mut self, data: &[u8]) -> io::Result<usize> {
            Write::write(&mut *self.0.lock().unwrap(), data)
        }
        fn flush(&mut self) -> io::Result<()> { Ok(()) }
    }
    // On drop (even during unwinding) copies the captured output back onto
    // the original stream so diagnostics are never lost.
    struct Bomb(Arc<Mutex<Vec<u8>>>, Box<Write+Send>);
    impl Drop for Bomb {
        fn drop(&mut self) {
            let _ = self.1.write_all(&self.0.lock().unwrap());
        }
    }
    let data = Arc::new(Mutex::new(Vec::new()));
    // Offset the codemap so diagnostics point at the doctest's position in
    // the original source file, not in the synthesized program.
    let codemap = Lrc::new(CodeMap::new_doctest(
        sessopts.file_path_mapping(), filename.clone(), line as isize - line_offset as isize
    ));
    let emitter = errors::emitter::EmitterWriter::new(box Sink(data.clone()),
                                                      Some(codemap.clone()),
                                                      false,
                                                      false);
    let old = io::set_panic(Some(box Sink(data.clone())));
    let _bomb = Bomb(data.clone(), old.unwrap_or(box io::stdout()));
    // Compile the code
    let diagnostic_handler = errors::Handler::with_emitter(true, false, box emitter);
    let mut sess = session::build_session_(
        sessopts, None, diagnostic_handler, codemap,
    );
    let trans = rustc_driver::get_trans(&sess);
    let cstore = CStore::new(trans.metadata_loader());
    rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
    let outdir = Mutex::new(TempDir::new("rustdoctest").ok().expect("rustdoc needs a tempdir"));
    let libdir = sess.target_filesearch(PathKind::All).get_lib_path();
    let mut control = driver::CompileController::basic();
    sess.parse_sess.config =
        config::build_configuration(&sess, config::parse_cfgspecs(cfgs.clone()));
    let out = Some(outdir.lock().unwrap().path().to_path_buf());
    if no_run {
        control.after_analysis.stop = Compilation::Stop;
    }
    let res = panic::catch_unwind(AssertUnwindSafe(|| {
        driver::compile_input(
            trans,
            &sess,
            &cstore,
            &None,
            &input,
            &out,
            &None,
            None,
            &control
        )
    }));
    // A compiler panic or a hard error both count as "did not compile";
    // an intentional early stop (after_analysis, for no_run) counts as ok.
    let compile_result = match res {
        Ok(Ok(())) | Ok(Err(CompileIncomplete::Stopped)) => Ok(()),
        Err(_) | Ok(Err(CompileIncomplete::Errored(_))) => Err(())
    };
    match (compile_result, compile_fail) {
        (Ok(()), true) => {
            panic!("test compiled while it wasn't supposed to")
        }
        (Ok(()), false) => {}
        (Err(()), true) => {
            if error_codes.len() > 0 {
                // Drop every expected error code that actually appeared in
                // the captured compiler output; leftovers are reported below.
                let out = String::from_utf8(data.lock().unwrap().to_vec()).unwrap();
                error_codes.retain(|err| !out.contains(err));
            }
        }
        (Err(()), false) => {
            panic!("couldn't compile the test")
        }
    }
    if error_codes.len() > 0 {
        panic!("Some expected error codes were not found: {:?}", error_codes);
    }
    if no_run { return }
    // Run the code!
    //
    // We're careful to prepend the *target* dylib search path to the child's
    // environment to ensure that the target loads the right libraries at
    // runtime. It would be a sad day if the *host* libraries were loaded as a
    // mistake.
    let mut cmd = Command::new(&outdir.lock().unwrap().path().join("rust_out"));
    let var = DynamicLibrary::envvar();
    let newpath = {
        let path = env::var_os(var).unwrap_or(OsString::new());
        let mut path = env::split_paths(&path).collect::<Vec<_>>();
        path.insert(0, libdir.clone());
        env::join_paths(path).unwrap()
    };
    cmd.env(var, &newpath);
    match cmd.output() {
        Err(e) => panic!("couldn't run the test: {}{}", e,
                        if e.kind() == io::ErrorKind::PermissionDenied {
                            " - maybe your tempdir is mounted with noexec?"
                        } else { "" }),
        Ok(out) => {
            if should_panic && out.status.success() {
                panic!("test executable succeeded when it should have failed");
            } else if !should_panic && !out.status.success() {
                panic!("test executable failed:\n{}\n{}\n",
                       str::from_utf8(&out.stdout).unwrap_or(""),
                       str::from_utf8(&out.stderr).unwrap_or(""));
            }
        }
    }
}
/// Makes the test file. Also returns the number of lines before the code begins
///
/// The synthesized program is assembled in order from: `#![allow(unused)]`
/// (unless crate-level test attrs were supplied), the `#![doc(test(attr))]`
/// attributes, any crate attributes found in the example itself, an
/// injected `extern crate <name>;` when appropriate, and finally the
/// example body — wrapped in `fn main() { ... }` unless the example already
/// has one or `dont_insert_main` is set. `line_offset` counts only the
/// injected lines, so diagnostics can be mapped back to the source.
pub fn make_test(s: &str,
                 cratename: Option<&str>,
                 dont_insert_main: bool,
                 opts: &TestOptions)
                 -> (String, usize) {
    let (crate_attrs, everything_else) = partition_source(s);
    let everything_else = everything_else.trim();
    let mut line_offset = 0;
    let mut prog = String::new();
    if opts.attrs.is_empty() {
        // If there aren't any attributes supplied by #![doc(test(attr(...)))], then allow some
        // lints that are commonly triggered in doctests. The crate-level test attributes are
        // commonly used to make tests fail in case they trigger warnings, so having this there in
        // that case may cause some tests to pass when they shouldn't have.
        prog.push_str("#![allow(unused)]\n");
        line_offset += 1;
    }
    // Next, any attributes that came from the crate root via #![doc(test(attr(...)))].
    for attr in &opts.attrs {
        prog.push_str(&format!("#![{}]\n", attr));
        line_offset += 1;
    }
    // Now push any outer attributes from the example, assuming they
    // are intended to be crate attributes.
    prog.push_str(&crate_attrs);
    // Don't inject `extern crate std` because it's already injected by the
    // compiler.
    if !s.contains("extern crate") && !opts.no_crate_inject && cratename != Some("std") {
        if let Some(cratename) = cratename {
            // Only inject the extern crate line if the example actually
            // mentions the crate by name.
            if s.contains(cratename) {
                prog.push_str(&format!("extern crate {};\n", cratename));
                line_offset += 1;
            }
        }
    }
    // FIXME (#21299): prefer libsyntax or some other actual parser over this
    // best-effort ad hoc approach
    let already_has_main = s.lines()
        .map(|line| {
            // Strip trailing `//` comments so "fn main" inside a comment
            // doesn't count.
            let comment = line.find("//");
            if let Some(comment_begins) = comment {
                &line[0..comment_begins]
            } else {
                line
            }
        })
        .any(|code| code.contains("fn main"));
    if dont_insert_main || already_has_main {
        prog.push_str(everything_else);
    } else {
        prog.push_str("fn main() {\n");
        line_offset += 1;
        prog.push_str(everything_else);
        prog.push_str("\n}");
    }
    info!("final test program: {}", prog);
    (prog, line_offset)
}
// FIXME(aburka): use a real parser to deal with multiline attributes
/// Splits `s` into (crate-attribute header, body).
///
/// Leading lines that look like crate attributes (`#![...]`), `extern
/// crate` statements, or blank lines go into the first String; everything
/// from the first "real" code line onward (including any later
/// header-looking lines) goes into the second. Each kept line is
/// newline-terminated.
fn partition_source(s: &str) -> (String, String) {
    let mut after_header = false;
    let mut before = String::new();
    let mut after = String::new();
    for line in s.lines() {
        let trimline = line.trim();
        // `chars().all(char::is_whitespace)` is the stable spelling of the
        // unstable `UnicodeStr::is_whitespace` the original relied on
        // (true for empty strings, matching the old behavior).
        let header = trimline.chars().all(char::is_whitespace) ||
            trimline.starts_with("#![") ||
            trimline.starts_with("#[macro_use] extern crate") ||
            trimline.starts_with("extern crate");
        if !header || after_header {
            // Once a non-header line is seen, everything belongs to the body.
            after_header = true;
            after.push_str(line);
            after.push_str("\n");
        } else {
            before.push_str(line);
            before.push_str("\n");
        }
    }
    (before, after)
}
/// Accumulates libtest test descriptors for every doctest discovered while
/// walking a crate's HIR or a markdown file.
pub struct Collector {
    pub tests: Vec<testing::TestDescAndFn>,
    // The name of the test displayed to the user, separated by `::`.
    //
    // In tests from Rust source, this is the path to the item
    // e.g. `["std", "vec", "Vec", "push"]`.
    //
    // In tests from a markdown file, this is the titles of all headers (h1~h6)
    // of the sections that contain the code block, e.g. if the markdown file is
    // written as:
    //
    // ``````markdown
    // # Title
    //
    // ## Subtitle
    //
    // ```rust
    // assert!(true);
    // ```
    // ``````
    //
    // the `names` vector of that test will be `["Title", "Subtitle"]`.
    names: Vec<String>,
    // Compilation inputs forwarded verbatim to `run_test` for each doctest.
    cfgs: Vec<String>,
    libs: SearchPaths,
    externs: Externs,
    // True when collecting from markdown (headers become test names).
    use_headers: bool,
    cratename: String,
    opts: TestOptions,
    maybe_sysroot: Option<PathBuf>,
    // Span of the code block currently being collected.
    position: Span,
    codemap: Option<Lrc<CodeMap>>,
    // Fallback filename used when no codemap is available (markdown mode).
    filename: Option<PathBuf>,
    linker: Option<PathBuf>,
}
impl Collector {
    /// Creates an empty collector that will forward the given compilation
    /// settings to every collected doctest.
    pub fn new(cratename: String, cfgs: Vec<String>, libs: SearchPaths, externs: Externs,
               use_headers: bool, opts: TestOptions, maybe_sysroot: Option<PathBuf>,
               codemap: Option<Lrc<CodeMap>>, filename: Option<PathBuf>,
               linker: Option<PathBuf>) -> Collector {
        Collector {
            tests: Vec::new(),
            names: Vec::new(),
            cfgs,
            libs,
            externs,
            use_headers,
            cratename,
            opts,
            maybe_sysroot,
            position: DUMMY_SP,
            codemap,
            filename,
            linker,
        }
    }
    /// Builds the user-visible test name: "<file> - <path::to::item> (line N)".
    fn generate_name(&self, line: usize, filename: &FileName) -> String {
        format!("{} - {} (line {})", filename, self.names.join("::"), line)
    }
    /// Registers one doctest as a libtest `DynTestFn` that will compile and
    /// run the snippet (via `run_test`) on a fresh rustc thread.
    pub fn add_test(&mut self, test: String,
                    should_panic: bool, no_run: bool, should_ignore: bool,
                    as_test_harness: bool, compile_fail: bool, error_codes: Vec<String>,
                    line: usize, filename: FileName, allow_fail: bool) {
        let name = self.generate_name(line, &filename);
        // Clone everything the closure needs, since it must be 'static.
        let cfgs = self.cfgs.clone();
        let libs = self.libs.clone();
        let externs = self.externs.clone();
        let cratename = self.cratename.to_string();
        let opts = self.opts.clone();
        let maybe_sysroot = self.maybe_sysroot.clone();
        let linker = self.linker.clone();
        debug!("Creating test {}: {}", name, test);
        self.tests.push(testing::TestDescAndFn {
            desc: testing::TestDesc {
                name: testing::DynTestName(name),
                ignore: should_ignore,
                // compiler failures are test failures
                should_panic: testing::ShouldPanic::No,
                allow_fail,
            },
            testfn: testing::DynTestFn(box move || {
                // Hand the current panic/print hooks over to the rustc
                // thread so compiler output is captured there.
                let panic = io::set_panic(None);
                let print = io::set_print(None);
                match {
                    rustc_driver::in_rustc_thread(move || with_globals(move || {
                        io::set_panic(panic);
                        io::set_print(print);
                        run_test(&test,
                                 &cratename,
                                 &filename,
                                 line,
                                 cfgs,
                                 libs,
                                 externs,
                                 should_panic,
                                 no_run,
                                 as_test_harness,
                                 compile_fail,
                                 error_codes,
                                 &opts,
                                 maybe_sysroot,
                                 linker)
                    }))
                } {
                    Ok(()) => (),
                    // Re-raise panics from the rustc thread on this thread
                    // so libtest sees the failure.
                    Err(err) => panic::resume_unwind(err),
                }
            }),
        });
    }
    /// Returns the 0-based line of the current position, or 0 without a codemap.
    pub fn get_line(&self) -> usize {
        if let Some(ref codemap) = self.codemap {
            let line = self.position.lo().to_usize();
            let line = codemap.lookup_char_pos(BytePos(line as u32)).line;
            if line > 0 { line - 1 } else { line }
        } else {
            0
        }
    }
    pub fn set_position(&mut self, position: Span) {
        self.position = position;
    }
    /// Resolves the filename for the current position, preferring a path
    /// relative to the current directory; falls back to the configured
    /// markdown filename, then to a generic "input" name.
    pub fn get_filename(&self) -> FileName {
        if let Some(ref codemap) = self.codemap {
            let filename = codemap.span_to_filename(self.position);
            if let FileName::Real(ref filename) = filename {
                if let Ok(cur_dir) = env::current_dir() {
                    if let Ok(path) = filename.strip_prefix(&cur_dir) {
                        return path.to_owned().into();
                    }
                }
            }
            filename
        } else if let Some(ref filename) = self.filename {
            filename.clone().into()
        } else {
            FileName::Custom("input".to_owned())
        }
    }
    /// Records a markdown header as part of the test-name path (markdown
    /// mode only); non-identifier characters are replaced with `_`.
    pub fn register_header(&mut self, name: &str, level: u32) {
        if self.use_headers {
            // we use these headings as test names, so it's good if
            // they're valid identifiers.
            let name = name.chars().enumerate().map(|(i, c)| {
                    if (i == 0 && c.is_xid_start()) ||
                        (i != 0 && c.is_xid_continue()) {
                        c
                    } else {
                        '_'
                    }
                }).collect::<String>();
            // Here we try to efficiently assemble the header titles into the
            // test name in the form of `h1::h2::h3::h4::h5::h6`.
            //
            // Suppose originally `self.names` contains `[h1, h2, h3]`...
            let level = level as usize;
            if level <= self.names.len() {
                // ... Consider `level == 2`. All headers in the lower levels
                // are irrelevant in this new level. So we should reset
                // `self.names` to contain headers until <h2>, and replace that
                // slot with the new name: `[h1, name]`.
                self.names.truncate(level);
                self.names[level - 1] = name;
            } else {
                // ... On the other hand, consider `level == 5`. This means we
                // need to extend `self.names` to contain five headers. We fill
                // in the missing level (<h4>) with `_`. Thus `self.names` will
                // become `[h1, h2, h3, "_", name]`.
                if level - 1 > self.names.len() {
                    self.names.resize(level - 1, "_".to_owned());
                }
                self.names.push(name);
            }
        }
    }
}
/// HIR visitor that feeds every testable doc comment it encounters into
/// the wrapped `Collector`.
struct HirCollector<'a, 'hir: 'a> {
    sess: &'a session::Session,
    collector: &'a mut Collector,
    map: &'a hir::map::Map<'hir>
}
impl<'a, 'hir> HirCollector<'a, 'hir> {
    /// Extracts doctests from `attrs`, with `name` pushed onto the
    /// test-name path for the duration of `nested` (the recursive walk).
    /// Items whose `#[doc(cfg(..))]` doesn't match the session are skipped
    /// entirely, including their children.
    fn visit_testable<F: FnOnce(&mut Self)>(&mut self,
                                            name: String,
                                            attrs: &[ast::Attribute],
                                            nested: F) {
        let mut attrs = Attributes::from_ast(self.sess.diagnostic(), attrs);
        if let Some(ref cfg) = attrs.cfg {
            if !cfg.matches(&self.sess.parse_sess, Some(&self.sess.features_untracked())) {
                return;
            }
        }
        let has_name = !name.is_empty();
        if has_name {
            self.collector.names.push(name);
        }
        attrs.collapse_doc_comments();
        attrs.unindent_doc_comments();
        // the collapse-docs pass won't combine sugared/raw doc attributes, or included files with
        // anything else, this will combine them for us
        if let Some(doc) = attrs.collapsed_doc_value() {
            markdown::find_testable_code(&doc,
                                         self.collector,
                                         attrs.span.unwrap_or(DUMMY_SP),
                                         Some(self.sess));
        }
        nested(self);
        if has_name {
            self.collector.names.pop();
        }
    }
}
/// Routes every kind of HIR node through `visit_testable` so its doc
/// comments are scanned for doctests before recursing into children.
impl<'a, 'hir> intravisit::Visitor<'hir> for HirCollector<'a, 'hir> {
    fn nested_visit_map<'this>(&'this mut self) -> intravisit::NestedVisitorMap<'this, 'hir> {
        intravisit::NestedVisitorMap::All(&self.map)
    }
    fn visit_item(&mut self, item: &'hir hir::Item) {
        // Impl blocks have no name of their own; use the pretty-printed
        // self type as the path segment instead.
        let name = if let hir::ItemImpl(.., ref ty, _) = item.node {
            self.map.node_to_pretty_string(ty.id)
        } else {
            item.name.to_string()
        };
        self.visit_testable(name, &item.attrs, |this| {
            intravisit::walk_item(this, item);
        });
    }
    fn visit_trait_item(&mut self, item: &'hir hir::TraitItem) {
        self.visit_testable(item.name.to_string(), &item.attrs, |this| {
            intravisit::walk_trait_item(this, item);
        });
    }
    fn visit_impl_item(&mut self, item: &'hir hir::ImplItem) {
        self.visit_testable(item.name.to_string(), &item.attrs, |this| {
            intravisit::walk_impl_item(this, item);
        });
    }
    fn visit_foreign_item(&mut self, item: &'hir hir::ForeignItem) {
        self.visit_testable(item.name.to_string(), &item.attrs, |this| {
            intravisit::walk_foreign_item(this, item);
        });
    }
    fn visit_variant(&mut self,
                     v: &'hir hir::Variant,
                     g: &'hir hir::Generics,
                     item_id: ast::NodeId) {
        self.visit_testable(v.node.name.to_string(), &v.node.attrs, |this| {
            intravisit::walk_variant(this, v, g, item_id);
        });
    }
    fn visit_struct_field(&mut self, f: &'hir hir::StructField) {
        self.visit_testable(f.name.to_string(), &f.attrs, |this| {
            intravisit::walk_struct_field(this, f);
        });
    }
    fn visit_macro_def(&mut self, macro_def: &'hir hir::MacroDef) {
        // Macros have no nested items to walk, hence the no-op closure.
        // (Fixed: the attrs argument had been corrupted to `¯o_def.attrs`
        // by an HTML-entity mangling of `&macro_def.attrs`.)
        self.visit_testable(macro_def.name.to_string(), &macro_def.attrs, |_| ());
    }
}
#[cfg(test)]
mod tests {
use super::{TestOptions, make_test};
#[test]
fn make_test_basic() {
//basic use: wraps with `fn main`, adds `#![allow(unused)]`
let opts = TestOptions::default();
let input =
"assert_eq!(2+2, 4);";
let expected =
"#![allow(unused)]
fn main() {
assert_eq!(2+2, 4);
}".to_string();
let output = make_test(input, None, false, &opts);
assert_eq!(output, (expected.clone(), 2));
}
#[test]
fn make_test_crate_name_no_use() {
//if you give a crate name but *don't* use it within the test, it won't bother inserting
//the `extern crate` statement
let opts = TestOptions::default();
let input =
"assert_eq!(2+2, 4);";
let expected =
"#![allow(unused)]
fn main() {
assert_eq!(2+2, 4);
}".to_string();
let output = make_test(input, Some("asdf"), false, &opts);
assert_eq!(output, (expected, 2));
}
#[test]
fn make_test_crate_name() {
//if you give a crate name and use it within the test, it will insert an `extern crate`
//statement before `fn main`
let opts = TestOptions::default();
let input =
"use asdf::qwop;
assert_eq!(2+2, 4);";
let expected =
"#![allow(unused)]
extern crate asdf;
fn main() {
use asdf::qwop;
assert_eq!(2+2, 4);
}".to_string();
let output = make_test(input, Some("asdf"), false, &opts);
assert_eq!(output, (expected, 3));
}
#[test]
fn make_test_no_crate_inject() {
//even if you do use the crate within the test, setting `opts.no_crate_inject` will skip
//adding it anyway
let opts = TestOptions {
no_crate_inject: true,
attrs: vec![],
};
let input =
"use asdf::qwop;
assert_eq!(2+2, 4);";
let expected =
"#![allow(unused)]
fn main() {
use asdf::qwop;
assert_eq!(2+2, 4);
}".to_string();
let output = make_test(input, Some("asdf"), false, &opts);
assert_eq!(output, (expected, 2));
}
#[test]
fn make_test_ignore_std() {
//even if you include a crate name, and use it in the doctest, we still won't include an
//`extern crate` statement if the crate is "std" - that's included already by the compiler!
let opts = TestOptions::default();
let input =
"use std::*;
assert_eq!(2+2, 4);";
let expected =
"#![allow(unused)]
fn main() {
use std::*;
assert_eq!(2+2, 4);
}".to_string();
let output = make_test(input, Some("std"), false, &opts);
assert_eq!(output, (expected, 2));
}
#[test]
fn make_test_manual_extern_crate() {
//when you manually include an `extern crate` statement in your doctest, make_test assumes
//you've included one for your own crate too
let opts = TestOptions::default();
let input =
"extern crate asdf;
use asdf::qwop;
assert_eq!(2+2, 4);";
let expected =
"#![allow(unused)]
extern crate asdf;
fn main() {
use asdf::qwop;
assert_eq!(2+2, 4);
}".to_string();
let output = make_test(input, Some("asdf"), false, &opts);
assert_eq!(output, (expected, 2));
}
#[test]
fn make_test_manual_extern_crate_with_macro_use() {
let opts = TestOptions::default();
let input =
"#[macro_use] extern crate asdf;
use asdf::qwop;
assert_eq!(2+2, 4);";
let expected =
"#![allow(unused)]
#[macro_use] extern crate asdf;
fn main() {
use asdf::qwop;
assert_eq!(2+2, 4);
}".to_string();
let output = make_test(input, Some("asdf"), false, &opts);
assert_eq!(output, (expected, 2));
}
#[test]
fn make_test_opts_attrs() {
//if you supplied some doctest attributes with #![doc(test(attr(...)))], it will use those
//instead of the stock #![allow(unused)]
let mut opts = TestOptions::default();
opts.attrs.push("feature(sick_rad)".to_string());
let input =
"use asdf::qwop;
assert_eq!(2+2, 4);";
let expected =
"#![feature(sick_rad)]
extern crate asdf;
fn main() {
use asdf::qwop;
assert_eq!(2+2, 4);
}".to_string();
let output = make_test(input, Some("asdf"), false, &opts);
assert_eq!(output, (expected, 3));
//adding more will also bump the returned line offset
opts.attrs.push("feature(hella_dope)".to_string());
let expected =
"#![feature(sick_rad)]
#![feature(hella_dope)]
extern crate asdf;
fn main() {
use asdf::qwop;
assert_eq!(2+2, 4);
}".to_string();
let output = make_test(input, Some("asdf"), false, &opts);
assert_eq!(output, (expected, 4));
}
#[test]
fn make_test_crate_attrs() {
//including inner attributes in your doctest will apply them to the whole "crate", pasting
//them outside the generated main function
let opts = TestOptions::default();
let input =
"#![feature(sick_rad)]
assert_eq!(2+2, 4);";
let expected =
"#![allow(unused)]
#![feature(sick_rad)]
fn main() {
assert_eq!(2+2, 4);
}".to_string();
let output = make_test(input, None, false, &opts);
assert_eq!(output, (expected, 2));
}
#[test]
fn make_test_with_main() {
//including your own `fn main` wrapper lets the test use it verbatim
let opts = TestOptions::default();
let input =
"fn main() {
assert_eq!(2+2, 4);
}";
let expected =
"#![allow(unused)]
fn main() {
assert_eq!(2+2, 4);
}".to_string();
let output = make_test(input, None, false, &opts);
assert_eq!(output, (expected, 1));
}
#[test]
fn make_test_fake_main() {
    // A `fn main` that only appears inside a comment does not count as a
    // user-provided wrapper, so the generated one is still added.
    let opts = TestOptions::default();
    let input =
"//Ceci n'est pas une `fn main`
assert_eq!(2+2, 4);";
    let expected =
"#![allow(unused)]
fn main() {
//Ceci n'est pas une `fn main`
}".to_string();
    let output = make_test(input, None, false, &opts);
    // `expected` is not used again, so move it instead of cloning it.
    assert_eq!(output, (expected, 2));
}
#[test]
fn make_test_dont_insert_main() {
    // With `dont_insert_main` set, no `fn main` wrapper is created even
    // when the doctest body has none of its own.
    let opts = TestOptions::default();
    let input =
"//Ceci n'est pas une `fn main`
assert_eq!(2+2, 4);";
    let expected =
"#![allow(unused)]
//Ceci n'est pas une `fn main`
assert_eq!(2+2, 4);".to_string();
    let output = make_test(input, None, true, &opts);
    // `expected` is not used again, so move it instead of cloning it.
    assert_eq!(output, (expected, 1));
}
}
| 34.312566 | 100 | 0.548994 |
9ce5b8e17a9ae45ea1ba304ad7331ea07635f217
| 9,900 |
use clippy_utils::consts::{constant, Constant};
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::higher::IfLet;
use clippy_utils::ty::is_copy;
use clippy_utils::{is_expn_of, is_lint_allowed, meets_msrv, msrvs, path_to_local};
use if_chain::if_chain;
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_hir::intravisit::{self, Visitor};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::nested_filter;
use rustc_middle::ty;
use rustc_semver::RustcVersion;
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::{symbol::Ident, Span};
declare_clippy_lint! {
    /// ### What it does
    /// The lint checks for slice bindings in patterns that are only used to
    /// access individual slice values.
    ///
    /// ### Why is this bad?
    /// Accessing slice values using indices can lead to panics. Using refutable
    /// patterns can avoid these. Binding to individual values also improves the
    /// readability as they can be named.
    ///
    /// ### Limitations
    /// This lint currently only checks for immutable access inside `if let`
    /// patterns.
    ///
    /// ### Example
    /// ```rust
    /// let slice: Option<&[u32]> = Some(&[1, 2, 3]);
    ///
    /// if let Some(slice) = slice {
    ///     println!("{}", slice[0]);
    /// }
    /// ```
    /// Use instead:
    /// ```rust
    /// let slice: Option<&[u32]> = Some(&[1, 2, 3]);
    ///
    /// if let Some(&[first, ..]) = slice {
    ///     println!("{}", first);
    /// }
    /// ```
    #[clippy::version = "1.58.0"]
    pub INDEX_REFUTABLE_SLICE,
    // Nursery group: not enabled by default.
    nursery,
    "avoid indexing on slices which could be destructed"
}
#[derive(Copy, Clone)]
pub struct IndexRefutableSlice {
    /// Configured maximum length up to which a slice pattern is suggested.
    max_suggested_slice: u64,
    /// Minimum supported Rust version configured for the crate, if any.
    msrv: Option<RustcVersion>,
}
impl IndexRefutableSlice {
pub fn new(max_suggested_slice_pattern_length: u64, msrv: Option<RustcVersion>) -> Self {
Self {
max_suggested_slice: max_suggested_slice_pattern_length,
msrv,
}
}
}
impl_lint_pass!(IndexRefutableSlice => [INDEX_REFUTABLE_SLICE]);
impl<'tcx> LateLintPass<'tcx> for IndexRefutableSlice {
    /// Entry point: fires on every expression and lints `if let` patterns
    /// whose slice bindings are only used for constant indexing.
    fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx hir::Expr<'_>) {
        if_chain! {
            // Skip macro expansions, except `if_chain!` which is explicitly supported.
            if !expr.span.from_expansion() || is_expn_of(expr.span, "if_chain").is_some();
            if let Some(IfLet {let_pat, if_then, ..}) = IfLet::hir(cx, expr);
            if !is_lint_allowed(cx, INDEX_REFUTABLE_SLICE, expr.hir_id);
            // Slice patterns require a sufficiently new MSRV.
            if meets_msrv(self.msrv, msrvs::SLICE_PATTERNS);
            // Collect candidate slice bindings from the pattern, then drop the
            // ones whose use inside the `then` body disqualifies them.
            let found_slices = find_slice_values(cx, let_pat);
            if !found_slices.is_empty();
            let filtered_slices = filter_lintable_slices(cx, found_slices, self.max_suggested_slice, if_then);
            if !filtered_slices.is_empty();
            then {
                for slice in filtered_slices.values() {
                    lint_slice(cx, slice);
                }
            }
        }
    }

    extract_msrv_attr!(LateContext);
}
/// Walks `pat` and collects every immutable binding whose type is a slice or
/// array, keyed by the binding's `HirId`. Bindings with sub-patterns, or that
/// are `mut`/`ref mut`, are excluded.
fn find_slice_values(cx: &LateContext<'_>, pat: &hir::Pat<'_>) -> FxIndexMap<hir::HirId, SliceLintInformation> {
    let mut removed_pat: FxHashSet<hir::HirId> = FxHashSet::default();
    let mut slices: FxIndexMap<hir::HirId, SliceLintInformation> = FxIndexMap::default();
    pat.walk_always(|pat| {
        if let hir::PatKind::Binding(binding, value_hir_id, ident, sub_pat) = pat.kind {
            // We'll just ignore mut and ref mut for simplicity sake right now
            if let hir::BindingAnnotation::Mutable | hir::BindingAnnotation::RefMut = binding {
                return;
            }

            // This block catches bindings with sub patterns. It would be hard to build a correct suggestion
            // for them and it's likely that the user knows what they are doing in such a case.
            if removed_pat.contains(&value_hir_id) {
                return;
            }
            if sub_pat.is_some() {
                removed_pat.insert(value_hir_id);
                slices.remove(&value_hir_id);
                return;
            }

            let bound_ty = cx.typeck_results().node_type(pat.hir_id);
            if let ty::Slice(inner_ty) | ty::Array(inner_ty, _) = bound_ty.peel_refs().kind() {
                // The values need to use the `ref` keyword if they can't be copied.
                // This will need to be adjusted if the lint want to support mutable access in the future
                let src_is_ref = bound_ty.is_ref() && binding != hir::BindingAnnotation::Ref;
                let needs_ref = !(src_is_ref || is_copy(cx, *inner_ty));

                let slice_info = slices
                    .entry(value_hir_id)
                    .or_insert_with(|| SliceLintInformation::new(ident, needs_ref));
                slice_info.pattern_spans.push(pat.span);
            }
        }
    });

    slices
}
/// Emits the lint for one slice binding, suggesting a slice pattern like
/// `[a, _, c, ..]` covering every constant index actually used, plus the
/// replacement of each index expression by the corresponding new name.
fn lint_slice(cx: &LateContext<'_>, slice: &SliceLintInformation) {
    let used_indices = slice
        .index_use
        .iter()
        .map(|(index, _)| *index)
        .collect::<FxHashSet<_>>();

    // Names the destructured value for index `index`, e.g. `slice_0`.
    let value_name = |index| format!("{}_{}", slice.ident.name, index);

    if let Some(max_index) = used_indices.iter().max() {
        let opt_ref = if slice.needs_ref { "ref " } else { "" };
        // Unused positions up to the largest used index become `_`.
        let pat_sugg_idents = (0..=*max_index)
            .map(|index| {
                if used_indices.contains(&index) {
                    format!("{}{}", opt_ref, value_name(index))
                } else {
                    "_".to_string()
                }
            })
            .collect::<Vec<_>>();
        let pat_sugg = format!("[{}, ..]", pat_sugg_idents.join(", "));

        span_lint_and_then(
            cx,
            INDEX_REFUTABLE_SLICE,
            slice.ident.span,
            "this binding can be a slice pattern to avoid indexing",
            |diag| {
                diag.multipart_suggestion(
                    "try using a slice pattern here",
                    slice
                        .pattern_spans
                        .iter()
                        .map(|span| (*span, pat_sugg.clone()))
                        .collect(),
                    Applicability::MaybeIncorrect,
                );

                diag.multipart_suggestion(
                    "and replace the index expressions here",
                    slice
                        .index_use
                        .iter()
                        .map(|(index, span)| (*span, value_name(*index)))
                        .collect(),
                    Applicability::MaybeIncorrect,
                );

                // The lint message doesn't contain a warning about the removed index expression,
                // since `filter_lintable_slices` will only return slices where all access indices
                // are known at compile time. Therefore, they can be removed without side effects.
            },
        );
    }
}
/// Everything collected about one slice binding that is needed to build the
/// lint suggestion.
#[derive(Debug)]
struct SliceLintInformation {
    /// The bound name, used to derive the destructured value names.
    ident: Ident,
    /// Whether the destructured values must be bound with `ref`.
    needs_ref: bool,
    /// Spans of every pattern that binds this slice (for the pattern suggestion).
    pattern_spans: Vec<Span>,
    /// Each constant index used, with the span of its index expression.
    index_use: Vec<(u64, Span)>,
}
impl SliceLintInformation {
fn new(ident: Ident, needs_ref: bool) -> Self {
Self {
ident,
needs_ref,
pattern_spans: Vec::new(),
index_use: Vec::new(),
}
}
}
/// Walks `scope` and keeps only the slice bindings whose every use is an
/// immutable, compile-time-constant index below `max_suggested_slice`.
/// Any other use removes the binding from the map (see the visitor below).
fn filter_lintable_slices<'a, 'tcx>(
    cx: &'a LateContext<'tcx>,
    slice_lint_info: FxIndexMap<hir::HirId, SliceLintInformation>,
    max_suggested_slice: u64,
    scope: &'tcx hir::Expr<'tcx>,
) -> FxIndexMap<hir::HirId, SliceLintInformation> {
    let mut visitor = SliceIndexLintingVisitor {
        cx,
        slice_lint_info,
        max_suggested_slice,
    };

    intravisit::walk_expr(&mut visitor, scope);

    visitor.slice_lint_info
}
/// HIR visitor that records constant index accesses into the candidate slices
/// and discards any slice used in another way.
struct SliceIndexLintingVisitor<'a, 'tcx> {
    cx: &'a LateContext<'tcx>,
    /// Candidate slices; entries are removed as soon as a disqualifying use is seen.
    slice_lint_info: FxIndexMap<hir::HirId, SliceLintInformation>,
    /// Upper bound (exclusive) for indices worth suggesting a pattern for.
    max_suggested_slice: u64,
}
impl<'a, 'tcx> Visitor<'tcx> for SliceIndexLintingVisitor<'a, 'tcx> {
    type NestedFilter = nested_filter::OnlyBodies;

    fn nested_visit_map(&mut self) -> Self::Map {
        self.cx.tcx.hir()
    }

    /// Classifies every use of a tracked local: a read-only constant index
    /// below the limit is recorded; anything else drops the local from the map.
    fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
        if let Some(local_id) = path_to_local(expr) {
            let Self {
                cx,
                ref mut slice_lint_info,
                max_suggested_slice,
            } = *self;

            if_chain! {
                // Check if this is even a local we're interested in
                if let Some(use_info) = slice_lint_info.get_mut(&local_id);

                let map = cx.tcx.hir();

                // Checking for slice indexing
                let parent_id = map.get_parent_node(expr.hir_id);
                if let Some(hir::Node::Expr(parent_expr)) = map.find(parent_id);
                if let hir::ExprKind::Index(_, index_expr) = parent_expr.kind;
                if let Some((Constant::Int(index_value), _)) = constant(cx, cx.typeck_results(), index_expr);
                if let Ok(index_value) = index_value.try_into();
                if index_value < max_suggested_slice;

                // Make sure that this slice index is read only
                let maybe_addrof_id = map.get_parent_node(parent_id);
                if let Some(hir::Node::Expr(maybe_addrof_expr)) = map.find(maybe_addrof_id);
                if let hir::ExprKind::AddrOf(_kind, hir::Mutability::Not, _inner_expr) = maybe_addrof_expr.kind;
                then {
                    use_info.index_use.push((index_value, map.span(parent_expr.hir_id)));
                    return;
                }
            }

            // The slice was used for something other than indexing
            self.slice_lint_info.remove(&local_id);
        }
        intravisit::walk_expr(self, expr);
    }
}
| 35.869565 | 112 | 0.572222 |
8a0e97197f1bba63e208150df3e2e642fb693b19
| 249 |
#[allow(unused_imports)]
use serde_json::Value;
/// Smartfail state of a single cluster node (generated API model).
#[derive(Debug, Serialize, Deserialize)]
pub struct ClusterNodeStateSmartfail {
    /// This node is smartfailed (soft_devs).
    #[serde(rename = "smartfailed")]
    pub smartfailed: Option<bool>,
}
| 24.9 | 45 | 0.714859 |
1ef839a31b95a6854c4c844b521c767130710f3e
| 618 |
// Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0
//! Structure of a global slot
use serde::{Deserialize, Serialize};
use wire_type::WireType;
use crate::numbers::{self, Length};
#[derive(Clone, Serialize, Deserialize, Default, PartialEq, Debug, WireType)]
#[serde(from = "<Self as WireType>::WireType")]
#[serde(into = "<Self as WireType>::WireType")]
#[wire_type(recurse = 2)]
/// A global slot
///
/// Serialization round-trips through the derived `WireType` representation,
/// as wired up by the `serde(from/into)` attributes above.
pub struct GlobalSlot {
    /// The global slot number of a chain or block
    pub slot_number: numbers::GlobalSlotNumber,
    /// Number of slots per epoch
    pub slots_per_epoch: Length,
}
| 28.090909 | 77 | 0.708738 |
f4b4273c9db773dd0c30d7191abb9506076dfeea
| 38,824 |
// Copyright (c) 2001-2016, Alliance for Open Media. All rights reserved
// Copyright (c) 2017-2020, The rav1e contributors. All rights reserved
//
// This source code is subject to the terms of the BSD 2 Clause License and
// the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
// was not distributed with this source code in the LICENSE file, you can
// obtain it at www.aomedia.org/license/software. If the Alliance for Open
// Media Patent License 1.0 was not distributed with this source code in the
// PATENTS file, you can obtain it at www.aomedia.org/license/patent.
//! # C API for rav1e
//!
//! [rav1e](https://github.com/xiph/rav1e/) is an [AV1](https://aomediacodec.github.io/av1-spec/)
//! encoder written in [Rust](https://rust-lang.org)
//!
//! This is the C-compatible API
#![deny(missing_docs)]
use std::slice;
use std::sync::Arc;
use std::ffi::CStr;
use std::ffi::CString;
use std::os::raw::c_char;
use std::os::raw::c_int;
use std::os::raw::c_void;
use libc::ptrdiff_t;
use libc::size_t;
use num_derive::*;
use num_traits::cast::FromPrimitive;
use crate::prelude as rav1e;
type PixelRange = rav1e::PixelRange;
type ChromaSamplePosition = rav1e::ChromaSamplePosition;
type ChromaSampling = rav1e::ChromaSampling;
type MatrixCoefficients = rav1e::MatrixCoefficients;
type ColorPrimaries = rav1e::ColorPrimaries;
type TransferCharacteristics = rav1e::TransferCharacteristics;
type Rational = rav1e::Rational;
type FrameTypeOverride = rav1e::FrameTypeOverride;
type FrameOpaqueCb = Option<extern fn(*mut c_void)>;
/// Bit-depth-erased, reference-counted frame storage.
///
/// The `Arc` lets `rav1e_send_frame` hand the frame to the encoder without
/// copying the pixel data.
#[derive(Clone)]
enum FrameInternal {
  U8(Arc<rav1e::Frame<u8>>),
  U16(Arc<rav1e::Frame<u16>>),
}
/// Wraps an 8-bit frame into the depth-erased representation.
impl From<rav1e::Frame<u8>> for FrameInternal {
  fn from(f: rav1e::Frame<u8>) -> FrameInternal {
    FrameInternal::U8(Arc::new(f))
  }
}
/// Wraps a high-bit-depth frame into the depth-erased representation.
impl From<rav1e::Frame<u16>> for FrameInternal {
  fn from(f: rav1e::Frame<u16>) -> FrameInternal {
    FrameInternal::U16(Arc::new(f))
  }
}
/// User-provided opaque pointer attached to a frame, with an optional
/// destructor callback (run by the `Drop` impl below unless ownership has
/// been handed back to the C caller first).
struct FrameOpaque {
  opaque: *mut c_void,
  cb: FrameOpaqueCb,
}

// SAFETY: NOTE(review) — the raw pointer is supplied by the C caller via
// rav1e_frame_set_opaque, which implicitly makes the caller responsible for
// the pointed-to data being safe to move across threads; confirm against the
// documented C API contract.
unsafe impl Send for FrameOpaque {}
/// Default: no opaque data and no destructor callback.
impl Default for FrameOpaque {
  fn default() -> Self {
    FrameOpaque { opaque: std::ptr::null_mut(), cb: None }
  }
}
/// Runs the user-supplied destructor callback (if any) on the opaque pointer.
/// `receive_packet` clears `cb` before handing the pointer back to C, so the
/// callback never runs after ownership has been returned to the caller.
impl Drop for FrameOpaque {
  fn drop(&mut self) {
    let FrameOpaque { opaque, cb } = self;
    if let Some(cb) = cb {
      cb(*opaque);
    }
  }
}
/// Raw video Frame
///
/// It can be allocated through rav1e_frame_new(), populated using rav1e_frame_fill_plane()
/// and freed using rav1e_frame_unref().
pub struct Frame {
  // Depth-erased pixel data shared with the encoder.
  fi: FrameInternal,
  // Frame type override set via rav1e_frame_set_type (default: none).
  frame_type: FrameTypeOverride,
  // Opaque user data set via rav1e_frame_set_opaque, if any.
  opaque: Option<FrameOpaque>,
}
/// Status that can be returned by encoder functions.
///
/// Mirrors `rav1e::EncoderStatus` with explicit, stable C values:
/// non-errors are >= 0, errors are negative.
#[repr(C)]
#[derive(Copy, Clone, Debug, FromPrimitive, PartialEq)]
pub enum EncoderStatus {
  /// Normal operation.
  Success = 0,
  /// The encoder needs more data to produce an output packet.
  ///
  /// May be emitted by `rav1e_receive_packet` when frame reordering is
  /// enabled.
  NeedMoreData,
  /// There are enough frames in the queue.
  ///
  /// May be emitted by `rav1e_send_frame` when trying to send a frame after
  /// the encoder has been flushed or the internal queue is full.
  EnoughData,
  /// The encoder has already produced the number of frames requested.
  ///
  /// May be emitted by `rav1e_receive_packet` after a flush request had been
  /// processed or the frame limit had been reached.
  LimitReached,
  /// A Frame had been encoded but not emitted yet.
  Encoded,
  /// Generic fatal error.
  Failure = -1,
  /// A frame was encoded in the first pass of a 2-pass encode, but its stats
  /// data was not retrieved with `rav1e_twopass_out`, or not enough stats data
  /// was provided in the second pass of a 2-pass encode to encode the next
  /// frame.
  NotReady = -2,
}
impl EncoderStatus {
  /// Returns a pointer to a static, NUL-terminated message describing the
  /// status, suitable for returning to C callers.
  fn to_c(&self) -> *const u8 {
    use self::EncoderStatus::*;
    // Select the static message first, then take its pointer once.
    let msg: &str = match self {
      Success => "Normal operation\0",
      NeedMoreData => "The encoder needs more data to produce an output packet\0",
      EnoughData => "There are enough frames in the queue\0",
      LimitReached => "The encoder has already produced the number of frames requested\0",
      Encoded => "A Frame had been encoded but not emitted yet\0",
      Failure => "Generic fatal error\0",
      NotReady => "First-pass stats data not retrieved or not enough second-pass data provided\0",
    };
    msg.as_ptr()
  }
}
/// Maps the library status into the C status; `None` means success.
impl From<Option<rav1e::EncoderStatus>> for EncoderStatus {
  fn from(status: Option<rav1e::EncoderStatus>) -> Self {
    match status {
      None => EncoderStatus::Success,
      Some(s) => match s {
        rav1e::EncoderStatus::NeedMoreData => EncoderStatus::NeedMoreData,
        rav1e::EncoderStatus::EnoughData => EncoderStatus::EnoughData,
        rav1e::EncoderStatus::LimitReached => EncoderStatus::LimitReached,
        rav1e::EncoderStatus::Encoded => EncoderStatus::Encoded,
        rav1e::EncoderStatus::Failure => EncoderStatus::Failure,
        rav1e::EncoderStatus::NotReady => EncoderStatus::NotReady,
      },
    }
  }
}
/// Encoder configuration
///
/// Instantiate it using rav1e_config_default() and fine-tune it using
/// rav1e_config_parse().
///
/// Use rav1e_config_unref() to free its memory.
pub struct Config {
  // The underlying Rust-side configuration being built up.
  cfg: rav1e::Config,
}
/// Bit-depth-erased encoder context; every method dispatches to the inner
/// 8-bit or 16-bit `rav1e::Context`.
enum EncContext {
  U8(rav1e::Context<u8>),
  U16(rav1e::Context<u16>),
}
impl EncContext {
  /// Allocates a new frame matching this context's pixel depth.
  fn new_frame(&self) -> FrameInternal {
    match self {
      EncContext::U8(ctx) => ctx.new_frame().into(),
      EncContext::U16(ctx) => ctx.new_frame().into(),
    }
  }

  /// Queues a frame into the encoder, or flushes it when `frame` is `None`.
  ///
  /// Returns `Failure` when the frame's bit depth does not match the context.
  fn send_frame(
    &mut self, frame: Option<FrameInternal>, frame_type: FrameTypeOverride,
    opaque: Option<Box<dyn std::any::Any + Send>>,
  ) -> Result<(), rav1e::EncoderStatus> {
    let info =
      rav1e::FrameParameters { frame_type_override: frame_type, opaque };
    if let Some(frame) = frame {
      match (self, frame) {
        (EncContext::U8(ctx), FrameInternal::U8(ref f)) => {
          // Cheap Arc clone; pixel data is shared, not copied.
          ctx.send_frame((f.clone(), info))
        }
        (EncContext::U16(ctx), FrameInternal::U16(ref f)) => {
          ctx.send_frame((f.clone(), info))
        }
        // Bit-depth mismatch between the frame and the context.
        _ => Err(rav1e::EncoderStatus::Failure),
      }
    } else {
      // `None` requests a flush.
      match self {
        EncContext::U8(ctx) => ctx.send_frame(None),
        EncContext::U16(ctx) => ctx.send_frame(None),
      }
    }
  }

  /// Pulls the next encoded packet and converts it into the C-facing `Packet`,
  /// leaking the data buffer and the opaque pointer to the caller.
  fn receive_packet(&mut self) -> Result<Packet, rav1e::EncoderStatus> {
    fn receive_packet<T: rav1e::Pixel>(
      ctx: &mut rav1e::Context<T>,
    ) -> Result<Packet, rav1e::EncoderStatus> {
      ctx.receive_packet().map(|p| {
        // ManuallyDrop so taking the opaque out does not run FrameOpaque's
        // destructor callback here; ownership moves back to the C caller.
        let mut p = std::mem::ManuallyDrop::new(p);
        let opaque = p.opaque.take().map_or_else(
          || std::ptr::null_mut(),
          |o| {
            let mut opaque = o.downcast::<FrameOpaque>().unwrap();
            // Disarm the callback before extracting the raw pointer.
            opaque.cb = None;
            opaque.opaque
          },
        );
        let p = std::mem::ManuallyDrop::into_inner(p);
        let rav1e::Packet { data, input_frameno, frame_type, .. } = p;
        let len = data.len();
        // Leak the boxed slice; it is reclaimed by the packet-free function.
        let data = Box::into_raw(data.into_boxed_slice()) as *const u8;
        Packet { data, len, input_frameno, frame_type, opaque }
      })
    }
    match self {
      EncContext::U8(ctx) => receive_packet(ctx),
      EncContext::U16(ctx) => receive_packet(ctx),
    }
  }

  /// Sequence header formatted for container use.
  fn container_sequence_header(&self) -> Vec<u8> {
    match self {
      EncContext::U8(ctx) => ctx.container_sequence_header(),
      EncContext::U16(ctx) => ctx.container_sequence_header(),
    }
  }

  /// Number of two-pass stats bytes the encoder expects next.
  fn twopass_bytes_needed(&mut self) -> usize {
    match self {
      EncContext::U8(ctx) => ctx.twopass_bytes_needed(),
      EncContext::U16(ctx) => ctx.twopass_bytes_needed(),
    }
  }

  /// Feeds first-pass stats data; returns the number of bytes consumed.
  fn twopass_in(&mut self, buf: &[u8]) -> Result<usize, rav1e::EncoderStatus> {
    match self {
      EncContext::U8(ctx) => ctx.twopass_in(buf),
      EncContext::U16(ctx) => ctx.twopass_in(buf),
    }
  }

  /// First-pass stats produced for the last encoded frame, if any.
  fn twopass_out(&mut self) -> Option<&[u8]> {
    match self {
      EncContext::U8(ctx) => ctx.twopass_out(),
      EncContext::U16(ctx) => ctx.twopass_out(),
    }
  }

  /// Size in bytes of the rate-control summary packet.
  fn rc_summary_size(&self) -> usize {
    match self {
      EncContext::U8(ctx) => ctx.rc_summary_size(),
      EncContext::U16(ctx) => ctx.rc_summary_size(),
    }
  }

  /// Next rate-control pass data packet, if one is available.
  fn rc_receive_pass_data(&mut self) -> Option<rav1e::RcData> {
    match self {
      EncContext::U8(ctx) => ctx.rc_receive_pass_data(),
      EncContext::U16(ctx) => ctx.rc_receive_pass_data(),
    }
  }

  /// Bytes of second-pass rate-control data required before the next frame.
  fn rc_second_pass_data_required(&self) -> usize {
    match self {
      EncContext::U8(ctx) => ctx.rc_second_pass_data_required(),
      EncContext::U16(ctx) => ctx.rc_second_pass_data_required(),
    }
  }

  /// Feeds second-pass rate-control data to the encoder.
  fn rc_send_pass_data(
    &mut self, data: &[u8],
  ) -> Result<(), rav1e::EncoderStatus> {
    match self {
      EncContext::U8(ctx) => ctx.rc_send_pass_data(data),
      EncContext::U16(ctx) => ctx.rc_send_pass_data(data),
    }
  }
}
/// Encoder context
///
/// Contains the encoding state, it is created by rav1e_context_new() using an
/// Encoder configuration.
///
/// Use rav1e_context_unref() to free its memory.
pub struct Context {
  // Depth-erased encoder state.
  ctx: EncContext,
  // Last error, surfaced to C via the status-query entry points.
  last_err: Option<rav1e::EncoderStatus>,
}
type FrameType = rav1e::FrameType;
/// Encoded Packet
///
/// The encoded packets are retrieved using rav1e_receive_packet().
///
/// Use rav1e_packet_unref() to free its memory.
#[repr(C)]
pub struct Packet {
  /// Encoded data buffer
  pub data: *const u8,
  /// Encoded data buffer size
  pub len: size_t,
  /// Frame sequence number
  pub input_frameno: u64,
  /// Frame type
  pub frame_type: FrameType,
  /// User provided opaque data, or NULL if none was attached to the frame.
  pub opaque: *mut c_void,
}
/// Version information as presented in `[package]` `version`.
///
/// e.g. `0.1.0`
///
/// Can be parsed by [semver](https://crates.io/crates/semver).
/// This returns the version of the loaded library, regardless
/// of which version the library user was built against.
#[no_mangle]
pub unsafe extern fn rav1e_version_short() -> *const c_char {
  // Static, NUL-terminated at compile time; safe to hand to C as-is.
  concat!(env!("CARGO_PKG_VERSION"), "\0").as_ptr() as *const c_char
}
/// Version information with the information
/// provided by `git describe --tags`.
///
/// e.g. `0.1.0 (v0.1.0-1-g743d464)`
///
/// This returns the version of the loaded library, regardless
/// of which version the library user was built against.
#[no_mangle]
pub unsafe extern fn rav1e_version_full() -> *const c_char {
  // Built at compile time from Cargo and vergen metadata; NUL-terminated.
  concat!(
    env!("CARGO_PKG_VERSION"),
    " (",
    env!("VERGEN_SEMVER_LIGHTWEIGHT"),
    ")\0"
  )
  .as_ptr() as *const c_char
}
/// Simple Data
///
/// A raw byte buffer handed across the C boundary.
///
/// Use rav1e_data_unref() to free its memory.
#[repr(C)]
pub struct Data {
  /// Pointer to the data buffer
  pub data: *const u8,
  /// Data buffer size
  pub len: size_t,
}
/// Free a RaData buffer
#[no_mangle]
pub unsafe extern fn rav1e_data_unref(data: *mut Data) {
  if !data.is_null() {
    let data = Box::from_raw(data);
    // Rebuild the Vec that backed the buffer so it gets deallocated.
    // Using `len` for both length and capacity is valid because the buffer
    // was produced via `into_boxed_slice`, which shrinks capacity to len.
    let _ = Vec::from_raw_parts(
      data.data as *mut u8,
      data.len as usize,
      data.len as usize,
    );
  }
}
/// Create a RaConfig filled with default parameters.
#[no_mangle]
pub unsafe extern fn rav1e_config_default() -> *mut Config {
  let cfg = rav1e::Config::default();
  let c = Box::new(Config { cfg });
  // Ownership passes to the caller; reclaimed by rav1e_config_unref.
  Box::into_raw(c)
}
/// Decodes one length-prefixed chunk from `*data`/`*len`.
///
/// Wire format: a big-endian `u64` byte count followed by that many bytes.
/// On success, advances `*data` past the chunk, shrinks `*len`, and returns
/// `(0, Some(payload))`. If the input is too short, returns the total number
/// of bytes needed as the first element, `None` as the second, and leaves
/// `*data`/`*len` untouched.
unsafe fn decode_slice<'a>(
  data: *mut *const u8, len: *mut size_t,
) -> (c_int, Option<&'a [u8]>) {
  use std::convert::TryInto;

  // Not even the 8-byte length prefix is available yet.
  if *len < 8 {
    return (8, None);
  }

  let buf = slice::from_raw_parts(*data, *len as usize);
  let (len_bytes, rest) = buf.split_at(std::mem::size_of::<u64>());
  let buf_len = u64::from_be_bytes(len_bytes.try_into().unwrap()) as usize;
  // Total bytes for this chunk: payload plus the 8-byte prefix.
  let full_len = buf_len + 8;
  if buf_len > rest.len() {
    return (full_len as c_int, None);
  }

  *len -= full_len;
  *data = (*data).offset(full_len.try_into().unwrap());

  (0, Some(&rest[..buf_len]))
}
/// Setup a second pass rate control using the provided summary
///
/// Passing NULL data resets the rate control settings.
///
/// If additional data is required, pointer and len stay unchanged, otherwise
/// they are updated.
///
/// Return:
/// 0 on success
/// > 0 if the buffer has to be larger
/// < 0 on failure
#[no_mangle]
pub unsafe extern fn rav1e_config_set_rc_summary(
  cfg: *mut Config, data: *mut *const u8, len: *mut size_t,
) -> c_int {
  if data.is_null() {
    // NULL resets any previously installed summary.
    (*cfg).cfg.rate_control.summary = None;

    return 0;
  }

  let (needed, maybe_buf) = decode_slice(data, len);

  if maybe_buf.is_none() {
    // Caller must provide `needed` bytes in total before retrying.
    return needed;
  }

  let summary = rav1e::RateControlSummary::from_slice(maybe_buf.unwrap()).ok();
  if summary.is_none() {
    // The bytes decoded but do not form a valid summary.
    -1
  } else {
    (*cfg).cfg.rate_control.summary = summary;

    0
  }
}
/// Request to emit pass data
///
/// Set emit to 0 to not emit pass data, non-zero to emit pass data.
///
#[no_mangle]
pub unsafe extern fn rav1e_config_set_emit_data(
  cfg: *mut Config, emit: c_int,
) {
  (*cfg).cfg.rate_control.emit_pass_data = emit != 0;
}
/// Set the sample (pixel) aspect ratio of the stream
///
/// Needed for anamorphic video.
#[no_mangle]
pub unsafe extern fn rav1e_config_set_sample_aspect_ratio(
  cfg: *mut Config, sample_aspect_ratio: Rational,
) {
  (*cfg).cfg.enc.sample_aspect_ratio = sample_aspect_ratio
}
/// Set the time base of the stream
///
/// Needed for rate control.
#[no_mangle]
pub unsafe extern fn rav1e_config_set_time_base(
  cfg: *mut Config, time_base: Rational,
) {
  (*cfg).cfg.enc.time_base = time_base
}
/// Set pixel format of the stream.
///
/// Supported values for subsampling and chromapos are defined by the
/// enum types RaChromaSampling and RaChromaSamplePosition respectively.
/// Valid values for fullrange are 0 and 1.
///
/// Returns a negative value on error or 0.
#[no_mangle]
pub unsafe extern fn rav1e_config_set_pixel_format(
  cfg: *mut Config, bit_depth: u8, subsampling: ChromaSampling,
  chroma_pos: ChromaSamplePosition, pixel_range: PixelRange,
) -> c_int {
  if bit_depth != 8 && bit_depth != 10 && bit_depth != 12 {
    return -1;
  }
  (*cfg).cfg.enc.bit_depth = bit_depth as usize;

  // Each enum argument arrives as a raw C integer; round-trip it through
  // `FromPrimitive` to reject out-of-range values before storing it.
  let subsampling_val =
    std::mem::transmute::<ChromaSampling, i32>(subsampling);
  if ChromaSampling::from_i32(subsampling_val).is_none() {
    return -1;
  }
  (*cfg).cfg.enc.chroma_sampling = subsampling;

  let chroma_pos_val =
    std::mem::transmute::<ChromaSamplePosition, i32>(chroma_pos);
  if ChromaSamplePosition::from_i32(chroma_pos_val).is_none() {
    return -1;
  }
  (*cfg).cfg.enc.chroma_sample_position = chroma_pos;

  let pixel_range_val = std::mem::transmute::<PixelRange, i32>(pixel_range);
  if PixelRange::from_i32(pixel_range_val).is_none() {
    return -1;
  }
  (*cfg).cfg.enc.pixel_range = pixel_range;

  0
}
/// Set color properties of the stream.
///
/// Supported values are defined by the enum types
/// RaMatrixCoefficients, RaColorPrimaries, and RaTransferCharacteristics
/// respectively.
///
/// Return a negative value on error or 0.
#[no_mangle]
pub unsafe extern fn rav1e_config_set_color_description(
  cfg: *mut Config, matrix: MatrixCoefficients, primaries: ColorPrimaries,
  transfer: TransferCharacteristics,
) -> c_int {
  (*cfg).cfg.enc.color_description = Some(rav1e::ColorDescription {
    matrix_coefficients: matrix,
    color_primaries: primaries,
    transfer_characteristics: transfer,
  });

  // The field was just assigned `Some(..)`, so this branch always returns 0;
  // the check is kept for symmetry with the other setters.
  if (*cfg).cfg.enc.color_description.is_some() {
    0
  } else {
    -1
  }
}
/// Set the content light level information for HDR10 streams.
///
/// Return a negative value on error or 0.
#[no_mangle]
pub unsafe extern fn rav1e_config_set_content_light(
  cfg: *mut Config, max_content_light_level: u16,
  max_frame_average_light_level: u16,
) -> c_int {
  (*cfg).cfg.enc.content_light = Some(rav1e::ContentLight {
    max_content_light_level,
    max_frame_average_light_level,
  });

  // Always 0 after the assignment above; kept for symmetry with the
  // other setters.
  if (*cfg).cfg.enc.content_light.is_some() {
    0
  } else {
    -1
  }
}
/// Set the mastering display information for HDR10 streams.
///
/// primaries and white_point arguments are RaChromaticityPoint, containing 0.16 fixed point
/// values.
/// max_luminance is a 24.8 fixed point value.
/// min_luminance is a 18.14 fixed point value.
///
/// Returns a negative value on error or 0.
/// cbindgen:ptrs-as-arrays=[[primaries;3]]
#[no_mangle]
pub unsafe extern fn rav1e_config_set_mastering_display(
  cfg: *mut Config, primaries: *const rav1e::ChromaticityPoint,
  white_point: rav1e::ChromaticityPoint, max_luminance: u32,
  min_luminance: u32,
) -> c_int {
  // The caller must pass a pointer to exactly 3 chromaticity points (R, G, B);
  // they are copied out of the C array here.
  let primaries = *(primaries as *const [rav1e::ChromaticityPoint; 3]);

  (*cfg).cfg.enc.mastering_display = Some(rav1e::MasteringDisplay {
    primaries,
    white_point,
    max_luminance,
    min_luminance,
  });

  // Always 0 after the assignment above; kept for symmetry with the
  // other setters.
  if (*cfg).cfg.enc.mastering_display.is_some() {
    0
  } else {
    -1
  }
}
/// Free the RaConfig.
#[no_mangle]
pub unsafe extern fn rav1e_config_unref(cfg: *mut Config) {
  if !cfg.is_null() {
    // Re-box the raw pointer so it is dropped; NULL is a no-op.
    let _ = Box::from_raw(cfg);
  }
}
/// Returns the smallest `k` such that `blk_size << k` reaches `target`.
///
/// With `blk_size == 1` this is the ceiling of `log2(target)` (and `0` for
/// `target <= 1`).
fn tile_log2(blk_size: usize, target: usize) -> usize {
  (0usize..).find(|&k| (blk_size << k) >= target).unwrap()
}
/// Validates a parsed tile count: only `0` (auto) or a power of two is a
/// legal number of tile rows/columns; anything else becomes `Err(())`.
///
/// Upstream parse errors are passed through unchanged.
fn check_tile_log2(n: Result<usize, ()>) -> Result<usize, ()> {
  // `is_power_of_two` replaces the previous `(1 << tile_log2(1, n)) - n == 0`
  // round-trip, which could overflow the `1 << k` shift for huge `n`.
  match n {
    Ok(n) if n == 0 || n.is_power_of_two() => Ok(n),
    Ok(_) => Err(()),
    Err(e) => Err(e),
  }
}
/// Validates a parsed frame dimension: accepted when it is at least 16 and
/// strictly below `u16::MAX`; anything else becomes `Err(())`.
///
/// Upstream parse errors are passed through unchanged.
fn check_frame_size(n: Result<usize, ()>) -> Result<usize, ()> {
  let upper: usize = u16::max_value().into();
  n.and_then(|n| if n >= 16 && n < upper { Ok(n) } else { Err(()) })
}
/// Applies a single string key/value option to the configuration.
///
/// Returns `Err(())` for unknown keys, malformed UTF-8, or values that fail
/// to parse/validate. Shared backend of `rav1e_config_parse` and
/// `rav1e_config_parse_int`.
unsafe fn option_match(
  cfg: *mut Config, key: *const c_char, value: *const c_char,
) -> Result<(), ()> {
  let key = CStr::from_ptr(key).to_str().map_err(|_| ())?;
  let value = CStr::from_ptr(value).to_str().map_err(|_| ())?;
  let enc = &mut (*cfg).cfg.enc;

  match key {
    "width" => enc.width = check_frame_size(value.parse().map_err(|_| ()))?,
    "height" => enc.height = check_frame_size(value.parse().map_err(|_| ()))?,
    "speed" => {
      enc.speed_settings =
        rav1e::SpeedSettings::from_preset(value.parse().map_err(|_| ())?)
    }
    "threads" => (*cfg).cfg.threads = value.parse().map_err(|_| ())?,
    "tiles" => enc.tiles = value.parse().map_err(|_| ())?,
    // Tile rows/cols must be powers of two (or 0); validated by check_tile_log2.
    "tile_rows" => {
      enc.tile_rows = check_tile_log2(value.parse().map_err(|_| ()))?
    }
    "tile_cols" => {
      enc.tile_cols = check_tile_log2(value.parse().map_err(|_| ()))?
    }
    "tune" => enc.tune = value.parse().map_err(|_| ())?,
    "quantizer" => enc.quantizer = value.parse().map_err(|_| ())?,
    "min_quantizer" => enc.min_quantizer = value.parse().map_err(|_| ())?,
    "bitrate" => enc.bitrate = value.parse().map_err(|_| ())?,
    // The min/max keyframe intervals are set through a single setter, reusing
    // the current value for the bound not being changed.
    "key_frame_interval" => {
      enc.set_key_frame_interval(
        enc.min_key_frame_interval,
        value.parse().map_err(|_| ())?,
      );
    }
    "min_key_frame_interval" => {
      enc.set_key_frame_interval(
        value.parse().map_err(|_| ())?,
        enc.max_key_frame_interval,
      );
    }
    "switch_frame_interval" => {
      enc.switch_frame_interval = value.parse().map_err(|_| ())?
    }
    "reservoir_frame_delay" => {
      enc.reservoir_frame_delay = Some(value.parse().map_err(|_| ())?)
    }
    "rdo_lookahead_frames" => {
      enc.rdo_lookahead_frames = value.parse().map_err(|_| ())?
    }
    "low_latency" => enc.low_latency = value.parse().map_err(|_| ())?,
    "enable_timing_info" => {
      enc.enable_timing_info = value.parse().map_err(|_| ())?
    }
    "still_picture" => enc.still_picture = value.parse().map_err(|_| ())?,
    _ => return Err(()),
  }

  Ok(())
}
/// Set a configuration parameter using its key and value as string.
///
/// Available keys and values
/// - "width": width of the frame, default 640
/// - "height": height of the frame, default 480
/// - "speed": 0-10, default 6
/// - "threads": maximum number of threads to be used
/// - "tune": "psnr"-"psychovisual", default "psychovisual"
/// - "quantizer": 0-255, default 100
/// - "tiles": total number of tiles desired (0 denotes auto), default 0
/// - "tile_rows": number of tiles horizontally (must be a power of two, overridden by tiles if present), default 0
/// - "tile_cols": number of tiles vertically (must be a power of two, overridden by tiles if present), default 0
/// - "min_quantizer": minimum allowed base quantizer to use in bitrate mode, default 0
/// - "bitrate": target bitrate for the bitrate mode (required for two pass mode), default 0
/// - "key_frame_interval": maximum interval between two keyframes, default 240
/// - "min_key_frame_interval": minimum interval between two keyframes, default 12
/// - "switch_frame_interval": interval between switch frames, default 0
/// - "reservoir_frame_delay": number of temporal units over which to distribute the reservoir usage, default None
/// - "rdo_lookahead_frames": number of frames to read ahead for the RDO lookahead computation, default 40
/// - "low_latency": flag to enable low latency mode, default false
/// - "enable_timing_info": flag to enable signaling timing info in the bitstream, default false
/// - "still_picture": flag for still picture mode, default false
///
/// Return a negative value on error or 0.
#[no_mangle]
pub unsafe extern fn rav1e_config_parse(
  cfg: *mut Config, key: *const c_char, value: *const c_char,
) -> c_int {
  // All validation happens in option_match; this only maps its Result to C.
  if option_match(cfg, key, value) == Ok(()) {
    0
  } else {
    -1
  }
}
/// Set a configuration parameter using its key and value as integer.
///
/// Available keys and values are the same as rav1e_config_parse()
///
/// Return a negative value on error or 0.
#[no_mangle]
pub unsafe extern fn rav1e_config_parse_int(
  cfg: *mut Config, key: *const c_char, value: c_int,
) -> c_int {
  // Render the integer to a NUL-terminated string and reuse the string path.
  let val = CString::new(value.to_string()).unwrap();
  if option_match(cfg, key, val.as_ptr()) == Ok(()) {
    0
  } else {
    -1
  }
}
/// Generate a new encoding context from a populated encoder configuration
///
/// Multiple contexts can be generated through it.
/// Returns Null if context creation failed, e.g. by passing
/// an invalid Config.
#[no_mangle]
pub unsafe extern fn rav1e_context_new(cfg: *const Config) -> *mut Context {
  let cfg = &(*cfg).cfg;
  let enc = &cfg.enc;

  // 8-bit content gets a u8 pixel context; 10/12-bit content uses u16.
  let ctx = match enc.bit_depth {
    8 => cfg.new_context().map(EncContext::U8),
    _ => cfg.new_context().map(EncContext::U16),
  };

  if let Ok(ctx) = ctx {
    Box::into_raw(Box::new(Context { ctx, last_err: None }))
  } else {
    std::ptr::null_mut()
  }
}
/// Free the RaContext.
#[no_mangle]
pub unsafe extern fn rav1e_context_unref(ctx: *mut Context) {
  if !ctx.is_null() {
    // Re-box the raw pointer so it is dropped; NULL is a no-op.
    let _ = Box::from_raw(ctx);
  }
}
/// Produce a new frame from the encoding context
///
/// It must be populated using rav1e_frame_fill_plane().
///
/// The frame is reference counted and must be released passing it to rav1e_frame_unref(),
/// see rav1e_send_frame().
#[no_mangle]
pub unsafe extern fn rav1e_frame_new(ctx: *const Context) -> *mut Frame {
  // The frame's pixel depth follows the context's.
  let fi = (*ctx).ctx.new_frame();
  let frame_type = rav1e::FrameTypeOverride::No;
  let f = Frame { fi, frame_type, opaque: None };
  let frame = Box::new(f.into());

  // Ownership passes to the caller; reclaimed by rav1e_frame_unref or
  // relinquished to the context by rav1e_send_frame.
  Box::into_raw(frame)
}
/// Free the RaFrame.
#[no_mangle]
pub unsafe extern fn rav1e_frame_unref(frame: *mut Frame) {
  if !frame.is_null() {
    // Re-box the raw pointer so it is dropped; NULL is a no-op.
    let _ = Box::from_raw(frame);
  }
}
/// Overrides the encoders frame type decision for a frame
///
/// Must be called before rav1e_send_frame() if used.
#[no_mangle]
pub unsafe extern fn rav1e_frame_set_type(
  frame: *mut Frame, frame_type: FrameTypeOverride,
) -> c_int {
  // The value arrives as a raw C integer; round-trip it through
  // `FromPrimitive` to reject out-of-range values before storing it.
  let frame_type_val =
    std::mem::transmute::<FrameTypeOverride, i32>(frame_type);
  if FrameTypeOverride::from_i32(frame_type_val).is_none() {
    return -1;
  }
  (*frame).frame_type = frame_type;

  0
}
/// Register an opaque data and a destructor to the frame
///
/// It takes the ownership of its memory:
/// - it will relinquish the ownership to the context if
///   rav1e_send_frame is called.
/// - it will call the destructor if rav1e_frame_unref is called
///   otherwise.
#[no_mangle]
pub unsafe extern fn rav1e_frame_set_opaque(
  frame: *mut Frame, opaque: *mut c_void, cb: FrameOpaqueCb,
) {
  if opaque.is_null() {
    // A NULL pointer clears any previously registered opaque data;
    // dropping the old FrameOpaque runs its destructor callback.
    (*frame).opaque = None;
  } else {
    (*frame).opaque = Some(FrameOpaque { opaque, cb });
  }
}
/// Retrieve the first-pass data of a two-pass encode for the frame that was
/// just encoded.
///
/// This should be called BEFORE every call to rav1e_receive_packet()
/// (including the very first one), even if no packet was produced by the
/// last call to rav1e_receive_packet, if any (i.e., RA_ENCODER_STATUS_ENCODED
/// was returned). It needs to be called once more after
/// RA_ENCODER_STATUS_LIMIT_REACHED is returned, to retrieve the header that
/// should be written to the front of the stats file (overwriting the
/// placeholder header that was emitted at the start of encoding).
///
/// It is still safe to call this function when rav1e_receive_packet() returns
/// any other error. It will return NULL instead of returning a duplicate copy
/// of the previous frame's data.
///
/// Must be freed with rav1e_data_unref().
#[no_mangle]
pub unsafe extern fn rav1e_twopass_out(ctx: *mut Context) -> *mut Data {
  match (*ctx).ctx.twopass_out() {
    // No new data since the last call: hand back NULL, not a duplicate.
    None => std::ptr::null_mut(),
    Some(buf) => {
      // Copy into an owned buffer whose allocation the caller will free
      // via rav1e_data_unref().
      let bytes = buf.to_vec();
      let len = bytes.len();
      let data = Box::into_raw(bytes.into_boxed_slice()) as *mut u8;
      Box::into_raw(Box::new(Data { len, data }))
    }
  }
}
/// Rate Control Data
#[derive(Debug, PartialEq)]
#[repr(C)]
// NOTE: repr(C) makes the declaration order part of the C ABI — do not
// reorder or insert variants without updating the C header.
pub enum RcDataKind {
  /// A Rate Control Summary Packet
  ///
  /// It is emitted once, after the encoder is flushed.
  ///
  /// It contains a summary of the rate control information for the
  /// encoding process that just terminated.
  Summary,
  /// A Rate Control Frame-specific Packet
  ///
  /// It is emitted every time a frame is processed.
  ///
  /// The information contained is required to encode its matching
  /// frame in a second pass encoding.
  Frame,
  /// There is no pass data available for now
  ///
  /// This is emitted if rav1e_rc_receive_pass_data is called more
  /// often than it should.
  Empty,
}
/// Return the Rate Control Summary Packet size
///
/// It is useful mainly to preserve space when saving
/// both Rate Control Summary and Frame Packets in a single file.
///
/// The returned size includes the 8-byte big-endian length prefix that
/// rav1e_rc_receive_pass_data() prepends to every packet it emits.
#[no_mangle]
pub unsafe extern fn rav1e_rc_summary_size(ctx: *const Context) -> size_t {
  // Summary payload plus the u64 length header written in front of it
  // (keep in sync with rav1e_rc_receive_pass_data).
  (*ctx).ctx.rc_summary_size() as size_t + std::mem::size_of::<u64>()
}
/// Return the first pass data
///
/// Call it after rav1e_receive_packet() returns a normal condition status:
/// EncoderStatus::Encoded,
/// EncoderStatus::Success,
/// EncoderStatus::LimitReached.
///
/// use rav1e_data_unref() to free the data.
///
/// It will return a `RcDataKind::Summary` once the encoder is flushed.
#[no_mangle]
pub unsafe extern fn rav1e_rc_receive_pass_data(
  ctx: *mut Context, data: *mut *mut Data,
) -> RcDataKind {
  use crate::api::RcData::*;
  let (buf, kind) = match (*ctx).ctx.rc_receive_pass_data() {
    Some(Summary(data)) => (data, RcDataKind::Summary),
    Some(Frame(data)) => (data, RcDataKind::Frame),
    // Called more often than data was produced: nothing to hand out.
    None => return RcDataKind::Empty,
  };
  // Prefix the payload with its length as a big-endian u64 (8 bytes) so
  // multiple packets can be concatenated into one file and re-split
  // later. rav1e_rc_summary_size() accounts for this prefix.
  let mut full_buf = Vec::with_capacity(buf.len() + 8);
  full_buf.extend_from_slice(&(buf.len() as u64).to_be_bytes());
  full_buf.extend_from_slice(&buf);
  let full_buf = full_buf.into_boxed_slice();
  // Ownership of the buffer moves to the caller; freed by rav1e_data_unref().
  *data = Box::into_raw(Box::new(Data {
    len: full_buf.len(),
    data: Box::into_raw(full_buf) as *mut u8,
  }));
  kind
}
/// Number of pass data packets required to progress the encoding process.
///
/// At least that number of packets must be passed before the encoder can
/// progress.
///
/// Stop feeding-in pass data packets once the function returns 0.
///
/// ``` c
/// while (rav1e_rc_second_pass_data_required(ctx) > 0) {
///     int more = rav1e_rc_send_pass_data(ctx, &data, &len);
///     if (more > 0) {
///         refill(&data, &len);
///     } else if (more < 0) {
///         goto fail;
///     }
/// }
/// ```
///
#[no_mangle]
pub unsafe extern fn rav1e_rc_second_pass_data_required(
  ctx: *const Context,
) -> i32 {
  let required = (*ctx).ctx.rc_second_pass_data_required();
  required as i32
}
/// Feed the first pass Rate Control data to the encoder,
/// Frame-specific Packets only.
///
/// Call it before receive_packet()
///
/// If additional data is required, pointer and len stay unchanged, otherwise
/// they are updated.
///
/// Returns:
/// - `0` on success,
/// - `> 0` the amount of bytes needed
/// - `< 0` on unrecoverable failure
#[no_mangle]
pub unsafe extern fn rav1e_rc_send_pass_data(
  ctx: *mut Context, data: *mut *const u8, len: *mut size_t,
) -> c_int {
  // decode_slice() parses the length-prefixed packet in place; when the
  // buffer is too short it reports how many bytes are still needed.
  let (needed, maybe_buf) = decode_slice(data, len);
  let buf = match maybe_buf {
    Some(buf) => buf,
    None => return needed,
  };
  match (*ctx).ctx.rc_send_pass_data(buf) {
    Ok(_) => {
      (*ctx).last_err = None;
      0
    }
    Err(e) => {
      (*ctx).last_err = Some(e);
      -1
    }
  }
}
/// Ask how many bytes of the stats file are needed before the next frame
/// of the second pass in a two-pass encode can be encoded. This is a lower
/// bound (more might be required), but if 0 is returned, then encoding can
/// proceed. This is just a hint to the application, and does not need to
/// be called for encoding the second pass to work, so long as the
/// application continues to provide more data to rav1e_twopass_in() in a loop
/// until rav1e_twopass_in() returns 0.
#[no_mangle]
pub unsafe extern fn rav1e_twopass_bytes_needed(ctx: *mut Context) -> size_t {
  let needed = (*ctx).ctx.twopass_bytes_needed();
  needed as size_t
}
/// Provide stats data produced in the first pass of a two-pass encode to the
/// second pass. On success this returns the number of bytes of that data
/// which were consumed. When encoding the second pass of a two-pass encode,
/// this should be called repeatedly in a loop before every call to
/// rav1e_receive_packet() (including the very first one) until no bytes are
/// consumed, or until twopass_bytes_needed() returns 0. Returns -1 on failure.
#[no_mangle]
pub unsafe extern fn rav1e_twopass_in(
  ctx: *mut Context, buf: *mut u8, buf_size: size_t,
) -> c_int {
  // Borrow the caller's buffer; nothing is copied here.
  let stats = slice::from_raw_parts(buf, buf_size as usize);
  match (*ctx).ctx.twopass_in(stats) {
    Ok(consumed) => consumed as c_int,
    Err(e) => {
      // Remember the failure for rav1e_last_status().
      (*ctx).last_err = Some(e);
      -1
    }
  }
}
/// Send the frame for encoding
///
/// The function increases the frame internal reference count and it can be
/// passed multiple times to different rav1e_send_frame() with a caveat:
///
/// The opaque data, if present, will be moved from the Frame to the context
/// and returned by rav1e_receive_packet in the Packet opaque field or
/// the destructor will be called on rav1e_context_unref if the frame is
/// still pending in the encoder.
///
/// Returns:
/// - `0` on success,
/// - `> 0` if the input queue is full
/// - `< 0` on unrecoverable failure
#[no_mangle]
pub unsafe extern fn rav1e_send_frame(
  ctx: *mut Context, frame: *mut Frame,
) -> EncoderStatus {
  // A NULL frame flushes the encoder: no data, no override, no opaque.
  let (frame_internal, frame_type, maybe_opaque) = if frame.is_null() {
    (None, rav1e::FrameTypeOverride::No, None)
  } else {
    (
      Some((*frame).fi.clone()),
      (*frame).frame_type,
      // Move the opaque payload out of the frame and into the context.
      (*frame)
        .opaque
        .take()
        .map(|o| Box::new(o) as Box<dyn std::any::Any + Send>),
    )
  };
  let ret = match (*ctx).ctx.send_frame(frame_internal, frame_type, maybe_opaque)
  {
    Ok(_) => None,
    Err(e) => Some(e),
  };
  (*ctx).last_err = ret;
  ret.into()
}
/// Return the encoder status recorded by the most recent API call.
#[no_mangle]
pub unsafe extern fn rav1e_last_status(ctx: *const Context) -> EncoderStatus {
  let last = (*ctx).last_err;
  last.into()
}
/// Return a static string matching the EncoderStatus variant.
///
#[no_mangle]
pub unsafe extern fn rav1e_status_to_str(
  status: EncoderStatus,
) -> *const c_char {
  // C callers may pass any integer as the enum; reinterpret it as its raw
  // value and verify it maps back to a known variant. Out-of-range values
  // get NULL instead of reading through a bogus discriminant.
  if EncoderStatus::from_i32(std::mem::transmute(status)).is_none() {
    return std::ptr::null();
  }
  // to_c() yields a NUL-terminated static string; no ownership transfer.
  status.to_c() as *const c_char
}
/// Receive encoded data
///
/// Returns:
/// - `0` on success
/// - `> 0` if additional frame data is required
/// - `< 0` on unrecoverable failure
#[no_mangle]
pub unsafe extern fn rav1e_receive_packet(
  ctx: *mut Context, pkt: *mut *mut Packet,
) -> EncoderStatus {
  let ret = match (*ctx).ctx.receive_packet() {
    Ok(packet) => {
      // Hand the packet to the caller as a heap allocation; freed with
      // rav1e_packet_unref().
      *pkt = Box::into_raw(Box::new(packet));
      None
    }
    Err(e) => Some(e),
  };
  (*ctx).last_err = ret;
  ret.into()
}
/// Free the RaPacket.
#[no_mangle]
pub unsafe extern fn rav1e_packet_unref(pkt: *mut Packet) {
  if !pkt.is_null() {
    let pkt = Box::from_raw(pkt);
    // Rebuild the Vec that backs the data buffer so its allocation is
    // released. len is used for both length and capacity, which matches a
    // buffer that was produced via Vec::into_boxed_slice — assumes the
    // packet data was allocated that way.
    let _ = Vec::from_raw_parts(
      pkt.data as *mut u8,
      pkt.len as usize,
      pkt.len as usize,
    );
  }
}
/// Produce a sequence header matching the current encoding context
///
/// Its format is compatible with the AV1 Matroska and ISOBMFF specification.
///
/// Use rav1e_data_unref() to free it.
#[no_mangle]
pub unsafe extern fn rav1e_container_sequence_header(
  ctx: *const Context,
) -> *mut Data {
  let header = (*ctx).ctx.container_sequence_header();
  let len = header.len();
  // Transfer the buffer to the caller; rav1e_data_unref() releases it.
  let data = Box::into_raw(header.into_boxed_slice()) as *mut u8;
  Box::into_raw(Box::new(Data { len, data }))
}
/// Copy `data_slice` into one plane of the (possibly shared) frame.
fn rav1e_frame_fill_plane_internal<T: rav1e::Pixel>(
  f: &mut Arc<rav1e::Frame<T>>, plane: c_int, data_slice: &[u8],
  stride: ptrdiff_t, bytewidth: c_int,
) {
  // Clone-on-write: make_mut gives a uniquely-owned frame before mutation.
  Arc::make_mut(f).planes[plane as usize].copy_from_raw_u8(
    data_slice,
    stride as usize,
    bytewidth as usize,
  );
}
/// Fill a frame plane
///
/// Currently the frame contains 3 planes, the first is luminance followed by
/// chrominance.
///
/// The data is copied and this function has to be called for each plane.
///
/// frame: A frame provided by rav1e_frame_new()
/// plane: The index of the plane starting from 0
/// data: The data to be copied
/// data_len: Length of the buffer
/// stride: Plane line in bytes, including padding
/// bytewidth: Number of bytes per component, either 1 or 2
#[no_mangle]
pub unsafe extern fn rav1e_frame_fill_plane(
  frame: *mut Frame, plane: c_int, data: *const u8, data_len: size_t,
  stride: ptrdiff_t, bytewidth: c_int,
) {
  // Borrow the caller's buffer; the copy happens inside the plane fill.
  let data_slice = slice::from_raw_parts(data, data_len as usize);
  // Dispatch on the frame's pixel storage; both arms invoke the same
  // generic helper, monomorphized for u8 or u16 pixels, so the repeated
  // call cannot be merged into a single arm.
  match (*frame).fi {
    FrameInternal::U8(ref mut f) => {
      rav1e_frame_fill_plane_internal(f, plane, data_slice, stride, bytewidth)
    }
    FrameInternal::U16(ref mut f) => {
      rav1e_frame_fill_plane_internal(f, plane, data_slice, stride, bytewidth)
    }
  }
}
#[cfg(test)]
mod test {
  use super::*;
  use std::ffi::CString;
  // End-to-end check of the opaque round trip: payloads attached via
  // rav1e_frame_set_opaque() must come back on the matching packet, and
  // payloads still pending in the encoder must be released on teardown.
  #[test]
  fn forward_opaque() {
    unsafe {
      // Minimal 64x64 config at the fastest speed to keep the test quick.
      let rac = rav1e_config_default();
      let w = CString::new("width").unwrap();
      rav1e_config_parse_int(rac, w.as_ptr(), 64);
      let h = CString::new("height").unwrap();
      rav1e_config_parse_int(rac, h.as_ptr(), 64);
      let s = CString::new("speed").unwrap();
      rav1e_config_parse_int(rac, s.as_ptr(), 10);
      let rax = rav1e_context_new(rac);
      let f = rav1e_frame_new(rax);
      for i in 0..30 {
        // Attach a heap-allocated byte; `cb` frees it if the owner is
        // dropped while still holding the payload.
        let v = Box::new(i as u8);
        extern fn cb(o: *mut c_void) {
          let v = unsafe { Box::from_raw(o as *mut u8) };
          eprintln!("Would free {}", v);
        }
        rav1e_frame_set_opaque(f, Box::into_raw(v) as *mut c_void, Some(cb));
        rav1e_send_frame(rax, f);
      }
      // NULL frame flushes the encoder.
      rav1e_send_frame(rax, std::ptr::null_mut());
      for _ in 0..15 {
        let mut p: *mut Packet = std::ptr::null_mut();
        let ret = rav1e_receive_packet(rax, &mut p);
        if ret == EncoderStatus::Success {
          // The opaque pointer travels with the packet; reclaim it here.
          let v = Box::from_raw((*p).opaque as *mut u8);
          eprintln!("Opaque {}", v);
        }
        if ret == EncoderStatus::LimitReached {
          break;
        }
      }
      let v = Box::new(42u64);
      extern fn cb(o: *mut c_void) {
        let v = unsafe { Box::from_raw(o as *mut u64) };
        eprintln!("Would free {}", v);
      }
      rav1e_frame_set_opaque(f, Box::into_raw(v) as *mut c_void, Some(cb));
      // 42 would be freed after this
      rav1e_frame_unref(f);
      // 15 - reorder delay .. 29 would be freed after this
      rav1e_context_unref(rax);
      rav1e_config_unref(rac);
    }
  }
  // Runs a first pass collecting per-frame rate-control packets plus the
  // final summary, then replays that data for a second pass.
  #[test]
  fn two_pass_encoding() {
    unsafe {
      let rac = rav1e_config_default();
      let w = CString::new("width").unwrap();
      rav1e_config_parse_int(rac, w.as_ptr(), 64);
      let h = CString::new("height").unwrap();
      rav1e_config_parse_int(rac, h.as_ptr(), 64);
      let s = CString::new("speed").unwrap();
      rav1e_config_parse_int(rac, s.as_ptr(), 10);
      let s = CString::new("bitrate").unwrap();
      rav1e_config_parse_int(rac, s.as_ptr(), 1000);
      // First pass: ask the encoder to emit rate-control data.
      rav1e_config_set_emit_data(rac, 1);
      let rax = rav1e_context_new(rac);
      let f = rav1e_frame_new(rax);
      for _ in 0..10 {
        rav1e_send_frame(rax, f);
      }
      rav1e_send_frame(rax, std::ptr::null_mut());
      let mut frame_data = std::collections::VecDeque::new();
      let mut summary: *mut Data = std::ptr::null_mut();
      loop {
        let mut p: *mut Packet = std::ptr::null_mut();
        let ret = rav1e_receive_packet(rax, &mut p);
        rav1e_packet_unref(p);
        if ret == EncoderStatus::LimitReached {
          // Flush complete: the final pass-data item is the summary.
          let kind = rav1e_rc_receive_pass_data(rax, &mut summary);
          assert_eq!(kind, RcDataKind::Summary);
          eprintln!("Got rc summary {} bytes", (*summary).len);
          break;
        } else if ret == EncoderStatus::Encoded
          || ret == EncoderStatus::Success
        {
          // Every processed frame yields one Frame pass-data packet.
          let mut p: *mut Data = std::ptr::null_mut();
          let kind = rav1e_rc_receive_pass_data(rax, &mut p);
          assert_eq!(kind, RcDataKind::Frame);
          eprintln!("Got rc frame data {} bytes", (*p).len);
          frame_data.push_back(p);
        }
      }
      // Reconfigure for the second pass: stop emitting data and load the
      // summary captured above.
      rav1e_config_set_emit_data(rac, 0);
      let mut data = (*summary).data;
      let mut len = (*summary).len;
      let ret = rav1e_config_set_rc_summary(rac, &mut data, &mut len);
      assert_eq!(ret, 0);
      rav1e_data_unref(summary);
      for _ in 0..10 {
        rav1e_send_frame(rax, f);
      }
      rav1e_send_frame(rax, std::ptr::null_mut());
      loop {
        let mut p: *mut Packet = std::ptr::null_mut();
        // Feed queued first-pass packets until the encoder is satisfied.
        while rav1e_rc_second_pass_data_required(rax) > 0 {
          let d = frame_data.pop_front().unwrap();
          let mut data = (*d).data;
          let mut len = (*d).len;
          rav1e_rc_send_pass_data(rax, &mut data, &mut len);
          rav1e_data_unref(d);
        }
        let ret = rav1e_receive_packet(rax, &mut p);
        rav1e_packet_unref(p);
        if ret == EncoderStatus::LimitReached {
          break;
        }
      }
      rav1e_frame_unref(f);
      rav1e_context_unref(rax);
      rav1e_config_unref(rac);
    }
  }
}
| 28.929955 | 115 | 0.652174 |
1cccfdb24c27dc5f5b0f36594f2ab2197603e08f
| 16,368 |
// This file was generated by `cargo dev update_lints`.
// Use that command to update this file and do not edit by hand.
// Manual edits will be overwritten.
store.register_group(true, "clippy::all", Some("clippy_all"), vec![
LintId::of(absurd_extreme_comparisons::ABSURD_EXTREME_COMPARISONS),
LintId::of(approx_const::APPROX_CONSTANT),
LintId::of(assertions_on_constants::ASSERTIONS_ON_CONSTANTS),
LintId::of(assign_ops::ASSIGN_OP_PATTERN),
LintId::of(assign_ops::MISREFACTORED_ASSIGN_OP),
LintId::of(async_yields_async::ASYNC_YIELDS_ASYNC),
LintId::of(attrs::BLANKET_CLIPPY_RESTRICTION_LINTS),
LintId::of(attrs::DEPRECATED_CFG_ATTR),
LintId::of(attrs::DEPRECATED_SEMVER),
LintId::of(attrs::MISMATCHED_TARGET_OS),
LintId::of(attrs::USELESS_ATTRIBUTE),
LintId::of(await_holding_invalid::AWAIT_HOLDING_INVALID_TYPE),
LintId::of(await_holding_invalid::AWAIT_HOLDING_LOCK),
LintId::of(await_holding_invalid::AWAIT_HOLDING_REFCELL_REF),
LintId::of(bit_mask::BAD_BIT_MASK),
LintId::of(bit_mask::INEFFECTIVE_BIT_MASK),
LintId::of(blacklisted_name::BLACKLISTED_NAME),
LintId::of(blocks_in_if_conditions::BLOCKS_IN_IF_CONDITIONS),
LintId::of(bool_assert_comparison::BOOL_ASSERT_COMPARISON),
LintId::of(booleans::LOGIC_BUG),
LintId::of(booleans::NONMINIMAL_BOOL),
LintId::of(bytes_count_to_len::BYTES_COUNT_TO_LEN),
LintId::of(casts::CAST_ABS_TO_UNSIGNED),
LintId::of(casts::CAST_ENUM_CONSTRUCTOR),
LintId::of(casts::CAST_ENUM_TRUNCATION),
LintId::of(casts::CAST_REF_TO_MUT),
LintId::of(casts::CAST_SLICE_DIFFERENT_SIZES),
LintId::of(casts::CHAR_LIT_AS_U8),
LintId::of(casts::FN_TO_NUMERIC_CAST),
LintId::of(casts::FN_TO_NUMERIC_CAST_WITH_TRUNCATION),
LintId::of(casts::UNNECESSARY_CAST),
LintId::of(collapsible_if::COLLAPSIBLE_ELSE_IF),
LintId::of(collapsible_if::COLLAPSIBLE_IF),
LintId::of(collapsible_match::COLLAPSIBLE_MATCH),
LintId::of(comparison_chain::COMPARISON_CHAIN),
LintId::of(copies::IFS_SAME_COND),
LintId::of(copies::IF_SAME_THEN_ELSE),
LintId::of(crate_in_macro_def::CRATE_IN_MACRO_DEF),
LintId::of(default::FIELD_REASSIGN_WITH_DEFAULT),
LintId::of(dereference::NEEDLESS_BORROW),
LintId::of(derivable_impls::DERIVABLE_IMPLS),
LintId::of(derive::DERIVE_HASH_XOR_EQ),
LintId::of(derive::DERIVE_ORD_XOR_PARTIAL_ORD),
LintId::of(disallowed_methods::DISALLOWED_METHODS),
LintId::of(disallowed_types::DISALLOWED_TYPES),
LintId::of(doc::MISSING_SAFETY_DOC),
LintId::of(doc::NEEDLESS_DOCTEST_MAIN),
LintId::of(double_comparison::DOUBLE_COMPARISONS),
LintId::of(double_parens::DOUBLE_PARENS),
LintId::of(drop_forget_ref::DROP_COPY),
LintId::of(drop_forget_ref::DROP_NON_DROP),
LintId::of(drop_forget_ref::DROP_REF),
LintId::of(drop_forget_ref::FORGET_COPY),
LintId::of(drop_forget_ref::FORGET_NON_DROP),
LintId::of(drop_forget_ref::FORGET_REF),
LintId::of(drop_forget_ref::UNDROPPED_MANUALLY_DROPS),
LintId::of(duration_subsec::DURATION_SUBSEC),
LintId::of(entry::MAP_ENTRY),
LintId::of(enum_clike::ENUM_CLIKE_UNPORTABLE_VARIANT),
LintId::of(enum_variants::ENUM_VARIANT_NAMES),
LintId::of(enum_variants::MODULE_INCEPTION),
LintId::of(eq_op::EQ_OP),
LintId::of(eq_op::OP_REF),
LintId::of(erasing_op::ERASING_OP),
LintId::of(escape::BOXED_LOCAL),
LintId::of(eta_reduction::REDUNDANT_CLOSURE),
LintId::of(eval_order_dependence::DIVERGING_SUB_EXPRESSION),
LintId::of(eval_order_dependence::EVAL_ORDER_DEPENDENCE),
LintId::of(explicit_write::EXPLICIT_WRITE),
LintId::of(float_equality_without_abs::FLOAT_EQUALITY_WITHOUT_ABS),
LintId::of(float_literal::EXCESSIVE_PRECISION),
LintId::of(format::USELESS_FORMAT),
LintId::of(format_args::FORMAT_IN_FORMAT_ARGS),
LintId::of(format_args::TO_STRING_IN_FORMAT_ARGS),
LintId::of(format_impl::PRINT_IN_FORMAT_IMPL),
LintId::of(format_impl::RECURSIVE_FORMAT_IMPL),
LintId::of(format_push_string::FORMAT_PUSH_STRING),
LintId::of(formatting::POSSIBLE_MISSING_COMMA),
LintId::of(formatting::SUSPICIOUS_ASSIGNMENT_FORMATTING),
LintId::of(formatting::SUSPICIOUS_ELSE_FORMATTING),
LintId::of(formatting::SUSPICIOUS_UNARY_OP_FORMATTING),
LintId::of(from_over_into::FROM_OVER_INTO),
LintId::of(from_str_radix_10::FROM_STR_RADIX_10),
LintId::of(functions::DOUBLE_MUST_USE),
LintId::of(functions::MUST_USE_UNIT),
LintId::of(functions::NOT_UNSAFE_PTR_ARG_DEREF),
LintId::of(functions::RESULT_UNIT_ERR),
LintId::of(functions::TOO_MANY_ARGUMENTS),
LintId::of(get_last_with_len::GET_LAST_WITH_LEN),
LintId::of(identity_op::IDENTITY_OP),
LintId::of(if_let_mutex::IF_LET_MUTEX),
LintId::of(indexing_slicing::OUT_OF_BOUNDS_INDEXING),
LintId::of(infinite_iter::INFINITE_ITER),
LintId::of(inherent_to_string::INHERENT_TO_STRING),
LintId::of(inherent_to_string::INHERENT_TO_STRING_SHADOW_DISPLAY),
LintId::of(init_numbered_fields::INIT_NUMBERED_FIELDS),
LintId::of(inline_fn_without_body::INLINE_FN_WITHOUT_BODY),
LintId::of(int_plus_one::INT_PLUS_ONE),
LintId::of(large_const_arrays::LARGE_CONST_ARRAYS),
LintId::of(large_enum_variant::LARGE_ENUM_VARIANT),
LintId::of(len_zero::COMPARISON_TO_EMPTY),
LintId::of(len_zero::LEN_WITHOUT_IS_EMPTY),
LintId::of(len_zero::LEN_ZERO),
LintId::of(let_underscore::LET_UNDERSCORE_LOCK),
LintId::of(lifetimes::EXTRA_UNUSED_LIFETIMES),
LintId::of(lifetimes::NEEDLESS_LIFETIMES),
LintId::of(literal_representation::INCONSISTENT_DIGIT_GROUPING),
LintId::of(literal_representation::MISTYPED_LITERAL_SUFFIXES),
LintId::of(literal_representation::UNUSUAL_BYTE_GROUPINGS),
LintId::of(loops::EMPTY_LOOP),
LintId::of(loops::EXPLICIT_COUNTER_LOOP),
LintId::of(loops::FOR_KV_MAP),
LintId::of(loops::FOR_LOOPS_OVER_FALLIBLES),
LintId::of(loops::ITER_NEXT_LOOP),
LintId::of(loops::MANUAL_FLATTEN),
LintId::of(loops::MANUAL_MEMCPY),
LintId::of(loops::MISSING_SPIN_LOOP),
LintId::of(loops::MUT_RANGE_BOUND),
LintId::of(loops::NEEDLESS_COLLECT),
LintId::of(loops::NEEDLESS_RANGE_LOOP),
LintId::of(loops::NEVER_LOOP),
LintId::of(loops::SAME_ITEM_PUSH),
LintId::of(loops::SINGLE_ELEMENT_LOOP),
LintId::of(loops::WHILE_IMMUTABLE_CONDITION),
LintId::of(loops::WHILE_LET_LOOP),
LintId::of(loops::WHILE_LET_ON_ITERATOR),
LintId::of(main_recursion::MAIN_RECURSION),
LintId::of(manual_async_fn::MANUAL_ASYNC_FN),
LintId::of(manual_bits::MANUAL_BITS),
LintId::of(manual_map::MANUAL_MAP),
LintId::of(manual_non_exhaustive::MANUAL_NON_EXHAUSTIVE),
LintId::of(manual_strip::MANUAL_STRIP),
LintId::of(manual_unwrap_or::MANUAL_UNWRAP_OR),
LintId::of(map_clone::MAP_CLONE),
LintId::of(map_unit_fn::OPTION_MAP_UNIT_FN),
LintId::of(map_unit_fn::RESULT_MAP_UNIT_FN),
LintId::of(match_result_ok::MATCH_RESULT_OK),
LintId::of(match_str_case_mismatch::MATCH_STR_CASE_MISMATCH),
LintId::of(matches::INFALLIBLE_DESTRUCTURING_MATCH),
LintId::of(matches::MATCH_AS_REF),
LintId::of(matches::MATCH_LIKE_MATCHES_MACRO),
LintId::of(matches::MATCH_OVERLAPPING_ARM),
LintId::of(matches::MATCH_REF_PATS),
LintId::of(matches::MATCH_SINGLE_BINDING),
LintId::of(matches::NEEDLESS_MATCH),
LintId::of(matches::REDUNDANT_PATTERN_MATCHING),
LintId::of(matches::SINGLE_MATCH),
LintId::of(matches::WILDCARD_IN_OR_PATTERNS),
LintId::of(mem_replace::MEM_REPLACE_OPTION_WITH_NONE),
LintId::of(mem_replace::MEM_REPLACE_WITH_DEFAULT),
LintId::of(mem_replace::MEM_REPLACE_WITH_UNINIT),
LintId::of(methods::BIND_INSTEAD_OF_MAP),
LintId::of(methods::BYTES_NTH),
LintId::of(methods::CHARS_LAST_CMP),
LintId::of(methods::CHARS_NEXT_CMP),
LintId::of(methods::CLONE_DOUBLE_REF),
LintId::of(methods::CLONE_ON_COPY),
LintId::of(methods::ERR_EXPECT),
LintId::of(methods::EXPECT_FUN_CALL),
LintId::of(methods::EXTEND_WITH_DRAIN),
LintId::of(methods::FILTER_MAP_IDENTITY),
LintId::of(methods::FILTER_NEXT),
LintId::of(methods::FLAT_MAP_IDENTITY),
LintId::of(methods::INSPECT_FOR_EACH),
LintId::of(methods::INTO_ITER_ON_REF),
LintId::of(methods::IS_DIGIT_ASCII_RADIX),
LintId::of(methods::ITERATOR_STEP_BY_ZERO),
LintId::of(methods::ITER_CLONED_COLLECT),
LintId::of(methods::ITER_COUNT),
LintId::of(methods::ITER_NEXT_SLICE),
LintId::of(methods::ITER_NTH),
LintId::of(methods::ITER_NTH_ZERO),
LintId::of(methods::ITER_OVEREAGER_CLONED),
LintId::of(methods::ITER_SKIP_NEXT),
LintId::of(methods::MANUAL_FILTER_MAP),
LintId::of(methods::MANUAL_FIND_MAP),
LintId::of(methods::MANUAL_SATURATING_ARITHMETIC),
LintId::of(methods::MANUAL_SPLIT_ONCE),
LintId::of(methods::MANUAL_STR_REPEAT),
LintId::of(methods::MAP_COLLECT_RESULT_UNIT),
LintId::of(methods::MAP_FLATTEN),
LintId::of(methods::MAP_IDENTITY),
LintId::of(methods::NEEDLESS_OPTION_AS_DEREF),
LintId::of(methods::NEEDLESS_OPTION_TAKE),
LintId::of(methods::NEEDLESS_SPLITN),
LintId::of(methods::NEW_RET_NO_SELF),
LintId::of(methods::OK_EXPECT),
LintId::of(methods::OPTION_AS_REF_DEREF),
LintId::of(methods::OPTION_FILTER_MAP),
LintId::of(methods::OPTION_MAP_OR_NONE),
LintId::of(methods::OR_FUN_CALL),
LintId::of(methods::OR_THEN_UNWRAP),
LintId::of(methods::RESULT_MAP_OR_INTO_OPTION),
LintId::of(methods::SEARCH_IS_SOME),
LintId::of(methods::SHOULD_IMPLEMENT_TRAIT),
LintId::of(methods::SINGLE_CHAR_ADD_STR),
LintId::of(methods::SINGLE_CHAR_PATTERN),
LintId::of(methods::SKIP_WHILE_NEXT),
LintId::of(methods::STRING_EXTEND_CHARS),
LintId::of(methods::SUSPICIOUS_MAP),
LintId::of(methods::SUSPICIOUS_SPLITN),
LintId::of(methods::UNINIT_ASSUMED_INIT),
LintId::of(methods::UNNECESSARY_FILTER_MAP),
LintId::of(methods::UNNECESSARY_FIND_MAP),
LintId::of(methods::UNNECESSARY_FOLD),
LintId::of(methods::UNNECESSARY_LAZY_EVALUATIONS),
LintId::of(methods::UNNECESSARY_TO_OWNED),
LintId::of(methods::UNWRAP_OR_ELSE_DEFAULT),
LintId::of(methods::USELESS_ASREF),
LintId::of(methods::WRONG_SELF_CONVENTION),
LintId::of(methods::ZST_OFFSET),
LintId::of(minmax::MIN_MAX),
LintId::of(misc::CMP_NAN),
LintId::of(misc::CMP_OWNED),
LintId::of(misc::MODULO_ONE),
LintId::of(misc::SHORT_CIRCUIT_STATEMENT),
LintId::of(misc::TOPLEVEL_REF_ARG),
LintId::of(misc::ZERO_PTR),
LintId::of(misc_early::BUILTIN_TYPE_SHADOW),
LintId::of(misc_early::DOUBLE_NEG),
LintId::of(misc_early::DUPLICATE_UNDERSCORE_ARGUMENT),
LintId::of(misc_early::MIXED_CASE_HEX_LITERALS),
LintId::of(misc_early::REDUNDANT_PATTERN),
LintId::of(misc_early::UNNEEDED_WILDCARD_PATTERN),
LintId::of(misc_early::ZERO_PREFIXED_LITERAL),
LintId::of(mut_key::MUTABLE_KEY_TYPE),
LintId::of(mut_mutex_lock::MUT_MUTEX_LOCK),
LintId::of(mut_reference::UNNECESSARY_MUT_PASSED),
LintId::of(needless_arbitrary_self_type::NEEDLESS_ARBITRARY_SELF_TYPE),
LintId::of(needless_bool::BOOL_COMPARISON),
LintId::of(needless_bool::NEEDLESS_BOOL),
LintId::of(needless_borrowed_ref::NEEDLESS_BORROWED_REFERENCE),
LintId::of(needless_late_init::NEEDLESS_LATE_INIT),
LintId::of(needless_question_mark::NEEDLESS_QUESTION_MARK),
LintId::of(needless_update::NEEDLESS_UPDATE),
LintId::of(neg_cmp_op_on_partial_ord::NEG_CMP_OP_ON_PARTIAL_ORD),
LintId::of(neg_multiply::NEG_MULTIPLY),
LintId::of(new_without_default::NEW_WITHOUT_DEFAULT),
LintId::of(no_effect::NO_EFFECT),
LintId::of(no_effect::UNNECESSARY_OPERATION),
LintId::of(non_copy_const::BORROW_INTERIOR_MUTABLE_CONST),
LintId::of(non_copy_const::DECLARE_INTERIOR_MUTABLE_CONST),
LintId::of(non_expressive_names::JUST_UNDERSCORES_AND_DIGITS),
LintId::of(non_octal_unix_permissions::NON_OCTAL_UNIX_PERMISSIONS),
LintId::of(octal_escapes::OCTAL_ESCAPES),
LintId::of(only_used_in_recursion::ONLY_USED_IN_RECURSION),
LintId::of(open_options::NONSENSICAL_OPEN_OPTIONS),
LintId::of(option_env_unwrap::OPTION_ENV_UNWRAP),
LintId::of(overflow_check_conditional::OVERFLOW_CHECK_CONDITIONAL),
LintId::of(partialeq_ne_impl::PARTIALEQ_NE_IMPL),
LintId::of(precedence::PRECEDENCE),
LintId::of(ptr::CMP_NULL),
LintId::of(ptr::INVALID_NULL_PTR_USAGE),
LintId::of(ptr::MUT_FROM_REF),
LintId::of(ptr::PTR_ARG),
LintId::of(ptr_eq::PTR_EQ),
LintId::of(ptr_offset_with_cast::PTR_OFFSET_WITH_CAST),
LintId::of(question_mark::QUESTION_MARK),
LintId::of(ranges::MANUAL_RANGE_CONTAINS),
LintId::of(ranges::RANGE_ZIP_WITH_LEN),
LintId::of(ranges::REVERSED_EMPTY_RANGES),
LintId::of(redundant_clone::REDUNDANT_CLONE),
LintId::of(redundant_closure_call::REDUNDANT_CLOSURE_CALL),
LintId::of(redundant_field_names::REDUNDANT_FIELD_NAMES),
LintId::of(redundant_slicing::REDUNDANT_SLICING),
LintId::of(redundant_static_lifetimes::REDUNDANT_STATIC_LIFETIMES),
LintId::of(reference::DEREF_ADDROF),
LintId::of(regex::INVALID_REGEX),
LintId::of(repeat_once::REPEAT_ONCE),
LintId::of(returns::LET_AND_RETURN),
LintId::of(returns::NEEDLESS_RETURN),
LintId::of(self_assignment::SELF_ASSIGNMENT),
LintId::of(self_named_constructors::SELF_NAMED_CONSTRUCTORS),
LintId::of(serde_api::SERDE_API_MISUSE),
LintId::of(single_component_path_imports::SINGLE_COMPONENT_PATH_IMPORTS),
LintId::of(size_of_in_element_count::SIZE_OF_IN_ELEMENT_COUNT),
LintId::of(slow_vector_initialization::SLOW_VECTOR_INITIALIZATION),
LintId::of(strings::STRING_FROM_UTF8_AS_BYTES),
LintId::of(strlen_on_c_strings::STRLEN_ON_C_STRINGS),
LintId::of(suspicious_trait_impl::SUSPICIOUS_ARITHMETIC_IMPL),
LintId::of(suspicious_trait_impl::SUSPICIOUS_OP_ASSIGN_IMPL),
LintId::of(swap::ALMOST_SWAPPED),
LintId::of(swap::MANUAL_SWAP),
LintId::of(tabs_in_doc_comments::TABS_IN_DOC_COMMENTS),
LintId::of(temporary_assignment::TEMPORARY_ASSIGNMENT),
LintId::of(to_digit_is_some::TO_DIGIT_IS_SOME),
LintId::of(transmute::CROSSPOINTER_TRANSMUTE),
LintId::of(transmute::TRANSMUTES_EXPRESSIBLE_AS_PTR_CASTS),
LintId::of(transmute::TRANSMUTE_BYTES_TO_STR),
LintId::of(transmute::TRANSMUTE_FLOAT_TO_INT),
LintId::of(transmute::TRANSMUTE_INT_TO_BOOL),
LintId::of(transmute::TRANSMUTE_INT_TO_CHAR),
LintId::of(transmute::TRANSMUTE_INT_TO_FLOAT),
LintId::of(transmute::TRANSMUTE_NUM_TO_BYTES),
LintId::of(transmute::TRANSMUTE_PTR_TO_REF),
LintId::of(transmute::UNSOUND_COLLECTION_TRANSMUTE),
LintId::of(transmute::WRONG_TRANSMUTE),
LintId::of(transmuting_null::TRANSMUTING_NULL),
LintId::of(types::BORROWED_BOX),
LintId::of(types::BOX_COLLECTION),
LintId::of(types::REDUNDANT_ALLOCATION),
LintId::of(types::TYPE_COMPLEXITY),
LintId::of(types::VEC_BOX),
LintId::of(unicode::INVISIBLE_CHARACTERS),
LintId::of(uninit_vec::UNINIT_VEC),
LintId::of(unit_hash::UNIT_HASH),
LintId::of(unit_return_expecting_ord::UNIT_RETURN_EXPECTING_ORD),
LintId::of(unit_types::LET_UNIT_VALUE),
LintId::of(unit_types::UNIT_ARG),
LintId::of(unit_types::UNIT_CMP),
LintId::of(unnamed_address::FN_ADDRESS_COMPARISONS),
LintId::of(unnamed_address::VTABLE_ADDRESS_COMPARISONS),
LintId::of(unnecessary_owned_empty_strings::UNNECESSARY_OWNED_EMPTY_STRINGS),
LintId::of(unnecessary_sort_by::UNNECESSARY_SORT_BY),
LintId::of(unsafe_removed_from_name::UNSAFE_REMOVED_FROM_NAME),
LintId::of(unused_io_amount::UNUSED_IO_AMOUNT),
LintId::of(unused_unit::UNUSED_UNIT),
LintId::of(unwrap::PANICKING_UNWRAP),
LintId::of(unwrap::UNNECESSARY_UNWRAP),
LintId::of(upper_case_acronyms::UPPER_CASE_ACRONYMS),
LintId::of(useless_conversion::USELESS_CONVERSION),
LintId::of(vec::USELESS_VEC),
LintId::of(vec_init_then_push::VEC_INIT_THEN_PUSH),
LintId::of(vec_resize_to_zero::VEC_RESIZE_TO_ZERO),
LintId::of(write::PRINTLN_EMPTY_STRING),
LintId::of(write::PRINT_LITERAL),
LintId::of(write::PRINT_WITH_NEWLINE),
LintId::of(write::WRITELN_EMPTY_STRING),
LintId::of(write::WRITE_LITERAL),
LintId::of(write::WRITE_WITH_NEWLINE),
LintId::of(zero_div_zero::ZERO_DIVIDED_BY_ZERO),
])
| 48.426036 | 81 | 0.753482 |
dd7915e3660c21a251156fba6f4b1c1963febe2e
| 4,460 |
use types::Type;
use error;
use parser;
use parser::symbolparser;
use error::{SyntaxError, SyntaxErrorType, SyntaxErrorFactory};
#[derive(Clone, Debug)]
pub struct Block(pub Vec<SymbolDef>);
impl Block {
	/// Returns true if any symbol directly in this block is a comma.
	pub fn is_comma_delimited(&self) -> bool {
		// Short-circuiting search instead of a manual loop with a
		// trailing `return false`.
		self.0.iter().any(|def| match def.symbol {
			Symbol::Comma => true,
			_ => false,
		})
	}
	/// Splits the symbols into groups separated by commas.
	///
	/// Always yields `number of commas + 1` groups, so empty and
	/// trailing segments are preserved.
	pub fn split_commas(&self) -> Vec<Vec<SymbolDef>> {
		let mut groups = vec![];
		let mut current = vec![];
		for def in &self.0 {
			match def.symbol {
				Symbol::Comma => {
					// Close the current group at each comma.
					groups.push(current);
					current = vec![];
				},
				_ => current.push(def.clone())
			}
		}
		groups.push(current);
		groups
	}
}
/* Symbols:
 * ! - define
 * <a@b> - user
 * "a" - text
 * a - identifier
 * {a;b;c;} - curlybraced
 * (a,b,c) - parenthesis
 * > - arrow
 * , - comma
 * ; - semicolon
 **/
/// One lexical/syntactic element produced by the tokenizer (see the
/// legend above for the surface syntax of each kind).
#[derive(Clone, Debug)]
pub enum Symbol {
	// Structures
	/// `{ ... }` block of statements.
	CurlyBraced(Block),
	/// `( ... )` group; may hold a tuple or a sub-expression.
	Parenthesis(Block),
	// Types
	/// `<a@b>` user literal; the two blocks are the halves around `@`.
	UserPath(Block, Block),
	/// Bare identifier.
	Identifier(String),
	/// Quoted text literal.
	Text(String),
	// Syntax
	Comma,
	Semicolon,
	/// Indexing expression block.
	Index(Block),
	/// Slice with optional start and end blocks.
	Slice(Option<Block>, Option<Block>),
	If,
	Else,
	ElseIf,
	// Operators
	Define,
	Arrow,
	Addition,
	Receive,
	Assign,
	Modifier,
}
/// A `Symbol` paired with the error factory used to build
/// `SyntaxError`s attributed to it.
#[derive(Clone, Debug)]
pub struct SymbolDef {
	// Used via gen_error() to report errors for this symbol.
	pub errfactory: error::SyntaxErrorFactory,
	// The parsed symbol itself.
	pub symbol: Symbol
}
/// Operator descriptor: precedence plus two flags.
///
/// The payload is `(precedence, preval, postval)`; `Neither` marks
/// symbols that are not operators. The flags presumably indicate
/// whether the operator takes a value before/after it — confirm
/// against symbolparser's usage.
#[derive(Clone, Copy, Debug)]
pub enum OperatorType {
	LeftToRight(usize, bool, bool),
	RightToLeft(usize, bool, bool),
	Neither
}
impl OperatorType {
	/// Compares this operator's precedence against `other`.
	/// Left-to-right operators use strict `>`, right-to-left use `>=`,
	/// which is how associativity is encoded; `Neither` never wins.
	pub fn compare(&self, other: usize) -> bool {
		match *self {
			OperatorType::LeftToRight(val, _, _) => val > other,
			OperatorType::RightToLeft(val, _, _) => val >= other,
			OperatorType::Neither => false
		}
	}
	/// Returns the precedence value, or 0 for `Neither`.
	pub fn get(&self) -> usize {
		match *self {
			OperatorType::LeftToRight(val, _, _) => val,
			OperatorType::RightToLeft(val, _, _) => val,
			OperatorType::Neither => 0
		}
	}
	/// Returns true for any variant other than `Neither`.
	pub fn is_op(&self) -> bool {
		// Direct match instead of the verbose if-let/else-bool form.
		match *self {
			OperatorType::Neither => false,
			_ => true
		}
	}
	/// Returns the first flag (false for `Neither`).
	pub fn preval(&self) -> bool {
		match *self {
			OperatorType::LeftToRight(_, l, _) => l,
			OperatorType::RightToLeft(_, l, _) => l,
			OperatorType::Neither => false
		}
	}
	/// Returns the second flag (false for `Neither`).
	pub fn postval(&self) -> bool {
		match *self {
			OperatorType::LeftToRight(_, _, r) => r,
			OperatorType::RightToLeft(_, _, r) => r,
			OperatorType::Neither => false
		}
	}
}
impl SymbolDef {
	/// Converts this symbol into a runtime `Type` value.
	///
	/// Only value-like symbols convert (text, identifiers, user paths,
	/// parenthesised groups); everything else yields a `NotAType` error.
	pub fn get_type(&self) -> Result<Type, SyntaxError> {
		match self.symbol {
			Symbol::Text(ref val) => Ok(Type::Text(val.clone())),
			// Identifiers are represented as plain text values here.
			Symbol::Identifier(ref val) => Ok(Type::Text(val.clone())),
			Symbol::UserPath(ref a, ref b) => {
				Ok(Type::UserPath(
					Box::new(try!(symbolparser::parse_type(&a.0))),
					Box::new(try!(symbolparser::parse_type(&b.0)))
				))
			},
			Symbol::Parenthesis(ref val) => {
				if val.is_comma_delimited() {
					// Comma-separated parenthesis becomes a tuple; each
					// comma segment is either a sub-expression or a run
					// of plain values appended individually.
					let mut tuple = Vec::new();
					for v in val.split_commas() {
						if parser::symbolparser::is_expression(&v) {
							tuple.push(Type::Expression(
								Box::new(try!(symbolparser::parse_expression(&v,
									SyntaxErrorFactory::from_symbols(&v))))));
						} else {
							for symdef in v {
								tuple.push(try!(symdef.get_type()));
							}
						}
					}
					Ok(Type::Tuple(tuple))
				} else {
					// No commas: either one wrapped expression or exactly
					// one plain value.
					if symbolparser::is_expression(&val.0) {
						Ok(Type::Expression(Box::new(try!(symbolparser::parse_expression(&val.0,
							SyntaxErrorFactory::from_symbols(&val.0))))))
					} else {
						assert!(val.0.len() == 1);
						Ok(try!(try!(val.0.get(0)
							.ok_or(self.errfactory.gen_error(SyntaxErrorType::BadExpression)))
							.get_type()))
					}
				}
			},
			_ => Err(self.errfactory.gen_error(SyntaxErrorType::NotAType))
		}
	}
	/// Maps this symbol to its operator precedence/associativity.
	///
	/// Non-operator symbols return `OperatorType::Neither`; how the
	/// numeric ranks interact is defined by `OperatorType::compare`.
	pub fn get_operator(&self) -> OperatorType {
		match self.symbol {
			// Separators
			// Symbol::Comma => OperatorType::LeftToRight(2000, None, None),
			// Operators
			// Symbol::Define => OperatorType::LeftToRight(1003, false, true),
			Symbol::Assign => OperatorType::LeftToRight(1002, true, true),
			Symbol::Arrow => OperatorType::LeftToRight(1001, true, true),
			Symbol::Addition => OperatorType::LeftToRight(1000, true, true),
			// Modifier operators
			Symbol::Modifier => OperatorType::RightToLeft(3, true, true),
			Symbol::Slice(_,_) => OperatorType::RightToLeft(2, true, false),
			Symbol::Index(_) => OperatorType::RightToLeft(2, true, false),
			Symbol::Receive => OperatorType::LeftToRight(1, false, true),
			_ => OperatorType::Neither
		}
	}
}
| 23.597884 | 78 | 0.611435 |
11514b7cfb36292700e1cf3ae148716b5fb3cec0
| 767 |
use failure::Error;
use hyper::Uri;
use relative_path::RelativePathBuf;
use crate::models::repo::RepoPath;
/// Base URL for fetching raw file content from gitlab.com.
/// (`'static` is implied on a `const` reference and was redundant.)
const GITLAB_USER_CONTENT_BASE_URI: &str = "https://gitlab.com";
/// Builds the gitlab.com URI for the raw contents of `path` inside the
/// repository identified by `repo_path`, of the shape
/// `https://gitlab.com/{owner}/{repo}/raw/HEAD/{path}`.
///
/// # Errors
///
/// Fails only if the assembled string does not parse as a valid `Uri`.
pub fn get_manifest_uri(repo_path: &RepoPath, path: &RelativePathBuf) -> Result<Uri, Error> {
    let path_str: &str = path.as_ref();
    // gitlab will return a 308 if the Uri ends with, say, `.../raw/HEAD//Cargo.toml`, so make
    // sure that last slash isn't doubled
    let slash_path = if path_str.starts_with('/') {
        &path_str[1..]
    } else {
        path_str
    };
    Ok(format!(
        "{}/{}/{}/raw/HEAD/{}",
        GITLAB_USER_CONTENT_BASE_URI,
        repo_path.qual.as_ref(),
        repo_path.name.as_ref(),
        slash_path
    )
    .parse::<Uri>()?)
}
| 28.407407 | 94 | 0.621904 |
edc4a0c7b97e4f1477deba568f8e869fa4cacb90
| 1,583 |
// Copyright 2019-2020 Twitter, Inc.
// Licensed under the Apache License, Version 2.0
// http://www.apache.org/licenses/LICENSE-2.0
use serde_derive::Deserialize;
use crate::config::SamplerConfig;
use super::stat::*;
use crate::samplers::Common;
// Deserialised configuration for the memcache sampler; unknown keys in the
// config file are rejected outright.
#[derive(Debug, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct MemcacheConfig {
    // Whether this sampler is active; serde defaults this to `false`.
    #[serde(default)]
    enabled: bool,
    // Sampling interval; `None` means no explicit interval was configured.
    #[serde(default)]
    interval: Option<usize>,
    // Percentiles to report; defaults to the crate-wide standard set.
    #[serde(default = "crate::common::default_percentiles")]
    percentiles: Vec<f64>,
    // Memcache endpoint to sample; required semantics unclear from here —
    // NOTE(review): no serde default, but the field is Option, so it may be
    // omitted in the config file.
    endpoint: Option<String>,
}
impl Default for MemcacheConfig {
fn default() -> Self {
Self {
enabled: Default::default(),
interval: Default::default(),
percentiles: crate::common::default_percentiles(),
endpoint: None,
}
}
}
impl MemcacheConfig {
    /// Returns a copy of the configured endpoint, if one was provided.
    pub fn endpoint(&self) -> Option<String> {
        self.endpoint.as_ref().cloned()
    }
}
// Trait plumbing: forwards the stored config fields to the sampler framework.
impl SamplerConfig for MemcacheConfig {
    type Statistic = MemcacheStatistic;
    fn enabled(&self) -> bool {
        self.enabled
    }
    fn interval(&self) -> Option<usize> {
        self.interval
    }
    fn percentiles(&self) -> &[f64] {
        &self.percentiles
    }
    // No statically-known statistics for memcache; returns an empty list.
    #[allow(unused_variables)]
    fn statistics(&self,common: &Common) -> Vec<<Self as SamplerConfig>::Statistic> {
        Vec::new()
    }
    #[allow(unused_variables)]
    fn set_statistics_detail(&self,common: &Common) -> Vec<<Self as SamplerConfig>::Statistic> {
        // we don't know the statistics yet, register at runtime instead
        Vec::new()
    }
}
| 23.626866 | 96 | 0.624763 |
33ce98991aa9910f8e44a4a9f00042d6fc26e1d3
| 687 |
use rustorm::{
DbError,
FromDao,
Pool,
Rows,
ToColumnNames,
ToTableName,
};
/// Run using:
/// ```
/// cargo run --example update_usage_mysql --features "with-mysql"
/// ```
fn main() {
let db_url = "mysql://root:r00tpwdh3r3@localhost/sakila";
let mut pool = Pool::new();
pool.ensure(db_url);
let mut em = pool
.em_mut(db_url)
.expect("Should be able to get a connection here..");
let sql = "UPDATE actor SET last_name = ? WHERE first_name = ?".to_string();
let rows: Result<Rows, DbError> = em
.db()
.execute_sql_with_return(&sql, &[&"JONES".into(), &"TOM".into()]);
println!("rows: {:#?}", rows);
}
| 24.535714 | 80 | 0.577875 |
394fac6726cf44336baa38dd1e59f3faea9ef0bb
| 1,125 |
/// Represents a Token from a Javalette program.
///
/// Usually constructed by a `Lexer` and used by the `Parser` to build the `AST`.
#[derive(Debug, Clone, PartialEq)]
pub enum Token<'input> {
    /// End-of-input marker.
    EOF,
    // symbols (delimiters and punctuation)
    LeftParenthesis,
    RightParenthesis,
    LeftBracket,
    RightBracket,
    LeftSquare,
    RightSquare,
    Dot,
    SemiColon,
    Comma,
    Colon,
    DotDotDot,
    // operators (assignment, arithmetic, comparison, logic)
    Equal,
    Plus,
    Minus,
    Star,
    Slash,
    Percent,
    PlusPlus,
    MinusMinus,
    EqualEqual,
    BangEqual,
    Less,
    LessEqual,
    Greater,
    GreaterEqual,
    PipePipe,
    Amp,
    AmpAmp,
    Bang,
    Arrow,
    // keywords
    ExternKeyword,
    StructKeyword,
    WhileKeyword,
    ForKeyword,
    IfKeyword,
    ElseKeyword,
    ReturnKeyword,
    BreakKeyword,
    ContinueKeyword,
    AsKeyword,
    FnKeyword,
    LetKeyword,
    NullptrKeyword,
    // NOTE(review): primitive-type keywords below appear deliberately
    // disabled — kept for reference.
    /*IntKeyword,
    DoubleKeyword,
    BooleanKeyword,
    VoidKeyword,
    StringKeyword,*/
    // Payload-carrying tokens; string payloads borrow from the source text.
    Identifier(&'input str),
    IntegerLiteral(i64),
    DoubleLiteral(f64),
    BooleanLiteral(bool),
    StringLiteral(&'input str),
}
| 17.045455 | 81 | 0.617778 |
16a0dd6af5d2bac468634df45957e04dc9790de9
| 58,673 |
// Generated by gir (https://github.com/gtk-rs/gir @ b193568)
// from gir-files (https://github.com/gtk-rs/gir-files @ 7d95377)
// from gst-gir-files (https://gitlab.freedesktop.org/gstreamer/gir-files-rs.git @ 831b444)
// DO NOT EDIT
#![allow(non_camel_case_types, non_upper_case_globals, non_snake_case)]
#![allow(
clippy::approx_constant,
clippy::type_complexity,
clippy::unreadable_literal,
clippy::upper_case_acronyms
)]
#![cfg_attr(feature = "dox", feature(doc_cfg))]
use glib_sys as glib;
use gstreamer_base_sys as gst_base;
use gstreamer_sys as gst;
#[allow(unused_imports)]
use libc::{
c_char, c_double, c_float, c_int, c_long, c_short, c_uchar, c_uint, c_ulong, c_ushort, c_void,
intptr_t, size_t, ssize_t, time_t, uintptr_t, FILE,
};
#[allow(unused_imports)]
use glib::{gboolean, gconstpointer, gpointer, GType};
// Enums
pub type GstRTCPFBType = c_int;
pub const GST_RTCP_FB_TYPE_INVALID: GstRTCPFBType = 0;
pub const GST_RTCP_RTPFB_TYPE_NACK: GstRTCPFBType = 1;
pub const GST_RTCP_RTPFB_TYPE_TMMBR: GstRTCPFBType = 3;
pub const GST_RTCP_RTPFB_TYPE_TMMBN: GstRTCPFBType = 4;
pub const GST_RTCP_RTPFB_TYPE_RTCP_SR_REQ: GstRTCPFBType = 5;
pub const GST_RTCP_RTPFB_TYPE_TWCC: GstRTCPFBType = 15;
pub const GST_RTCP_PSFB_TYPE_PLI: GstRTCPFBType = 1;
pub const GST_RTCP_PSFB_TYPE_SLI: GstRTCPFBType = 2;
pub const GST_RTCP_PSFB_TYPE_RPSI: GstRTCPFBType = 3;
pub const GST_RTCP_PSFB_TYPE_AFB: GstRTCPFBType = 15;
pub const GST_RTCP_PSFB_TYPE_FIR: GstRTCPFBType = 4;
pub const GST_RTCP_PSFB_TYPE_TSTR: GstRTCPFBType = 5;
pub const GST_RTCP_PSFB_TYPE_TSTN: GstRTCPFBType = 6;
pub const GST_RTCP_PSFB_TYPE_VBCN: GstRTCPFBType = 7;
pub type GstRTCPSDESType = c_int;
pub const GST_RTCP_SDES_INVALID: GstRTCPSDESType = -1;
pub const GST_RTCP_SDES_END: GstRTCPSDESType = 0;
pub const GST_RTCP_SDES_CNAME: GstRTCPSDESType = 1;
pub const GST_RTCP_SDES_NAME: GstRTCPSDESType = 2;
pub const GST_RTCP_SDES_EMAIL: GstRTCPSDESType = 3;
pub const GST_RTCP_SDES_PHONE: GstRTCPSDESType = 4;
pub const GST_RTCP_SDES_LOC: GstRTCPSDESType = 5;
pub const GST_RTCP_SDES_TOOL: GstRTCPSDESType = 6;
pub const GST_RTCP_SDES_NOTE: GstRTCPSDESType = 7;
pub const GST_RTCP_SDES_PRIV: GstRTCPSDESType = 8;
pub type GstRTCPType = c_int;
pub const GST_RTCP_TYPE_INVALID: GstRTCPType = 0;
pub const GST_RTCP_TYPE_SR: GstRTCPType = 200;
pub const GST_RTCP_TYPE_RR: GstRTCPType = 201;
pub const GST_RTCP_TYPE_SDES: GstRTCPType = 202;
pub const GST_RTCP_TYPE_BYE: GstRTCPType = 203;
pub const GST_RTCP_TYPE_APP: GstRTCPType = 204;
pub const GST_RTCP_TYPE_RTPFB: GstRTCPType = 205;
pub const GST_RTCP_TYPE_PSFB: GstRTCPType = 206;
pub const GST_RTCP_TYPE_XR: GstRTCPType = 207;
pub type GstRTCPXRType = c_int;
pub const GST_RTCP_XR_TYPE_INVALID: GstRTCPXRType = -1;
pub const GST_RTCP_XR_TYPE_LRLE: GstRTCPXRType = 1;
pub const GST_RTCP_XR_TYPE_DRLE: GstRTCPXRType = 2;
pub const GST_RTCP_XR_TYPE_PRT: GstRTCPXRType = 3;
pub const GST_RTCP_XR_TYPE_RRT: GstRTCPXRType = 4;
pub const GST_RTCP_XR_TYPE_DLRR: GstRTCPXRType = 5;
pub const GST_RTCP_XR_TYPE_SSUMM: GstRTCPXRType = 6;
pub const GST_RTCP_XR_TYPE_VOIP_METRICS: GstRTCPXRType = 7;
pub type GstRTPPayload = c_int;
pub const GST_RTP_PAYLOAD_PCMU: GstRTPPayload = 0;
pub const GST_RTP_PAYLOAD_1016: GstRTPPayload = 1;
pub const GST_RTP_PAYLOAD_G721: GstRTPPayload = 2;
pub const GST_RTP_PAYLOAD_GSM: GstRTPPayload = 3;
pub const GST_RTP_PAYLOAD_G723: GstRTPPayload = 4;
pub const GST_RTP_PAYLOAD_DVI4_8000: GstRTPPayload = 5;
pub const GST_RTP_PAYLOAD_DVI4_16000: GstRTPPayload = 6;
pub const GST_RTP_PAYLOAD_LPC: GstRTPPayload = 7;
pub const GST_RTP_PAYLOAD_PCMA: GstRTPPayload = 8;
pub const GST_RTP_PAYLOAD_G722: GstRTPPayload = 9;
pub const GST_RTP_PAYLOAD_L16_STEREO: GstRTPPayload = 10;
pub const GST_RTP_PAYLOAD_L16_MONO: GstRTPPayload = 11;
pub const GST_RTP_PAYLOAD_QCELP: GstRTPPayload = 12;
pub const GST_RTP_PAYLOAD_CN: GstRTPPayload = 13;
pub const GST_RTP_PAYLOAD_MPA: GstRTPPayload = 14;
pub const GST_RTP_PAYLOAD_G728: GstRTPPayload = 15;
pub const GST_RTP_PAYLOAD_DVI4_11025: GstRTPPayload = 16;
pub const GST_RTP_PAYLOAD_DVI4_22050: GstRTPPayload = 17;
pub const GST_RTP_PAYLOAD_G729: GstRTPPayload = 18;
pub const GST_RTP_PAYLOAD_CELLB: GstRTPPayload = 25;
pub const GST_RTP_PAYLOAD_JPEG: GstRTPPayload = 26;
pub const GST_RTP_PAYLOAD_NV: GstRTPPayload = 28;
pub const GST_RTP_PAYLOAD_H261: GstRTPPayload = 31;
pub const GST_RTP_PAYLOAD_MPV: GstRTPPayload = 32;
pub const GST_RTP_PAYLOAD_MP2T: GstRTPPayload = 33;
pub const GST_RTP_PAYLOAD_H263: GstRTPPayload = 34;
pub type GstRTPProfile = c_int;
pub const GST_RTP_PROFILE_UNKNOWN: GstRTPProfile = 0;
pub const GST_RTP_PROFILE_AVP: GstRTPProfile = 1;
pub const GST_RTP_PROFILE_SAVP: GstRTPProfile = 2;
pub const GST_RTP_PROFILE_AVPF: GstRTPProfile = 3;
pub const GST_RTP_PROFILE_SAVPF: GstRTPProfile = 4;
// Constants
pub const GST_RTCP_MAX_BYE_SSRC_COUNT: c_int = 31;
pub const GST_RTCP_MAX_RB_COUNT: c_int = 31;
pub const GST_RTCP_MAX_SDES: c_int = 255;
pub const GST_RTCP_MAX_SDES_ITEM_COUNT: c_int = 31;
pub const GST_RTCP_REDUCED_SIZE_VALID_MASK: c_int = 57592;
pub const GST_RTCP_VALID_MASK: c_int = 57598;
pub const GST_RTCP_VALID_VALUE: c_int = 200;
pub const GST_RTCP_VERSION: c_int = 2;
pub const GST_RTP_HDREXT_BASE: *const c_char =
b"urn:ietf:params:rtp-hdrext:\0" as *const u8 as *const c_char;
pub const GST_RTP_HDREXT_ELEMENT_CLASS: *const c_char =
b"Network/Extension/RTPHeader\0" as *const u8 as *const c_char;
pub const GST_RTP_HDREXT_NTP_56: *const c_char = b"ntp-56\0" as *const u8 as *const c_char;
pub const GST_RTP_HDREXT_NTP_56_SIZE: c_int = 7;
pub const GST_RTP_HDREXT_NTP_64: *const c_char = b"ntp-64\0" as *const u8 as *const c_char;
pub const GST_RTP_HDREXT_NTP_64_SIZE: c_int = 8;
pub const GST_RTP_HEADER_EXTENSION_URI_METADATA_KEY: *const c_char =
b"RTP-Header-Extension-URI\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_1016_STRING: *const c_char = b"1\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_CELLB_STRING: *const c_char = b"25\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_CN_STRING: *const c_char = b"13\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_DVI4_11025_STRING: *const c_char = b"16\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_DVI4_16000_STRING: *const c_char = b"6\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_DVI4_22050_STRING: *const c_char = b"17\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_DVI4_8000_STRING: *const c_char = b"5\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_DYNAMIC_STRING: *const c_char =
b"[96, 127]\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_G721_STRING: *const c_char = b"2\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_G722_STRING: *const c_char = b"9\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_G723_53: c_int = 17;
pub const GST_RTP_PAYLOAD_G723_53_STRING: *const c_char = b"17\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_G723_63: c_int = 16;
pub const GST_RTP_PAYLOAD_G723_63_STRING: *const c_char = b"16\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_G723_STRING: *const c_char = b"4\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_G728_STRING: *const c_char = b"15\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_G729_STRING: *const c_char = b"18\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_GSM_STRING: *const c_char = b"3\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_H261_STRING: *const c_char = b"31\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_H263_STRING: *const c_char = b"34\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_JPEG_STRING: *const c_char = b"26\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_L16_MONO_STRING: *const c_char = b"11\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_L16_STEREO_STRING: *const c_char = b"10\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_LPC_STRING: *const c_char = b"7\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_MP2T_STRING: *const c_char = b"33\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_MPA_STRING: *const c_char = b"14\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_MPV_STRING: *const c_char = b"32\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_NV_STRING: *const c_char = b"28\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_PCMA_STRING: *const c_char = b"8\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_PCMU_STRING: *const c_char = b"0\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_QCELP_STRING: *const c_char = b"12\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_TS41: c_int = 19;
pub const GST_RTP_PAYLOAD_TS41_STRING: *const c_char = b"19\0" as *const u8 as *const c_char;
pub const GST_RTP_PAYLOAD_TS48: c_int = 18;
pub const GST_RTP_PAYLOAD_TS48_STRING: *const c_char = b"18\0" as *const u8 as *const c_char;
pub const GST_RTP_SOURCE_META_MAX_CSRC_COUNT: c_int = 15;
pub const GST_RTP_VERSION: c_int = 2;
// Flags
pub type GstRTPBufferFlags = c_uint;
pub const GST_RTP_BUFFER_FLAG_RETRANSMISSION: GstRTPBufferFlags = 1048576;
pub const GST_RTP_BUFFER_FLAG_REDUNDANT: GstRTPBufferFlags = 2097152;
pub const GST_RTP_BUFFER_FLAG_LAST: GstRTPBufferFlags = 268435456;
pub type GstRTPBufferMapFlags = c_uint;
pub const GST_RTP_BUFFER_MAP_FLAG_SKIP_PADDING: GstRTPBufferMapFlags = 65536;
pub const GST_RTP_BUFFER_MAP_FLAG_LAST: GstRTPBufferMapFlags = 16777216;
pub type GstRTPHeaderExtensionFlags = c_uint;
pub const GST_RTP_HEADER_EXTENSION_ONE_BYTE: GstRTPHeaderExtensionFlags = 1;
pub const GST_RTP_HEADER_EXTENSION_TWO_BYTE: GstRTPHeaderExtensionFlags = 2;
// Records
#[repr(C)]
#[derive(Copy, Clone)]
pub struct GstRTCPBuffer {
pub buffer: *mut gst::GstBuffer,
pub map: gst::GstMapInfo,
}
impl ::std::fmt::Debug for GstRTCPBuffer {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("GstRTCPBuffer @ {:p}", self))
.field("buffer", &self.buffer)
.field("map", &self.map)
.finish()
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct GstRTCPPacket {
pub rtcp: *mut GstRTCPBuffer,
pub offset: c_uint,
pub padding: gboolean,
pub count: u8,
pub type_: GstRTCPType,
pub length: u16,
pub item_offset: c_uint,
pub item_count: c_uint,
pub entry_offset: c_uint,
}
impl ::std::fmt::Debug for GstRTCPPacket {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("GstRTCPPacket @ {:p}", self))
.field("rtcp", &self.rtcp)
.field("offset", &self.offset)
.finish()
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct GstRTPBaseAudioPayloadClass {
pub parent_class: GstRTPBasePayloadClass,
pub _gst_reserved: [gpointer; 4],
}
impl ::std::fmt::Debug for GstRTPBaseAudioPayloadClass {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("GstRTPBaseAudioPayloadClass @ {:p}", self))
.field("parent_class", &self.parent_class)
.finish()
}
}
#[repr(C)]
pub struct _GstRTPBaseAudioPayloadPrivate(c_void);
pub type GstRTPBaseAudioPayloadPrivate = *mut _GstRTPBaseAudioPayloadPrivate;
#[repr(C)]
#[derive(Copy, Clone)]
pub struct GstRTPBaseDepayloadClass {
pub parent_class: gst::GstElementClass,
pub set_caps:
Option<unsafe extern "C" fn(*mut GstRTPBaseDepayload, *mut gst::GstCaps) -> gboolean>,
pub process: Option<
unsafe extern "C" fn(*mut GstRTPBaseDepayload, *mut gst::GstBuffer) -> *mut gst::GstBuffer,
>,
pub packet_lost:
Option<unsafe extern "C" fn(*mut GstRTPBaseDepayload, *mut gst::GstEvent) -> gboolean>,
pub handle_event:
Option<unsafe extern "C" fn(*mut GstRTPBaseDepayload, *mut gst::GstEvent) -> gboolean>,
pub process_rtp_packet: Option<
unsafe extern "C" fn(*mut GstRTPBaseDepayload, *mut GstRTPBuffer) -> *mut gst::GstBuffer,
>,
pub _gst_reserved: [gpointer; 3],
}
impl ::std::fmt::Debug for GstRTPBaseDepayloadClass {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("GstRTPBaseDepayloadClass @ {:p}", self))
.field("parent_class", &self.parent_class)
.field("set_caps", &self.set_caps)
.field("process", &self.process)
.field("packet_lost", &self.packet_lost)
.field("handle_event", &self.handle_event)
.field("process_rtp_packet", &self.process_rtp_packet)
.finish()
}
}
#[repr(C)]
pub struct _GstRTPBaseDepayloadPrivate(c_void);
pub type GstRTPBaseDepayloadPrivate = *mut _GstRTPBaseDepayloadPrivate;
#[repr(C)]
#[derive(Copy, Clone)]
pub struct GstRTPBasePayloadClass {
pub parent_class: gst::GstElementClass,
pub get_caps: Option<
unsafe extern "C" fn(
*mut GstRTPBasePayload,
*mut gst::GstPad,
*mut gst::GstCaps,
) -> *mut gst::GstCaps,
>,
pub set_caps:
Option<unsafe extern "C" fn(*mut GstRTPBasePayload, *mut gst::GstCaps) -> gboolean>,
pub handle_buffer: Option<
unsafe extern "C" fn(*mut GstRTPBasePayload, *mut gst::GstBuffer) -> gst::GstFlowReturn,
>,
pub sink_event:
Option<unsafe extern "C" fn(*mut GstRTPBasePayload, *mut gst::GstEvent) -> gboolean>,
pub src_event:
Option<unsafe extern "C" fn(*mut GstRTPBasePayload, *mut gst::GstEvent) -> gboolean>,
pub query: Option<
unsafe extern "C" fn(
*mut GstRTPBasePayload,
*mut gst::GstPad,
*mut gst::GstQuery,
) -> gboolean,
>,
pub _gst_reserved: [gpointer; 4],
}
impl ::std::fmt::Debug for GstRTPBasePayloadClass {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("GstRTPBasePayloadClass @ {:p}", self))
.field("parent_class", &self.parent_class)
.field("get_caps", &self.get_caps)
.field("set_caps", &self.set_caps)
.field("handle_buffer", &self.handle_buffer)
.field("sink_event", &self.sink_event)
.field("src_event", &self.src_event)
.field("query", &self.query)
.finish()
}
}
#[repr(C)]
pub struct _GstRTPBasePayloadPrivate(c_void);
pub type GstRTPBasePayloadPrivate = *mut _GstRTPBasePayloadPrivate;
#[repr(C)]
#[derive(Copy, Clone)]
pub struct GstRTPBuffer {
pub buffer: *mut gst::GstBuffer,
pub state: c_uint,
pub data: [gpointer; 4],
pub size: [size_t; 4],
pub map: [gst::GstMapInfo; 4],
}
impl ::std::fmt::Debug for GstRTPBuffer {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("GstRTPBuffer @ {:p}", self))
.field("buffer", &self.buffer)
.field("state", &self.state)
.field("data", &self.data)
.field("size", &self.size)
.field("map", &self.map)
.finish()
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct GstRTPHeaderExtensionClass {
pub parent_class: gst::GstElementClass,
pub get_supported_flags:
Option<unsafe extern "C" fn(*mut GstRTPHeaderExtension) -> GstRTPHeaderExtensionFlags>,
pub get_max_size:
Option<unsafe extern "C" fn(*mut GstRTPHeaderExtension, *const gst::GstBuffer) -> size_t>,
pub write: Option<
unsafe extern "C" fn(
*mut GstRTPHeaderExtension,
*const gst::GstBuffer,
GstRTPHeaderExtensionFlags,
*mut gst::GstBuffer,
*mut u8,
size_t,
) -> size_t,
>,
pub read: Option<
unsafe extern "C" fn(
*mut GstRTPHeaderExtension,
GstRTPHeaderExtensionFlags,
*const u8,
size_t,
*mut gst::GstBuffer,
) -> gboolean,
>,
pub set_non_rtp_sink_caps:
Option<unsafe extern "C" fn(*mut GstRTPHeaderExtension, *mut gst::GstCaps) -> gboolean>,
pub update_non_rtp_src_caps:
Option<unsafe extern "C" fn(*mut GstRTPHeaderExtension, *mut gst::GstCaps) -> gboolean>,
pub set_attributes_from_caps:
Option<unsafe extern "C" fn(*mut GstRTPHeaderExtension, *mut gst::GstCaps) -> gboolean>,
pub set_caps_from_attributes:
Option<unsafe extern "C" fn(*mut GstRTPHeaderExtension, *mut gst::GstCaps) -> gboolean>,
pub _gst_reserved: [gpointer; 4],
}
impl ::std::fmt::Debug for GstRTPHeaderExtensionClass {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("GstRTPHeaderExtensionClass @ {:p}", self))
.field("parent_class", &self.parent_class)
.field("get_supported_flags", &self.get_supported_flags)
.field("get_max_size", &self.get_max_size)
.field("write", &self.write)
.field("read", &self.read)
.field("set_non_rtp_sink_caps", &self.set_non_rtp_sink_caps)
.field("update_non_rtp_src_caps", &self.update_non_rtp_src_caps)
.field("set_attributes_from_caps", &self.set_attributes_from_caps)
.field("set_caps_from_attributes", &self.set_caps_from_attributes)
.finish()
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct GstRTPPayloadInfo {
pub payload_type: u8,
pub media: *const c_char,
pub encoding_name: *const c_char,
pub clock_rate: c_uint,
pub encoding_parameters: *const c_char,
pub bitrate: c_uint,
pub _gst_reserved: [gpointer; 4],
}
impl ::std::fmt::Debug for GstRTPPayloadInfo {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("GstRTPPayloadInfo @ {:p}", self))
.field("payload_type", &self.payload_type)
.field("media", &self.media)
.field("encoding_name", &self.encoding_name)
.field("clock_rate", &self.clock_rate)
.field("encoding_parameters", &self.encoding_parameters)
.field("bitrate", &self.bitrate)
.finish()
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct GstRTPSourceMeta {
pub meta: gst::GstMeta,
pub ssrc: u32,
pub ssrc_valid: gboolean,
pub csrc: [u32; 15],
pub csrc_count: c_uint,
}
impl ::std::fmt::Debug for GstRTPSourceMeta {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("GstRTPSourceMeta @ {:p}", self))
.field("meta", &self.meta)
.field("ssrc", &self.ssrc)
.field("ssrc_valid", &self.ssrc_valid)
.field("csrc", &self.csrc)
.field("csrc_count", &self.csrc_count)
.finish()
}
}
// Classes
#[repr(C)]
#[derive(Copy, Clone)]
pub struct GstRTPBaseAudioPayload {
pub payload: GstRTPBasePayload,
pub priv_: *mut GstRTPBaseAudioPayloadPrivate,
pub base_ts: gst::GstClockTime,
pub frame_size: c_int,
pub frame_duration: c_int,
pub sample_size: c_int,
pub _gst_reserved: [gpointer; 4],
}
impl ::std::fmt::Debug for GstRTPBaseAudioPayload {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("GstRTPBaseAudioPayload @ {:p}", self))
.field("payload", &self.payload)
.field("priv_", &self.priv_)
.field("base_ts", &self.base_ts)
.field("frame_size", &self.frame_size)
.field("frame_duration", &self.frame_duration)
.field("sample_size", &self.sample_size)
.finish()
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct GstRTPBaseDepayload {
pub parent: gst::GstElement,
pub sinkpad: *mut gst::GstPad,
pub srcpad: *mut gst::GstPad,
pub clock_rate: c_uint,
pub segment: gst::GstSegment,
pub need_newsegment: gboolean,
pub priv_: *mut GstRTPBaseDepayloadPrivate,
pub _gst_reserved: [gpointer; 4],
}
impl ::std::fmt::Debug for GstRTPBaseDepayload {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("GstRTPBaseDepayload @ {:p}", self))
.field("parent", &self.parent)
.field("sinkpad", &self.sinkpad)
.field("srcpad", &self.srcpad)
.field("clock_rate", &self.clock_rate)
.field("segment", &self.segment)
.field("need_newsegment", &self.need_newsegment)
.finish()
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct GstRTPBasePayload {
pub element: gst::GstElement,
pub sinkpad: *mut gst::GstPad,
pub srcpad: *mut gst::GstPad,
pub ts_base: u32,
pub seqnum_base: u16,
pub media: *mut c_char,
pub encoding_name: *mut c_char,
pub dynamic: gboolean,
pub clock_rate: u32,
pub ts_offset: i32,
pub timestamp: u32,
pub seqnum_offset: i16,
pub seqnum: u16,
pub max_ptime: i64,
pub pt: c_uint,
pub ssrc: c_uint,
pub current_ssrc: c_uint,
pub mtu: c_uint,
pub segment: gst::GstSegment,
pub min_ptime: u64,
pub ptime: u64,
pub ptime_multiple: u64,
pub priv_: *mut GstRTPBasePayloadPrivate,
pub _gst_reserved: [gpointer; 4],
}
impl ::std::fmt::Debug for GstRTPBasePayload {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("GstRTPBasePayload @ {:p}", self))
.field("element", &self.element)
.finish()
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct GstRTPHeaderExtension {
pub parent: gst::GstElement,
pub _gst_reserved: [gpointer; 4],
}
impl ::std::fmt::Debug for GstRTPHeaderExtension {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("GstRTPHeaderExtension @ {:p}", self))
.field("parent", &self.parent)
.finish()
}
}
#[link(name = "gstrtp-1.0")]
extern "C" {
//=========================================================================
// GstRTCPFBType
//=========================================================================
pub fn gst_rtcpfb_type_get_type() -> GType;
//=========================================================================
// GstRTCPSDESType
//=========================================================================
pub fn gst_rtcpsdes_type_get_type() -> GType;
//=========================================================================
// GstRTCPType
//=========================================================================
pub fn gst_rtcp_type_get_type() -> GType;
//=========================================================================
// GstRTCPXRType
//=========================================================================
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcpxr_type_get_type() -> GType;
//=========================================================================
// GstRTPPayload
//=========================================================================
pub fn gst_rtp_payload_get_type() -> GType;
//=========================================================================
// GstRTPProfile
//=========================================================================
pub fn gst_rtp_profile_get_type() -> GType;
//=========================================================================
// GstRTPBufferFlags
//=========================================================================
#[cfg(any(feature = "v1_10", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_10")))]
pub fn gst_rtp_buffer_flags_get_type() -> GType;
//=========================================================================
// GstRTPBufferMapFlags
//=========================================================================
pub fn gst_rtp_buffer_map_flags_get_type() -> GType;
//=========================================================================
// GstRTPHeaderExtensionFlags
//=========================================================================
#[cfg(any(feature = "v1_20", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
pub fn gst_rtp_header_extension_flags_get_type() -> GType;
//=========================================================================
// GstRTCPBuffer
//=========================================================================
pub fn gst_rtcp_buffer_add_packet(
rtcp: *mut GstRTCPBuffer,
type_: GstRTCPType,
packet: *mut GstRTCPPacket,
) -> gboolean;
pub fn gst_rtcp_buffer_get_first_packet(
rtcp: *mut GstRTCPBuffer,
packet: *mut GstRTCPPacket,
) -> gboolean;
pub fn gst_rtcp_buffer_get_packet_count(rtcp: *mut GstRTCPBuffer) -> c_uint;
pub fn gst_rtcp_buffer_unmap(rtcp: *mut GstRTCPBuffer) -> gboolean;
pub fn gst_rtcp_buffer_map(
buffer: *mut gst::GstBuffer,
flags: gst::GstMapFlags,
rtcp: *mut GstRTCPBuffer,
) -> gboolean;
pub fn gst_rtcp_buffer_new(mtu: c_uint) -> *mut gst::GstBuffer;
pub fn gst_rtcp_buffer_new_copy_data(data: gconstpointer, len: c_uint) -> *mut gst::GstBuffer;
pub fn gst_rtcp_buffer_new_take_data(data: gpointer, len: c_uint) -> *mut gst::GstBuffer;
pub fn gst_rtcp_buffer_validate(buffer: *mut gst::GstBuffer) -> gboolean;
pub fn gst_rtcp_buffer_validate_data(data: *mut u8, len: c_uint) -> gboolean;
pub fn gst_rtcp_buffer_validate_data_reduced(data: *mut u8, len: c_uint) -> gboolean;
pub fn gst_rtcp_buffer_validate_reduced(buffer: *mut gst::GstBuffer) -> gboolean;
    //=========================================================================
    // GstRTCPPacket
    //=========================================================================
    // Generated C ABI imports for the GstRTCPPacket API (inside the enclosing
    // `extern` block). `#[cfg(feature = "v1_10")]` gates symbols behind the
    // Cargo feature for the corresponding GStreamer release; out-params are
    // passed as raw pointers per the C API.
    #[cfg(any(feature = "v1_10", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_10")))]
    pub fn gst_rtcp_packet_add_profile_specific_ext(
        packet: *mut GstRTCPPacket,
        data: *const u8,
        len: c_uint,
    ) -> gboolean;
    pub fn gst_rtcp_packet_add_rb(
        packet: *mut GstRTCPPacket,
        ssrc: u32,
        fractionlost: u8,
        packetslost: i32,
        exthighestseq: u32,
        jitter: u32,
        lsr: u32,
        dlsr: u32,
    ) -> gboolean;
    #[cfg(any(feature = "v1_10", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_10")))]
    pub fn gst_rtcp_packet_app_get_data(packet: *mut GstRTCPPacket) -> *mut u8;
    #[cfg(any(feature = "v1_10", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_10")))]
    pub fn gst_rtcp_packet_app_get_data_length(packet: *mut GstRTCPPacket) -> u16;
    #[cfg(any(feature = "v1_10", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_10")))]
    pub fn gst_rtcp_packet_app_get_name(packet: *mut GstRTCPPacket) -> *const c_char;
    #[cfg(any(feature = "v1_10", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_10")))]
    pub fn gst_rtcp_packet_app_get_ssrc(packet: *mut GstRTCPPacket) -> u32;
    #[cfg(any(feature = "v1_10", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_10")))]
    pub fn gst_rtcp_packet_app_get_subtype(packet: *mut GstRTCPPacket) -> u8;
    #[cfg(any(feature = "v1_10", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_10")))]
    pub fn gst_rtcp_packet_app_set_data_length(
        packet: *mut GstRTCPPacket,
        wordlen: u16,
    ) -> gboolean;
    #[cfg(any(feature = "v1_10", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_10")))]
    pub fn gst_rtcp_packet_app_set_name(packet: *mut GstRTCPPacket, name: *const c_char);
    #[cfg(any(feature = "v1_10", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_10")))]
    pub fn gst_rtcp_packet_app_set_ssrc(packet: *mut GstRTCPPacket, ssrc: u32);
    #[cfg(any(feature = "v1_10", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_10")))]
    pub fn gst_rtcp_packet_app_set_subtype(packet: *mut GstRTCPPacket, subtype: u8);
    pub fn gst_rtcp_packet_bye_add_ssrc(packet: *mut GstRTCPPacket, ssrc: u32) -> gboolean;
    pub fn gst_rtcp_packet_bye_add_ssrcs(
        packet: *mut GstRTCPPacket,
        ssrc: *mut u32,
        len: c_uint,
    ) -> gboolean;
    pub fn gst_rtcp_packet_bye_get_nth_ssrc(packet: *mut GstRTCPPacket, nth: c_uint) -> u32;
    pub fn gst_rtcp_packet_bye_get_reason(packet: *mut GstRTCPPacket) -> *mut c_char;
    pub fn gst_rtcp_packet_bye_get_reason_len(packet: *mut GstRTCPPacket) -> u8;
    pub fn gst_rtcp_packet_bye_get_ssrc_count(packet: *mut GstRTCPPacket) -> c_uint;
    pub fn gst_rtcp_packet_bye_set_reason(
        packet: *mut GstRTCPPacket,
        reason: *const c_char,
    ) -> gboolean;
    #[cfg(any(feature = "v1_10", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_10")))]
    pub fn gst_rtcp_packet_copy_profile_specific_ext(
        packet: *mut GstRTCPPacket,
        data: *mut *mut u8,
        len: *mut c_uint,
    ) -> gboolean;
    pub fn gst_rtcp_packet_fb_get_fci(packet: *mut GstRTCPPacket) -> *mut u8;
    pub fn gst_rtcp_packet_fb_get_fci_length(packet: *mut GstRTCPPacket) -> u16;
    pub fn gst_rtcp_packet_fb_get_media_ssrc(packet: *mut GstRTCPPacket) -> u32;
    pub fn gst_rtcp_packet_fb_get_sender_ssrc(packet: *mut GstRTCPPacket) -> u32;
    pub fn gst_rtcp_packet_fb_get_type(packet: *mut GstRTCPPacket) -> GstRTCPFBType;
    pub fn gst_rtcp_packet_fb_set_fci_length(packet: *mut GstRTCPPacket, wordlen: u16) -> gboolean;
    pub fn gst_rtcp_packet_fb_set_media_ssrc(packet: *mut GstRTCPPacket, ssrc: u32);
    pub fn gst_rtcp_packet_fb_set_sender_ssrc(packet: *mut GstRTCPPacket, ssrc: u32);
    pub fn gst_rtcp_packet_fb_set_type(packet: *mut GstRTCPPacket, type_: GstRTCPFBType);
    pub fn gst_rtcp_packet_get_count(packet: *mut GstRTCPPacket) -> u8;
    pub fn gst_rtcp_packet_get_length(packet: *mut GstRTCPPacket) -> u16;
    pub fn gst_rtcp_packet_get_padding(packet: *mut GstRTCPPacket) -> gboolean;
    #[cfg(any(feature = "v1_10", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_10")))]
    pub fn gst_rtcp_packet_get_profile_specific_ext(
        packet: *mut GstRTCPPacket,
        data: *mut *mut u8,
        len: *mut c_uint,
    ) -> gboolean;
    #[cfg(any(feature = "v1_10", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_10")))]
    pub fn gst_rtcp_packet_get_profile_specific_ext_length(packet: *mut GstRTCPPacket) -> u16;
    pub fn gst_rtcp_packet_get_rb(
        packet: *mut GstRTCPPacket,
        nth: c_uint,
        ssrc: *mut u32,
        fractionlost: *mut u8,
        packetslost: *mut i32,
        exthighestseq: *mut u32,
        jitter: *mut u32,
        lsr: *mut u32,
        dlsr: *mut u32,
    );
    pub fn gst_rtcp_packet_get_rb_count(packet: *mut GstRTCPPacket) -> c_uint;
    pub fn gst_rtcp_packet_get_type(packet: *mut GstRTCPPacket) -> GstRTCPType;
    pub fn gst_rtcp_packet_move_to_next(packet: *mut GstRTCPPacket) -> gboolean;
    pub fn gst_rtcp_packet_remove(packet: *mut GstRTCPPacket) -> gboolean;
    pub fn gst_rtcp_packet_rr_get_ssrc(packet: *mut GstRTCPPacket) -> u32;
    pub fn gst_rtcp_packet_rr_set_ssrc(packet: *mut GstRTCPPacket, ssrc: u32);
    pub fn gst_rtcp_packet_sdes_add_entry(
        packet: *mut GstRTCPPacket,
        type_: GstRTCPSDESType,
        len: u8,
        data: *const u8,
    ) -> gboolean;
    pub fn gst_rtcp_packet_sdes_add_item(packet: *mut GstRTCPPacket, ssrc: u32) -> gboolean;
    pub fn gst_rtcp_packet_sdes_copy_entry(
        packet: *mut GstRTCPPacket,
        type_: *mut GstRTCPSDESType,
        len: *mut u8,
        data: *mut *mut u8,
    ) -> gboolean;
    pub fn gst_rtcp_packet_sdes_first_entry(packet: *mut GstRTCPPacket) -> gboolean;
    pub fn gst_rtcp_packet_sdes_first_item(packet: *mut GstRTCPPacket) -> gboolean;
    pub fn gst_rtcp_packet_sdes_get_entry(
        packet: *mut GstRTCPPacket,
        type_: *mut GstRTCPSDESType,
        len: *mut u8,
        data: *mut *mut u8,
    ) -> gboolean;
    pub fn gst_rtcp_packet_sdes_get_item_count(packet: *mut GstRTCPPacket) -> c_uint;
    pub fn gst_rtcp_packet_sdes_get_ssrc(packet: *mut GstRTCPPacket) -> u32;
    pub fn gst_rtcp_packet_sdes_next_entry(packet: *mut GstRTCPPacket) -> gboolean;
    pub fn gst_rtcp_packet_sdes_next_item(packet: *mut GstRTCPPacket) -> gboolean;
    pub fn gst_rtcp_packet_set_rb(
        packet: *mut GstRTCPPacket,
        nth: c_uint,
        ssrc: u32,
        fractionlost: u8,
        packetslost: i32,
        exthighestseq: u32,
        jitter: u32,
        lsr: u32,
        dlsr: u32,
    );
    pub fn gst_rtcp_packet_sr_get_sender_info(
        packet: *mut GstRTCPPacket,
        ssrc: *mut u32,
        ntptime: *mut u64,
        rtptime: *mut u32,
        packet_count: *mut u32,
        octet_count: *mut u32,
    );
    pub fn gst_rtcp_packet_sr_set_sender_info(
        packet: *mut GstRTCPPacket,
        ssrc: u32,
        ntptime: u64,
        rtptime: u32,
        packet_count: u32,
        octet_count: u32,
    );
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_first_rb(packet: *mut GstRTCPPacket) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_block_length(packet: *mut GstRTCPPacket) -> u16;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_block_type(packet: *mut GstRTCPPacket) -> GstRTCPXRType;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_dlrr_block(
packet: *mut GstRTCPPacket,
nth: c_uint,
ssrc: *mut u32,
last_rr: *mut u32,
delay: *mut u32,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_prt_by_seq(
packet: *mut GstRTCPPacket,
seq: u16,
receipt_time: *mut u32,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_prt_info(
packet: *mut GstRTCPPacket,
ssrc: *mut u32,
thinning: *mut u8,
begin_seq: *mut u16,
end_seq: *mut u16,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_rle_info(
packet: *mut GstRTCPPacket,
ssrc: *mut u32,
thinning: *mut u8,
begin_seq: *mut u16,
end_seq: *mut u16,
chunk_count: *mut u32,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_rle_nth_chunk(
packet: *mut GstRTCPPacket,
nth: c_uint,
chunk: *mut u16,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_rrt(packet: *mut GstRTCPPacket, timestamp: *mut u64) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_ssrc(packet: *mut GstRTCPPacket) -> u32;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_summary_info(
packet: *mut GstRTCPPacket,
ssrc: *mut u32,
begin_seq: *mut u16,
end_seq: *mut u16,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_summary_jitter(
packet: *mut GstRTCPPacket,
min_jitter: *mut u32,
max_jitter: *mut u32,
mean_jitter: *mut u32,
dev_jitter: *mut u32,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_summary_pkt(
packet: *mut GstRTCPPacket,
lost_packets: *mut u32,
dup_packets: *mut u32,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_summary_ttl(
packet: *mut GstRTCPPacket,
is_ipv4: *mut gboolean,
min_ttl: *mut u8,
max_ttl: *mut u8,
mean_ttl: *mut u8,
dev_ttl: *mut u8,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_voip_burst_metrics(
packet: *mut GstRTCPPacket,
burst_density: *mut u8,
gap_density: *mut u8,
burst_duration: *mut u16,
gap_duration: *mut u16,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_voip_configuration_params(
packet: *mut GstRTCPPacket,
gmin: *mut u8,
rx_config: *mut u8,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_voip_delay_metrics(
packet: *mut GstRTCPPacket,
roundtrip_delay: *mut u16,
end_system_delay: *mut u16,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_voip_jitter_buffer_params(
packet: *mut GstRTCPPacket,
jb_nominal: *mut u16,
jb_maximum: *mut u16,
jb_abs_max: *mut u16,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_voip_metrics_ssrc(
packet: *mut GstRTCPPacket,
ssrc: *mut u32,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_voip_packet_metrics(
packet: *mut GstRTCPPacket,
loss_rate: *mut u8,
discard_rate: *mut u8,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_voip_quality_metrics(
packet: *mut GstRTCPPacket,
r_factor: *mut u8,
ext_r_factor: *mut u8,
mos_lq: *mut u8,
mos_cq: *mut u8,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_get_voip_signal_metrics(
packet: *mut GstRTCPPacket,
signal_level: *mut u8,
noise_level: *mut u8,
rerl: *mut u8,
gmin: *mut u8,
) -> gboolean;
#[cfg(any(feature = "v1_16", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
pub fn gst_rtcp_packet_xr_next_rb(packet: *mut GstRTCPPacket) -> gboolean;
    //=========================================================================
    // GstRTPBuffer
    //=========================================================================
    // Generated C ABI imports for the GstRTPBuffer API (inside the enclosing
    // `extern` block). Out-params are raw pointers; gboolean return values
    // presumably signal success/failure — confirm against the C docs.
    pub fn gst_rtp_buffer_add_extension_onebyte_header(
        rtp: *mut GstRTPBuffer,
        id: u8,
        data: gconstpointer,
        size: c_uint,
    ) -> gboolean;
    pub fn gst_rtp_buffer_add_extension_twobytes_header(
        rtp: *mut GstRTPBuffer,
        appbits: u8,
        id: u8,
        data: gconstpointer,
        size: c_uint,
    ) -> gboolean;
    pub fn gst_rtp_buffer_get_csrc(rtp: *mut GstRTPBuffer, idx: u8) -> u32;
    pub fn gst_rtp_buffer_get_csrc_count(rtp: *mut GstRTPBuffer) -> u8;
    pub fn gst_rtp_buffer_get_extension(rtp: *mut GstRTPBuffer) -> gboolean;
    pub fn gst_rtp_buffer_get_extension_bytes(
        rtp: *mut GstRTPBuffer,
        bits: *mut u16,
    ) -> *mut glib::GBytes;
    pub fn gst_rtp_buffer_get_extension_data(
        rtp: *mut GstRTPBuffer,
        bits: *mut u16,
        data: *mut u8,
        wordlen: *mut c_uint,
    ) -> gboolean;
    pub fn gst_rtp_buffer_get_extension_onebyte_header(
        rtp: *mut GstRTPBuffer,
        id: u8,
        nth: c_uint,
        data: *mut u8,
        size: *mut c_uint,
    ) -> gboolean;
    pub fn gst_rtp_buffer_get_extension_twobytes_header(
        rtp: *mut GstRTPBuffer,
        appbits: *mut u8,
        id: u8,
        nth: c_uint,
        data: *mut u8,
        size: *mut c_uint,
    ) -> gboolean;
    pub fn gst_rtp_buffer_get_header_len(rtp: *mut GstRTPBuffer) -> c_uint;
    pub fn gst_rtp_buffer_get_marker(rtp: *mut GstRTPBuffer) -> gboolean;
    pub fn gst_rtp_buffer_get_packet_len(rtp: *mut GstRTPBuffer) -> c_uint;
    pub fn gst_rtp_buffer_get_padding(rtp: *mut GstRTPBuffer) -> gboolean;
    pub fn gst_rtp_buffer_get_payload(rtp: *mut GstRTPBuffer) -> gpointer;
    pub fn gst_rtp_buffer_get_payload_buffer(rtp: *mut GstRTPBuffer) -> *mut gst::GstBuffer;
    pub fn gst_rtp_buffer_get_payload_bytes(rtp: *mut GstRTPBuffer) -> *mut glib::GBytes;
    pub fn gst_rtp_buffer_get_payload_len(rtp: *mut GstRTPBuffer) -> c_uint;
    pub fn gst_rtp_buffer_get_payload_subbuffer(
        rtp: *mut GstRTPBuffer,
        offset: c_uint,
        len: c_uint,
    ) -> *mut gst::GstBuffer;
    pub fn gst_rtp_buffer_get_payload_type(rtp: *mut GstRTPBuffer) -> u8;
    pub fn gst_rtp_buffer_get_seq(rtp: *mut GstRTPBuffer) -> u16;
    pub fn gst_rtp_buffer_get_ssrc(rtp: *mut GstRTPBuffer) -> u32;
    pub fn gst_rtp_buffer_get_timestamp(rtp: *mut GstRTPBuffer) -> u32;
    pub fn gst_rtp_buffer_get_version(rtp: *mut GstRTPBuffer) -> u8;
    pub fn gst_rtp_buffer_pad_to(rtp: *mut GstRTPBuffer, len: c_uint);
    pub fn gst_rtp_buffer_set_csrc(rtp: *mut GstRTPBuffer, idx: u8, csrc: u32);
    pub fn gst_rtp_buffer_set_extension(rtp: *mut GstRTPBuffer, extension: gboolean);
    pub fn gst_rtp_buffer_set_extension_data(
        rtp: *mut GstRTPBuffer,
        bits: u16,
        length: u16,
    ) -> gboolean;
    pub fn gst_rtp_buffer_set_marker(rtp: *mut GstRTPBuffer, marker: gboolean);
    pub fn gst_rtp_buffer_set_packet_len(rtp: *mut GstRTPBuffer, len: c_uint);
    pub fn gst_rtp_buffer_set_padding(rtp: *mut GstRTPBuffer, padding: gboolean);
    pub fn gst_rtp_buffer_set_payload_type(rtp: *mut GstRTPBuffer, payload_type: u8);
    pub fn gst_rtp_buffer_set_seq(rtp: *mut GstRTPBuffer, seq: u16);
    pub fn gst_rtp_buffer_set_ssrc(rtp: *mut GstRTPBuffer, ssrc: u32);
    pub fn gst_rtp_buffer_set_timestamp(rtp: *mut GstRTPBuffer, timestamp: u32);
    pub fn gst_rtp_buffer_set_version(rtp: *mut GstRTPBuffer, version: u8);
    pub fn gst_rtp_buffer_unmap(rtp: *mut GstRTPBuffer);
    pub fn gst_rtp_buffer_allocate_data(
        buffer: *mut gst::GstBuffer,
        payload_len: c_uint,
        pad_len: u8,
        csrc_count: u8,
    );
    pub fn gst_rtp_buffer_calc_header_len(csrc_count: u8) -> c_uint;
    pub fn gst_rtp_buffer_calc_packet_len(
        payload_len: c_uint,
        pad_len: u8,
        csrc_count: u8,
    ) -> c_uint;
    pub fn gst_rtp_buffer_calc_payload_len(
        packet_len: c_uint,
        pad_len: u8,
        csrc_count: u8,
    ) -> c_uint;
    pub fn gst_rtp_buffer_compare_seqnum(seqnum1: u16, seqnum2: u16) -> c_int;
    pub fn gst_rtp_buffer_default_clock_rate(payload_type: u8) -> u32;
    pub fn gst_rtp_buffer_ext_timestamp(exttimestamp: *mut u64, timestamp: u32) -> u64;
    #[cfg(any(feature = "v1_18", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_18")))]
    pub fn gst_rtp_buffer_get_extension_onebyte_header_from_bytes(
        bytes: *mut glib::GBytes,
        bit_pattern: u16,
        id: u8,
        nth: c_uint,
        data: *mut u8,
        size: *mut c_uint,
    ) -> gboolean;
    pub fn gst_rtp_buffer_map(
        buffer: *mut gst::GstBuffer,
        flags: gst::GstMapFlags,
        rtp: *mut GstRTPBuffer,
    ) -> gboolean;
    pub fn gst_rtp_buffer_new_allocate(
        payload_len: c_uint,
        pad_len: u8,
        csrc_count: u8,
    ) -> *mut gst::GstBuffer;
    pub fn gst_rtp_buffer_new_allocate_len(
        packet_len: c_uint,
        pad_len: u8,
        csrc_count: u8,
    ) -> *mut gst::GstBuffer;
    pub fn gst_rtp_buffer_new_copy_data(data: gconstpointer, len: size_t) -> *mut gst::GstBuffer;
    pub fn gst_rtp_buffer_new_take_data(data: gpointer, len: size_t) -> *mut gst::GstBuffer;
    //=========================================================================
    // GstRTPHeaderExtensionClass
    //=========================================================================
    // Generated C ABI imports for the RTP payloading base classes, header
    // extensions, and assorted helpers (inside the enclosing `extern` block).
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_class_set_uri(
        klass: *mut GstRTPHeaderExtensionClass,
        uri: *const c_char,
    );
    //=========================================================================
    // GstRTPPayloadInfo
    //=========================================================================
    pub fn gst_rtp_payload_info_for_name(
        media: *const c_char,
        encoding_name: *const c_char,
    ) -> *const GstRTPPayloadInfo;
    pub fn gst_rtp_payload_info_for_pt(payload_type: u8) -> *const GstRTPPayloadInfo;
    //=========================================================================
    // GstRTPSourceMeta
    //=========================================================================
    #[cfg(any(feature = "v1_16", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
    pub fn gst_rtp_source_meta_append_csrc(
        meta: *mut GstRTPSourceMeta,
        csrc: *const u32,
        csrc_count: c_uint,
    ) -> gboolean;
    #[cfg(any(feature = "v1_16", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
    pub fn gst_rtp_source_meta_get_source_count(meta: *const GstRTPSourceMeta) -> c_uint;
    #[cfg(any(feature = "v1_16", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
    pub fn gst_rtp_source_meta_set_ssrc(meta: *mut GstRTPSourceMeta, ssrc: *mut u32) -> gboolean;
    #[cfg(any(feature = "v1_16", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
    pub fn gst_rtp_source_meta_get_info() -> *const gst::GstMetaInfo;
    //=========================================================================
    // GstRTPBaseAudioPayload
    //=========================================================================
    pub fn gst_rtp_base_audio_payload_get_type() -> GType;
    pub fn gst_rtp_base_audio_payload_flush(
        baseaudiopayload: *mut GstRTPBaseAudioPayload,
        payload_len: c_uint,
        timestamp: gst::GstClockTime,
    ) -> gst::GstFlowReturn;
    pub fn gst_rtp_base_audio_payload_get_adapter(
        rtpbaseaudiopayload: *mut GstRTPBaseAudioPayload,
    ) -> *mut gst_base::GstAdapter;
    pub fn gst_rtp_base_audio_payload_push(
        baseaudiopayload: *mut GstRTPBaseAudioPayload,
        data: *const u8,
        payload_len: c_uint,
        timestamp: gst::GstClockTime,
    ) -> gst::GstFlowReturn;
    pub fn gst_rtp_base_audio_payload_set_frame_based(
        rtpbaseaudiopayload: *mut GstRTPBaseAudioPayload,
    );
    pub fn gst_rtp_base_audio_payload_set_frame_options(
        rtpbaseaudiopayload: *mut GstRTPBaseAudioPayload,
        frame_duration: c_int,
        frame_size: c_int,
    );
    pub fn gst_rtp_base_audio_payload_set_sample_based(
        rtpbaseaudiopayload: *mut GstRTPBaseAudioPayload,
    );
    pub fn gst_rtp_base_audio_payload_set_sample_options(
        rtpbaseaudiopayload: *mut GstRTPBaseAudioPayload,
        sample_size: c_int,
    );
    pub fn gst_rtp_base_audio_payload_set_samplebits_options(
        rtpbaseaudiopayload: *mut GstRTPBaseAudioPayload,
        sample_size: c_int,
    );
    //=========================================================================
    // GstRTPBaseDepayload
    //=========================================================================
    pub fn gst_rtp_base_depayload_get_type() -> GType;
    #[cfg(any(feature = "v1_16", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
    pub fn gst_rtp_base_depayload_is_source_info_enabled(
        depayload: *mut GstRTPBaseDepayload,
    ) -> gboolean;
    pub fn gst_rtp_base_depayload_push(
        filter: *mut GstRTPBaseDepayload,
        out_buf: *mut gst::GstBuffer,
    ) -> gst::GstFlowReturn;
    pub fn gst_rtp_base_depayload_push_list(
        filter: *mut GstRTPBaseDepayload,
        out_list: *mut gst::GstBufferList,
    ) -> gst::GstFlowReturn;
    #[cfg(any(feature = "v1_16", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
    pub fn gst_rtp_base_depayload_set_source_info_enabled(
        depayload: *mut GstRTPBaseDepayload,
        enable: gboolean,
    );
    //=========================================================================
    // GstRTPBasePayload
    //=========================================================================
    pub fn gst_rtp_base_payload_get_type() -> GType;
    #[cfg(any(feature = "v1_16", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
    pub fn gst_rtp_base_payload_allocate_output_buffer(
        payload: *mut GstRTPBasePayload,
        payload_len: c_uint,
        pad_len: u8,
        csrc_count: u8,
    ) -> *mut gst::GstBuffer;
    #[cfg(any(feature = "v1_16", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
    pub fn gst_rtp_base_payload_get_source_count(
        payload: *mut GstRTPBasePayload,
        buffer: *mut gst::GstBuffer,
    ) -> c_uint;
    pub fn gst_rtp_base_payload_is_filled(
        payload: *mut GstRTPBasePayload,
        size: c_uint,
        duration: gst::GstClockTime,
    ) -> gboolean;
    #[cfg(any(feature = "v1_16", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
    pub fn gst_rtp_base_payload_is_source_info_enabled(payload: *mut GstRTPBasePayload)
        -> gboolean;
    pub fn gst_rtp_base_payload_push(
        payload: *mut GstRTPBasePayload,
        buffer: *mut gst::GstBuffer,
    ) -> gst::GstFlowReturn;
    pub fn gst_rtp_base_payload_push_list(
        payload: *mut GstRTPBasePayload,
        list: *mut gst::GstBufferList,
    ) -> gst::GstFlowReturn;
    pub fn gst_rtp_base_payload_set_options(
        payload: *mut GstRTPBasePayload,
        media: *const c_char,
        dynamic: gboolean,
        encoding_name: *const c_char,
        clock_rate: u32,
    );
    // NOTE(review): variadic (C varargs) — callers must follow the C API's
    // fieldname/value termination convention; confirm against the C docs.
    pub fn gst_rtp_base_payload_set_outcaps(
        payload: *mut GstRTPBasePayload,
        fieldname: *const c_char,
        ...
    ) -> gboolean;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_base_payload_set_outcaps_structure(
        payload: *mut GstRTPBasePayload,
        s: *mut gst::GstStructure,
    ) -> gboolean;
    #[cfg(any(feature = "v1_16", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
    pub fn gst_rtp_base_payload_set_source_info_enabled(
        payload: *mut GstRTPBasePayload,
        enable: gboolean,
    );
    //=========================================================================
    // GstRTPHeaderExtension
    //=========================================================================
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_get_type() -> GType;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_create_from_uri(
        uri: *const c_char,
    ) -> *mut GstRTPHeaderExtension;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_get_id(ext: *mut GstRTPHeaderExtension) -> c_uint;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_get_max_size(
        ext: *mut GstRTPHeaderExtension,
        input_meta: *const gst::GstBuffer,
    ) -> size_t;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_get_sdp_caps_field_name(
        ext: *mut GstRTPHeaderExtension,
    ) -> *mut c_char;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_get_supported_flags(
        ext: *mut GstRTPHeaderExtension,
    ) -> GstRTPHeaderExtensionFlags;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_get_uri(ext: *mut GstRTPHeaderExtension) -> *const c_char;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_read(
        ext: *mut GstRTPHeaderExtension,
        read_flags: GstRTPHeaderExtensionFlags,
        data: *const u8,
        size: size_t,
        buffer: *mut gst::GstBuffer,
    ) -> gboolean;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_set_attributes_from_caps(
        ext: *mut GstRTPHeaderExtension,
        caps: *const gst::GstCaps,
    ) -> gboolean;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_set_attributes_from_caps_simple_sdp(
        ext: *mut GstRTPHeaderExtension,
        caps: *const gst::GstCaps,
    ) -> gboolean;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_set_caps_from_attributes(
        ext: *mut GstRTPHeaderExtension,
        caps: *mut gst::GstCaps,
    ) -> gboolean;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_set_caps_from_attributes_simple_sdp(
        ext: *mut GstRTPHeaderExtension,
        caps: *mut gst::GstCaps,
    ) -> gboolean;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_set_id(ext: *mut GstRTPHeaderExtension, ext_id: c_uint);
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_set_non_rtp_sink_caps(
        ext: *mut GstRTPHeaderExtension,
        caps: *const gst::GstCaps,
    ) -> gboolean;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_set_wants_update_non_rtp_src_caps(
        ext: *mut GstRTPHeaderExtension,
        state: gboolean,
    );
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_update_non_rtp_src_caps(
        ext: *mut GstRTPHeaderExtension,
        caps: *mut gst::GstCaps,
    ) -> gboolean;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_wants_update_non_rtp_src_caps(
        ext: *mut GstRTPHeaderExtension,
    ) -> gboolean;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_header_extension_write(
        ext: *mut GstRTPHeaderExtension,
        input_meta: *const gst::GstBuffer,
        write_flags: GstRTPHeaderExtensionFlags,
        output: *mut gst::GstBuffer,
        data: *mut u8,
        size: size_t,
    ) -> size_t;
    //=========================================================================
    // Other functions
    //=========================================================================
    #[cfg(any(feature = "v1_16", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
    pub fn gst_buffer_add_rtp_source_meta(
        buffer: *mut gst::GstBuffer,
        ssrc: *const u32,
        csrc: *const u32,
        csrc_count: c_uint,
    ) -> *mut GstRTPSourceMeta;
    #[cfg(any(feature = "v1_16", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
    pub fn gst_buffer_get_rtp_source_meta(buffer: *mut gst::GstBuffer) -> *mut GstRTPSourceMeta;
    pub fn gst_rtcp_ntp_to_unix(ntptime: u64) -> u64;
    pub fn gst_rtcp_sdes_name_to_type(name: *const c_char) -> GstRTCPSDESType;
    pub fn gst_rtcp_sdes_type_to_name(type_: GstRTCPSDESType) -> *const c_char;
    pub fn gst_rtcp_unix_to_ntp(unixtime: u64) -> u64;
    #[cfg(any(feature = "v1_20", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_20")))]
    pub fn gst_rtp_get_header_extension_list() -> *mut glib::GList;
    pub fn gst_rtp_hdrext_get_ntp_56(data: gpointer, size: c_uint, ntptime: *mut u64) -> gboolean;
    pub fn gst_rtp_hdrext_get_ntp_64(data: gpointer, size: c_uint, ntptime: *mut u64) -> gboolean;
    pub fn gst_rtp_hdrext_set_ntp_56(data: gpointer, size: c_uint, ntptime: u64) -> gboolean;
    pub fn gst_rtp_hdrext_set_ntp_64(data: gpointer, size: c_uint, ntptime: u64) -> gboolean;
    #[cfg(any(feature = "v1_16", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
    pub fn gst_rtp_source_meta_api_get_type() -> GType;
}
| 42.547498 | 99 | 0.630341 |
64e3e8fff0835bf34b22cb50fc38600ffc4ff01a
| 518,773 |
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Shared state behind a [`Client`]: the underlying Smithy service client
/// plus the service configuration. `Client` wraps this in an `Arc` so that
/// clones of the client all share one instance.
#[derive(Debug)]
pub(crate) struct Handle<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Low-level Smithy client that performs the actual request dispatch.
    client: aws_smithy_client::Client<C, M, R>,
    // Service configuration (see `crate::Config`).
    conf: crate::Config,
}
/// An ergonomic service client for `DynamoDB_20120810`.
///
/// This client allows ergonomic access to a `DynamoDB_20120810`-shaped service.
/// Each method corresponds to an endpoint defined in the service's Smithy model,
/// and the request and response shapes are auto-generated from that same model.
///
/// # Using a Client
///
/// Once you have a client set up, you can access the service's endpoints
/// by calling the appropriate method on [`Client`]. Each such method
/// returns a request builder for that endpoint, with methods for setting
/// the various fields of the request. Once your request is complete, use
/// the `send` method to send the request. `send` returns a future, which
/// you then have to `.await` to get the service's response.
///
/// [builder pattern]: https://rust-lang.github.io/api-guidelines/type-safety.html#c-builder
/// [SigV4-signed requests]: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
#[derive(std::fmt::Debug)]
pub struct Client<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared, reference-counted handle; the `Clone` impl for `Client` only
    // bumps this `Arc`, so clones are cheap and share one client/config pair.
    handle: std::sync::Arc<Handle<C, M, R>>,
}
impl<C, M, R> std::clone::Clone for Client<C, M, R> {
    /// Produces a new `Client` sharing the same underlying `Handle`.
    /// Only the `Arc` reference count is incremented; nothing is copied.
    fn clone(&self) -> Self {
        let handle = std::sync::Arc::clone(&self.handle);
        Self { handle }
    }
}
// Re-exported so callers can assemble a custom underlying Smithy client
// without depending on `aws_smithy_client` directly.
#[doc(inline)]
pub use aws_smithy_client::Builder;
impl<C, M, R> From<aws_smithy_client::Client<C, M, R>> for Client<C, M, R> {
    /// Wraps a raw Smithy `client` using a default-constructed service
    /// configuration (equivalent to `Client::with_config` with
    /// `crate::Config::builder().build()`).
    fn from(client: aws_smithy_client::Client<C, M, R>) -> Self {
        let default_conf = crate::Config::builder().build();
        Self::with_config(client, default_conf)
    }
}
impl<C, M, R> Client<C, M, R> {
    /// Creates a client from a raw Smithy `client` and a service configuration.
    pub fn with_config(client: aws_smithy_client::Client<C, M, R>, conf: crate::Config) -> Self {
        let shared = Handle { client, conf };
        Self {
            handle: std::sync::Arc::new(shared),
        }
    }

    /// Borrows the configuration this client was constructed with.
    pub fn conf(&self) -> &crate::Config {
        let handle = &*self.handle;
        &handle.conf
    }
}
impl<C, M, R> Client<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    // Generated operation surface: each method below constructs the fluent
    // builder for one service operation, handing it a clone of this client's
    // shared `Arc<Handle>` (a cheap reference-count bump, not a deep copy).
    // No request is sent until the builder's `send` method is called.
    /// Constructs a fluent builder for the `BatchExecuteStatement` operation.
    ///
    /// See [`BatchExecuteStatement`](crate::client::fluent_builders::BatchExecuteStatement) for more information about the
    /// operation and its arguments.
    pub fn batch_execute_statement(&self) -> fluent_builders::BatchExecuteStatement<C, M, R> {
        fluent_builders::BatchExecuteStatement::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `BatchGetItem` operation.
    ///
    /// See [`BatchGetItem`](crate::client::fluent_builders::BatchGetItem) for more information about the
    /// operation and its arguments.
    pub fn batch_get_item(&self) -> fluent_builders::BatchGetItem<C, M, R> {
        fluent_builders::BatchGetItem::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `BatchWriteItem` operation.
    ///
    /// See [`BatchWriteItem`](crate::client::fluent_builders::BatchWriteItem) for more information about the
    /// operation and its arguments.
    pub fn batch_write_item(&self) -> fluent_builders::BatchWriteItem<C, M, R> {
        fluent_builders::BatchWriteItem::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `CreateBackup` operation.
    ///
    /// See [`CreateBackup`](crate::client::fluent_builders::CreateBackup) for more information about the
    /// operation and its arguments.
    pub fn create_backup(&self) -> fluent_builders::CreateBackup<C, M, R> {
        fluent_builders::CreateBackup::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `CreateGlobalTable` operation.
    ///
    /// See [`CreateGlobalTable`](crate::client::fluent_builders::CreateGlobalTable) for more information about the
    /// operation and its arguments.
    pub fn create_global_table(&self) -> fluent_builders::CreateGlobalTable<C, M, R> {
        fluent_builders::CreateGlobalTable::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `CreateTable` operation.
    ///
    /// See [`CreateTable`](crate::client::fluent_builders::CreateTable) for more information about the
    /// operation and its arguments.
    pub fn create_table(&self) -> fluent_builders::CreateTable<C, M, R> {
        fluent_builders::CreateTable::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteBackup` operation.
    ///
    /// See [`DeleteBackup`](crate::client::fluent_builders::DeleteBackup) for more information about the
    /// operation and its arguments.
    pub fn delete_backup(&self) -> fluent_builders::DeleteBackup<C, M, R> {
        fluent_builders::DeleteBackup::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteItem` operation.
    ///
    /// See [`DeleteItem`](crate::client::fluent_builders::DeleteItem) for more information about the
    /// operation and its arguments.
    pub fn delete_item(&self) -> fluent_builders::DeleteItem<C, M, R> {
        fluent_builders::DeleteItem::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteTable` operation.
    ///
    /// See [`DeleteTable`](crate::client::fluent_builders::DeleteTable) for more information about the
    /// operation and its arguments.
    pub fn delete_table(&self) -> fluent_builders::DeleteTable<C, M, R> {
        fluent_builders::DeleteTable::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeBackup` operation.
    ///
    /// See [`DescribeBackup`](crate::client::fluent_builders::DescribeBackup) for more information about the
    /// operation and its arguments.
    pub fn describe_backup(&self) -> fluent_builders::DescribeBackup<C, M, R> {
        fluent_builders::DescribeBackup::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeContinuousBackups` operation.
    ///
    /// See [`DescribeContinuousBackups`](crate::client::fluent_builders::DescribeContinuousBackups) for more information about the
    /// operation and its arguments.
    pub fn describe_continuous_backups(
        &self,
    ) -> fluent_builders::DescribeContinuousBackups<C, M, R> {
        fluent_builders::DescribeContinuousBackups::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeContributorInsights` operation.
    ///
    /// See [`DescribeContributorInsights`](crate::client::fluent_builders::DescribeContributorInsights) for more information about the
    /// operation and its arguments.
    pub fn describe_contributor_insights(
        &self,
    ) -> fluent_builders::DescribeContributorInsights<C, M, R> {
        fluent_builders::DescribeContributorInsights::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeEndpoints` operation.
    ///
    /// See [`DescribeEndpoints`](crate::client::fluent_builders::DescribeEndpoints) for more information about the
    /// operation and its arguments.
    pub fn describe_endpoints(&self) -> fluent_builders::DescribeEndpoints<C, M, R> {
        fluent_builders::DescribeEndpoints::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeExport` operation.
    ///
    /// See [`DescribeExport`](crate::client::fluent_builders::DescribeExport) for more information about the
    /// operation and its arguments.
    pub fn describe_export(&self) -> fluent_builders::DescribeExport<C, M, R> {
        fluent_builders::DescribeExport::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeGlobalTable` operation.
    ///
    /// See [`DescribeGlobalTable`](crate::client::fluent_builders::DescribeGlobalTable) for more information about the
    /// operation and its arguments.
    pub fn describe_global_table(&self) -> fluent_builders::DescribeGlobalTable<C, M, R> {
        fluent_builders::DescribeGlobalTable::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeGlobalTableSettings` operation.
    ///
    /// See [`DescribeGlobalTableSettings`](crate::client::fluent_builders::DescribeGlobalTableSettings) for more information about the
    /// operation and its arguments.
    pub fn describe_global_table_settings(
        &self,
    ) -> fluent_builders::DescribeGlobalTableSettings<C, M, R> {
        fluent_builders::DescribeGlobalTableSettings::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeKinesisStreamingDestination` operation.
    ///
    /// See [`DescribeKinesisStreamingDestination`](crate::client::fluent_builders::DescribeKinesisStreamingDestination) for more information about the
    /// operation and its arguments.
    pub fn describe_kinesis_streaming_destination(
        &self,
    ) -> fluent_builders::DescribeKinesisStreamingDestination<C, M, R> {
        fluent_builders::DescribeKinesisStreamingDestination::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeLimits` operation.
    ///
    /// See [`DescribeLimits`](crate::client::fluent_builders::DescribeLimits) for more information about the
    /// operation and its arguments.
    pub fn describe_limits(&self) -> fluent_builders::DescribeLimits<C, M, R> {
        fluent_builders::DescribeLimits::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeTable` operation.
    ///
    /// See [`DescribeTable`](crate::client::fluent_builders::DescribeTable) for more information about the
    /// operation and its arguments.
    pub fn describe_table(&self) -> fluent_builders::DescribeTable<C, M, R> {
        fluent_builders::DescribeTable::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeTableReplicaAutoScaling` operation.
    ///
    /// See [`DescribeTableReplicaAutoScaling`](crate::client::fluent_builders::DescribeTableReplicaAutoScaling) for more information about the
    /// operation and its arguments.
    pub fn describe_table_replica_auto_scaling(
        &self,
    ) -> fluent_builders::DescribeTableReplicaAutoScaling<C, M, R> {
        fluent_builders::DescribeTableReplicaAutoScaling::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeTimeToLive` operation.
    ///
    /// See [`DescribeTimeToLive`](crate::client::fluent_builders::DescribeTimeToLive) for more information about the
    /// operation and its arguments.
    pub fn describe_time_to_live(&self) -> fluent_builders::DescribeTimeToLive<C, M, R> {
        fluent_builders::DescribeTimeToLive::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DisableKinesisStreamingDestination` operation.
    ///
    /// See [`DisableKinesisStreamingDestination`](crate::client::fluent_builders::DisableKinesisStreamingDestination) for more information about the
    /// operation and its arguments.
    pub fn disable_kinesis_streaming_destination(
        &self,
    ) -> fluent_builders::DisableKinesisStreamingDestination<C, M, R> {
        fluent_builders::DisableKinesisStreamingDestination::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `EnableKinesisStreamingDestination` operation.
    ///
    /// See [`EnableKinesisStreamingDestination`](crate::client::fluent_builders::EnableKinesisStreamingDestination) for more information about the
    /// operation and its arguments.
    pub fn enable_kinesis_streaming_destination(
        &self,
    ) -> fluent_builders::EnableKinesisStreamingDestination<C, M, R> {
        fluent_builders::EnableKinesisStreamingDestination::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `ExecuteStatement` operation.
    ///
    /// See [`ExecuteStatement`](crate::client::fluent_builders::ExecuteStatement) for more information about the
    /// operation and its arguments.
    pub fn execute_statement(&self) -> fluent_builders::ExecuteStatement<C, M, R> {
        fluent_builders::ExecuteStatement::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `ExecuteTransaction` operation.
    ///
    /// See [`ExecuteTransaction`](crate::client::fluent_builders::ExecuteTransaction) for more information about the
    /// operation and its arguments.
    pub fn execute_transaction(&self) -> fluent_builders::ExecuteTransaction<C, M, R> {
        fluent_builders::ExecuteTransaction::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `ExportTableToPointInTime` operation.
    ///
    /// See [`ExportTableToPointInTime`](crate::client::fluent_builders::ExportTableToPointInTime) for more information about the
    /// operation and its arguments.
    pub fn export_table_to_point_in_time(
        &self,
    ) -> fluent_builders::ExportTableToPointInTime<C, M, R> {
        fluent_builders::ExportTableToPointInTime::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `GetItem` operation.
    ///
    /// See [`GetItem`](crate::client::fluent_builders::GetItem) for more information about the
    /// operation and its arguments.
    pub fn get_item(&self) -> fluent_builders::GetItem<C, M, R> {
        fluent_builders::GetItem::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `ListBackups` operation.
    ///
    /// See [`ListBackups`](crate::client::fluent_builders::ListBackups) for more information about the
    /// operation and its arguments.
    pub fn list_backups(&self) -> fluent_builders::ListBackups<C, M, R> {
        fluent_builders::ListBackups::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `ListContributorInsights` operation.
    ///
    /// See [`ListContributorInsights`](crate::client::fluent_builders::ListContributorInsights) for more information about the
    /// operation and its arguments.
    pub fn list_contributor_insights(&self) -> fluent_builders::ListContributorInsights<C, M, R> {
        fluent_builders::ListContributorInsights::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `ListExports` operation.
    ///
    /// See [`ListExports`](crate::client::fluent_builders::ListExports) for more information about the
    /// operation and its arguments.
    pub fn list_exports(&self) -> fluent_builders::ListExports<C, M, R> {
        fluent_builders::ListExports::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `ListGlobalTables` operation.
    ///
    /// See [`ListGlobalTables`](crate::client::fluent_builders::ListGlobalTables) for more information about the
    /// operation and its arguments.
    pub fn list_global_tables(&self) -> fluent_builders::ListGlobalTables<C, M, R> {
        fluent_builders::ListGlobalTables::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `ListTables` operation.
    ///
    /// See [`ListTables`](crate::client::fluent_builders::ListTables) for more information about the
    /// operation and its arguments.
    pub fn list_tables(&self) -> fluent_builders::ListTables<C, M, R> {
        fluent_builders::ListTables::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `ListTagsOfResource` operation.
    ///
    /// See [`ListTagsOfResource`](crate::client::fluent_builders::ListTagsOfResource) for more information about the
    /// operation and its arguments.
    pub fn list_tags_of_resource(&self) -> fluent_builders::ListTagsOfResource<C, M, R> {
        fluent_builders::ListTagsOfResource::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutItem` operation.
    ///
    /// See [`PutItem`](crate::client::fluent_builders::PutItem) for more information about the
    /// operation and its arguments.
    pub fn put_item(&self) -> fluent_builders::PutItem<C, M, R> {
        fluent_builders::PutItem::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `Query` operation.
    ///
    /// See [`Query`](crate::client::fluent_builders::Query) for more information about the
    /// operation and its arguments.
    pub fn query(&self) -> fluent_builders::Query<C, M, R> {
        fluent_builders::Query::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `RestoreTableFromBackup` operation.
    ///
    /// See [`RestoreTableFromBackup`](crate::client::fluent_builders::RestoreTableFromBackup) for more information about the
    /// operation and its arguments.
    pub fn restore_table_from_backup(&self) -> fluent_builders::RestoreTableFromBackup<C, M, R> {
        fluent_builders::RestoreTableFromBackup::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `RestoreTableToPointInTime` operation.
    ///
    /// See [`RestoreTableToPointInTime`](crate::client::fluent_builders::RestoreTableToPointInTime) for more information about the
    /// operation and its arguments.
    pub fn restore_table_to_point_in_time(
        &self,
    ) -> fluent_builders::RestoreTableToPointInTime<C, M, R> {
        fluent_builders::RestoreTableToPointInTime::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `Scan` operation.
    ///
    /// See [`Scan`](crate::client::fluent_builders::Scan) for more information about the
    /// operation and its arguments.
    pub fn scan(&self) -> fluent_builders::Scan<C, M, R> {
        fluent_builders::Scan::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `TagResource` operation.
    ///
    /// See [`TagResource`](crate::client::fluent_builders::TagResource) for more information about the
    /// operation and its arguments.
    pub fn tag_resource(&self) -> fluent_builders::TagResource<C, M, R> {
        fluent_builders::TagResource::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `TransactGetItems` operation.
    ///
    /// See [`TransactGetItems`](crate::client::fluent_builders::TransactGetItems) for more information about the
    /// operation and its arguments.
    pub fn transact_get_items(&self) -> fluent_builders::TransactGetItems<C, M, R> {
        fluent_builders::TransactGetItems::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `TransactWriteItems` operation.
    ///
    /// See [`TransactWriteItems`](crate::client::fluent_builders::TransactWriteItems) for more information about the
    /// operation and its arguments.
    pub fn transact_write_items(&self) -> fluent_builders::TransactWriteItems<C, M, R> {
        fluent_builders::TransactWriteItems::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `UntagResource` operation.
    ///
    /// See [`UntagResource`](crate::client::fluent_builders::UntagResource) for more information about the
    /// operation and its arguments.
    pub fn untag_resource(&self) -> fluent_builders::UntagResource<C, M, R> {
        fluent_builders::UntagResource::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `UpdateContinuousBackups` operation.
    ///
    /// See [`UpdateContinuousBackups`](crate::client::fluent_builders::UpdateContinuousBackups) for more information about the
    /// operation and its arguments.
    pub fn update_continuous_backups(&self) -> fluent_builders::UpdateContinuousBackups<C, M, R> {
        fluent_builders::UpdateContinuousBackups::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `UpdateContributorInsights` operation.
    ///
    /// See [`UpdateContributorInsights`](crate::client::fluent_builders::UpdateContributorInsights) for more information about the
    /// operation and its arguments.
    pub fn update_contributor_insights(
        &self,
    ) -> fluent_builders::UpdateContributorInsights<C, M, R> {
        fluent_builders::UpdateContributorInsights::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `UpdateGlobalTable` operation.
    ///
    /// See [`UpdateGlobalTable`](crate::client::fluent_builders::UpdateGlobalTable) for more information about the
    /// operation and its arguments.
    pub fn update_global_table(&self) -> fluent_builders::UpdateGlobalTable<C, M, R> {
        fluent_builders::UpdateGlobalTable::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `UpdateGlobalTableSettings` operation.
    ///
    /// See [`UpdateGlobalTableSettings`](crate::client::fluent_builders::UpdateGlobalTableSettings) for more information about the
    /// operation and its arguments.
    pub fn update_global_table_settings(
        &self,
    ) -> fluent_builders::UpdateGlobalTableSettings<C, M, R> {
        fluent_builders::UpdateGlobalTableSettings::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `UpdateItem` operation.
    ///
    /// See [`UpdateItem`](crate::client::fluent_builders::UpdateItem) for more information about the
    /// operation and its arguments.
    pub fn update_item(&self) -> fluent_builders::UpdateItem<C, M, R> {
        fluent_builders::UpdateItem::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `UpdateTable` operation.
    ///
    /// See [`UpdateTable`](crate::client::fluent_builders::UpdateTable) for more information about the
    /// operation and its arguments.
    pub fn update_table(&self) -> fluent_builders::UpdateTable<C, M, R> {
        fluent_builders::UpdateTable::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `UpdateTableReplicaAutoScaling` operation.
    ///
    /// See [`UpdateTableReplicaAutoScaling`](crate::client::fluent_builders::UpdateTableReplicaAutoScaling) for more information about the
    /// operation and its arguments.
    pub fn update_table_replica_auto_scaling(
        &self,
    ) -> fluent_builders::UpdateTableReplicaAutoScaling<C, M, R> {
        fluent_builders::UpdateTableReplicaAutoScaling::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `UpdateTimeToLive` operation.
    ///
    /// See [`UpdateTimeToLive`](crate::client::fluent_builders::UpdateTimeToLive) for more information about the
    /// operation and its arguments.
    pub fn update_time_to_live(&self) -> fluent_builders::UpdateTimeToLive<C, M, R> {
        fluent_builders::UpdateTimeToLive::new(self.handle.clone())
    }
}
pub mod fluent_builders {
//!
//! Utilities to ergonomically construct a request to the service.
//!
//! Fluent builders are created through the [`Client`](crate::client::Client) by calling
//! one if its operation methods. After parameters are set using the builder methods,
//! the `send` method can be called to initiate the request.
//!
/// Fluent builder constructing a request to `BatchExecuteStatement`.
///
/// <p>This operation allows you to perform batch reads or writes on data stored in DynamoDB,
/// using PartiQL.</p>
/// <note>
/// <p>The entire batch must consist of either read statements or write statements, you
/// cannot mix both in one batch.</p>
/// </note>
    #[derive(std::fmt::Debug)]
    pub struct BatchExecuteStatement<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client + configuration, cloned from the `Client` that created this builder.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Accumulates the operation's input parameters until `send` is called.
        inner: crate::input::batch_execute_statement_input::Builder,
    }
impl<C, M, R> BatchExecuteStatement<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `BatchExecuteStatement`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::BatchExecuteStatementOutput,
aws_smithy_http::result::SdkError<crate::error::BatchExecuteStatementError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::BatchExecuteStatementInputOperationOutputAlias,
crate::output::BatchExecuteStatementOutput,
crate::error::BatchExecuteStatementError,
crate::input::BatchExecuteStatementInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Appends an item to `Statements`.
///
/// To override the contents of this collection use [`set_statements`](Self::set_statements).
///
/// <p>The list of PartiQL statements representing the batch to run.</p>
pub fn statements(mut self, inp: impl Into<crate::model::BatchStatementRequest>) -> Self {
self.inner = self.inner.statements(inp);
self
}
/// <p>The list of PartiQL statements representing the batch to run.</p>
pub fn set_statements(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::BatchStatementRequest>>,
) -> Self {
self.inner = self.inner.set_statements(input);
self
}
}
/// Fluent builder constructing a request to `BatchGetItem`.
///
/// <p>The <code>BatchGetItem</code> operation returns the attributes of one or more items
/// from one or more tables. You identify requested items by primary key.</p>
/// <p>A single operation can retrieve up to 16 MB of data, which can contain as many as 100
/// items. <code>BatchGetItem</code> returns a partial result if the response size limit is
/// exceeded, the table's provisioned throughput is exceeded, or an internal processing
/// failure occurs. If a partial result is returned, the operation returns a value for
/// <code>UnprocessedKeys</code>. You can use this value to retry the operation starting
/// with the next item to get.</p>
/// <important>
/// <p>If you request more than 100 items, <code>BatchGetItem</code> returns a
/// <code>ValidationException</code> with the message "Too many items requested for
/// the BatchGetItem call."</p>
/// </important>
/// <p>For example, if you ask to retrieve 100 items, but each individual item is 300 KB in
/// size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns
/// an appropriate <code>UnprocessedKeys</code> value so you can get the next page of
/// results. If desired, your application can include its own logic to assemble the pages of
/// results into one dataset.</p>
/// <p>If <i>none</i> of the items can be processed due to insufficient
/// provisioned throughput on all of the tables in the request, then
/// <code>BatchGetItem</code> returns a
/// <code>ProvisionedThroughputExceededException</code>. If <i>at least
/// one</i> of the items is successfully processed, then
/// <code>BatchGetItem</code> completes successfully, while returning the keys of the
/// unread items in <code>UnprocessedKeys</code>.</p>
/// <important>
/// <p>If DynamoDB returns any unprocessed items, you should retry the batch operation on
/// those items. However, <i>we strongly recommend that you use an exponential
/// backoff algorithm</i>. If you retry the batch operation immediately, the
/// underlying read or write requests can still fail due to throttling on the individual
/// tables. If you delay the batch operation using exponential backoff, the individual
/// requests in the batch are much more likely to succeed.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations">Batch Operations and Error Handling</a> in the <i>Amazon DynamoDB
/// Developer Guide</i>.</p>
/// </important>
/// <p>By default, <code>BatchGetItem</code> performs eventually consistent reads on every
/// table in the request. If you want strongly consistent reads instead, you can set
/// <code>ConsistentRead</code> to <code>true</code> for any or all tables.</p>
/// <p>In order to minimize response latency, <code>BatchGetItem</code> retrieves items in
/// parallel.</p>
/// <p>When designing your application, keep in mind that DynamoDB does not return items in
/// any particular order. To help parse the response by item, include the primary key values
/// for the items in your request in the <code>ProjectionExpression</code> parameter.</p>
/// <p>If a requested item does not exist, it is not returned in the result. Requests for
/// nonexistent items consume the minimum read capacity units according to the type of read.
/// For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations">Working with Tables</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
    #[derive(std::fmt::Debug)]
    pub struct BatchGetItem<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client + configuration, cloned from the `Client` that created this builder.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Accumulates the operation's input parameters until `send` is called.
        inner: crate::input::batch_get_item_input::Builder,
    }
impl<C, M, R> BatchGetItem<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `BatchGetItem`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::BatchGetItemOutput,
aws_smithy_http::result::SdkError<crate::error::BatchGetItemError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::BatchGetItemInputOperationOutputAlias,
crate::output::BatchGetItemOutput,
crate::error::BatchGetItemError,
crate::input::BatchGetItemInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Adds a key-value pair to `RequestItems`.
///
/// To override the contents of this collection use [`set_request_items`](Self::set_request_items).
///
/// <p>A map of one or more table names and, for each table, a map that describes one or more
/// items to retrieve from that table. Each table name can be used only once per
/// <code>BatchGetItem</code> request.</p>
/// <p>Each element in the map of items to retrieve consists of the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>ConsistentRead</code> - If <code>true</code>, a strongly consistent read
/// is used; if <code>false</code> (the default), an eventually consistent read is
/// used.</p>
/// </li>
/// <li>
/// <p>
/// <code>ExpressionAttributeNames</code> - One or more substitution tokens for
/// attribute names in the <code>ProjectionExpression</code> parameter. The
/// following are some use cases for using
/// <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>To access an attribute whose name conflicts with a DynamoDB reserved
/// word.</p>
/// </li>
/// <li>
/// <p>To create a placeholder for repeating occurrences of an attribute name
/// in an expression.</p>
/// </li>
/// <li>
/// <p>To prevent special characters in an attribute name from being
/// misinterpreted in an expression.</p>
/// </li>
/// </ul>
/// <p>Use the <b>#</b> character in an expression to
/// dereference an attribute name. For example, consider the following attribute
/// name:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Percentile</code>
/// </p>
/// </li>
/// </ul>
/// <p>The name of this attribute conflicts with a reserved word, so it cannot be
/// used directly in an expression. (For the complete list of reserved words, see
/// <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved
/// Words</a> in the <i>Amazon DynamoDB Developer Guide</i>).
/// To work around this, you could specify the following for
/// <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>{"#P":"Percentile"}</code>
/// </p>
/// </li>
/// </ul>
/// <p>You could then use this substitution in an expression, as in this
/// example:</p>
/// <ul>
/// <li>
/// <p>
/// <code>#P = :val</code>
/// </p>
/// </li>
/// </ul>
/// <note>
/// <p>Tokens that begin with the <b>:</b> character
/// are <i>expression attribute values</i>, which are placeholders
/// for the actual value at runtime.</p>
/// </note>
/// <p>For more information about expression attribute names, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Accessing Item Attributes</a> in the <i>Amazon DynamoDB
/// Developer Guide</i>.</p>
/// </li>
/// <li>
/// <p>
/// <code>Keys</code> - An array of primary key attribute values that define
/// specific items in the table. For each primary key, you must provide
/// <i>all</i> of the key attributes. For example, with a simple
/// primary key, you only need to provide the partition key value. For a composite
/// key, you must provide <i>both</i> the partition key value and the
/// sort key value.</p>
/// </li>
/// <li>
/// <p>
/// <code>ProjectionExpression</code> - A string that identifies one or more
/// attributes to retrieve from the table. These attributes can include scalars,
/// sets, or elements of a JSON document. The attributes in the expression must be
/// separated by commas.</p>
/// <p>If no attribute names are specified, then all attributes are returned. If any
/// of the requested attributes are not found, they do not appear in the
/// result.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Accessing Item Attributes</a> in the <i>Amazon DynamoDB
/// Developer Guide</i>.</p>
/// </li>
/// <li>
/// <p>
/// <code>AttributesToGet</code> - This is a legacy parameter. Use
/// <code>ProjectionExpression</code> instead. For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html">AttributesToGet</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>. </p>
///
/// </li>
/// </ul>
pub fn request_items(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::KeysAndAttributes>,
) -> Self {
self.inner = self.inner.request_items(k, v);
self
}
/// <p>A map of one or more table names and, for each table, a map that describes one or more
/// items to retrieve from that table. Each table name can be used only once per
/// <code>BatchGetItem</code> request.</p>
/// <p>Each element in the map of items to retrieve consists of the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>ConsistentRead</code> - If <code>true</code>, a strongly consistent read
/// is used; if <code>false</code> (the default), an eventually consistent read is
/// used.</p>
/// </li>
/// <li>
/// <p>
/// <code>ExpressionAttributeNames</code> - One or more substitution tokens for
/// attribute names in the <code>ProjectionExpression</code> parameter. The
/// following are some use cases for using
/// <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>To access an attribute whose name conflicts with a DynamoDB reserved
/// word.</p>
/// </li>
/// <li>
/// <p>To create a placeholder for repeating occurrences of an attribute name
/// in an expression.</p>
/// </li>
/// <li>
/// <p>To prevent special characters in an attribute name from being
/// misinterpreted in an expression.</p>
/// </li>
/// </ul>
/// <p>Use the <b>#</b> character in an expression to
/// dereference an attribute name. For example, consider the following attribute
/// name:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Percentile</code>
/// </p>
/// </li>
/// </ul>
/// <p>The name of this attribute conflicts with a reserved word, so it cannot be
/// used directly in an expression. (For the complete list of reserved words, see
/// <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved
/// Words</a> in the <i>Amazon DynamoDB Developer Guide</i>).
/// To work around this, you could specify the following for
/// <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>{"#P":"Percentile"}</code>
/// </p>
/// </li>
/// </ul>
/// <p>You could then use this substitution in an expression, as in this
/// example:</p>
/// <ul>
/// <li>
/// <p>
/// <code>#P = :val</code>
/// </p>
/// </li>
/// </ul>
/// <note>
/// <p>Tokens that begin with the <b>:</b> character
/// are <i>expression attribute values</i>, which are placeholders
/// for the actual value at runtime.</p>
/// </note>
/// <p>For more information about expression attribute names, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Accessing Item Attributes</a> in the <i>Amazon DynamoDB
/// Developer Guide</i>.</p>
/// </li>
/// <li>
/// <p>
/// <code>Keys</code> - An array of primary key attribute values that define
/// specific items in the table. For each primary key, you must provide
/// <i>all</i> of the key attributes. For example, with a simple
/// primary key, you only need to provide the partition key value. For a composite
/// key, you must provide <i>both</i> the partition key value and the
/// sort key value.</p>
/// </li>
/// <li>
/// <p>
/// <code>ProjectionExpression</code> - A string that identifies one or more
/// attributes to retrieve from the table. These attributes can include scalars,
/// sets, or elements of a JSON document. The attributes in the expression must be
/// separated by commas.</p>
/// <p>If no attribute names are specified, then all attributes are returned. If any
/// of the requested attributes are not found, they do not appear in the
/// result.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Accessing Item Attributes</a> in the <i>Amazon DynamoDB
/// Developer Guide</i>.</p>
/// </li>
/// <li>
/// <p>
/// <code>AttributesToGet</code> - This is a legacy parameter. Use
/// <code>ProjectionExpression</code> instead. For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html">AttributesToGet</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>. </p>
///
/// </li>
/// </ul>
pub fn set_request_items(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::KeysAndAttributes>,
>,
) -> Self {
self.inner = self.inner.set_request_items(input);
self
}
/// <p>Determines the level of detail about provisioned throughput consumption that is
/// returned in the response:</p>
/// <ul>
/// <li>
/// <p>
/// <code>INDEXES</code> - The response includes the aggregate
/// <code>ConsumedCapacity</code> for the operation, together with
/// <code>ConsumedCapacity</code> for each table and secondary index that was
/// accessed.</p>
/// <p>Note that some operations, such as <code>GetItem</code> and
/// <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
/// specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
/// information for table(s).</p>
/// </li>
/// <li>
/// <p>
/// <code>TOTAL</code> - The response includes only the aggregate
/// <code>ConsumedCapacity</code> for the operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
/// response.</p>
/// </li>
/// </ul>
pub fn return_consumed_capacity(
mut self,
inp: crate::model::ReturnConsumedCapacity,
) -> Self {
self.inner = self.inner.return_consumed_capacity(inp);
self
}
/// <p>Determines the level of detail about provisioned throughput consumption that is
/// returned in the response:</p>
/// <ul>
/// <li>
/// <p>
/// <code>INDEXES</code> - The response includes the aggregate
/// <code>ConsumedCapacity</code> for the operation, together with
/// <code>ConsumedCapacity</code> for each table and secondary index that was
/// accessed.</p>
/// <p>Note that some operations, such as <code>GetItem</code> and
/// <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
/// specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
/// information for table(s).</p>
/// </li>
/// <li>
/// <p>
/// <code>TOTAL</code> - The response includes only the aggregate
/// <code>ConsumedCapacity</code> for the operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
/// response.</p>
/// </li>
/// </ul>
pub fn set_return_consumed_capacity(
mut self,
input: std::option::Option<crate::model::ReturnConsumedCapacity>,
) -> Self {
self.inner = self.inner.set_return_consumed_capacity(input);
self
}
}
/// Fluent builder constructing a request to `BatchWriteItem`.
///
/// <p>The <code>BatchWriteItem</code> operation puts or deletes multiple items in one or
/// more tables. A single call to <code>BatchWriteItem</code> can write up to 16 MB of data,
/// which can comprise as many as 25 put or delete requests. Individual items to be written
/// can be as large as 400 KB.</p>
/// <note>
/// <p>
/// <code>BatchWriteItem</code> cannot update items. To update items, use the
/// <code>UpdateItem</code> action.</p>
/// </note>
/// <p>The individual <code>PutItem</code> and <code>DeleteItem</code> operations specified
/// in <code>BatchWriteItem</code> are atomic; however <code>BatchWriteItem</code> as a
/// whole is not. If any requested operations fail because the table's provisioned
/// throughput is exceeded or an internal processing failure occurs, the failed operations
/// are returned in the <code>UnprocessedItems</code> response parameter. You can
/// investigate and optionally resend the requests. Typically, you would call
/// <code>BatchWriteItem</code> in a loop. Each iteration would check for unprocessed
/// items and submit a new <code>BatchWriteItem</code> request with those unprocessed items
/// until all items have been processed.</p>
/// <p>If <i>none</i> of the items can be processed due to insufficient
/// provisioned throughput on all of the tables in the request, then
/// <code>BatchWriteItem</code> returns a
/// <code>ProvisionedThroughputExceededException</code>.</p>
/// <important>
/// <p>If DynamoDB returns any unprocessed items, you should retry the batch operation on
/// those items. However, <i>we strongly recommend that you use an exponential
/// backoff algorithm</i>. If you retry the batch operation immediately, the
/// underlying read or write requests can still fail due to throttling on the individual
/// tables. If you delay the batch operation using exponential backoff, the individual
/// requests in the batch are much more likely to succeed.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations">Batch Operations and Error Handling</a> in the <i>Amazon DynamoDB
/// Developer Guide</i>.</p>
/// </important>
///
/// <p>With <code>BatchWriteItem</code>, you can efficiently write or delete large amounts of
/// data, such as from Amazon EMR, or copy data from another database into DynamoDB. In
/// order to improve performance with these large-scale operations,
/// <code>BatchWriteItem</code> does not behave in the same way as individual
/// <code>PutItem</code> and <code>DeleteItem</code> calls would. For example, you
/// cannot specify conditions on individual put and delete requests, and
/// <code>BatchWriteItem</code> does not return deleted items in the response.</p>
/// <p>If you use a programming language that supports concurrency, you can use threads to
/// write items in parallel. Your application must include the necessary logic to manage the
/// threads. With languages that don't support threading, you must update or delete the
/// specified items one at a time. In both situations, <code>BatchWriteItem</code> performs
/// the specified put and delete operations in parallel, giving you the power of the thread
/// pool approach without having to introduce complexity into your application.</p>
/// <p>Parallel processing reduces latency, but each specified put and delete request
/// consumes the same number of write capacity units whether it is processed in parallel or
/// not. Delete operations on nonexistent items consume one write capacity unit.</p>
/// <p>If one or more of the following is true, DynamoDB rejects the entire batch write
/// operation:</p>
/// <ul>
/// <li>
/// <p>One or more tables specified in the <code>BatchWriteItem</code> request does
/// not exist.</p>
/// </li>
/// <li>
/// <p>Primary key attributes specified on an item in the request do not match those
/// in the corresponding table's primary key schema.</p>
/// </li>
/// <li>
/// <p>You try to perform multiple operations on the same item in the same
/// <code>BatchWriteItem</code> request. For example, you cannot put and delete
/// the same item in the same <code>BatchWriteItem</code> request. </p>
/// </li>
/// <li>
/// <p> Your request contains at least two items with identical hash and range keys
/// (which essentially is two put operations). </p>
/// </li>
/// <li>
/// <p>There are more than 25 requests in the batch.</p>
/// </li>
/// <li>
/// <p>Any individual item in a batch exceeds 400 KB.</p>
/// </li>
/// <li>
/// <p>The total request size exceeds 16 MB.</p>
/// </li>
/// </ul>
#[derive(std::fmt::Debug)]
pub struct BatchWriteItem<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connector, middleware, retry policy, config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the `BatchWriteItemInput` fields set through this builder.
    inner: crate::input::batch_write_item_input::Builder,
}
impl<C, M, R> BatchWriteItem<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `BatchWriteItem` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        // Start with an empty input builder; fields are set via the fluent methods.
        let inner = Default::default();
        Self { handle, inner }
    }
    /// Builds the request from the accumulated input and sends it, returning the
    /// parsed response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::BatchWriteItemOutput,
        aws_smithy_http::result::SdkError<crate::error::BatchWriteItemError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::BatchWriteItemInputOperationOutputAlias,
            crate::output::BatchWriteItemOutput,
            crate::error::BatchWriteItemError,
            crate::input::BatchWriteItemInputOperationRetryAlias,
        >,
    {
        // Failures to assemble the request (input validation or operation
        // construction) surface as `SdkError::ConstructionFailure`.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// Adds a key-value pair to `RequestItems`.
    ///
    /// To override the contents of this collection use
    /// [`set_request_items`](Self::set_request_items).
    ///
    /// <p>A map of one or more table names and, for each table, a list of
    /// <code>DeleteRequest</code> or <code>PutRequest</code> operations to perform on
    /// that table. A <code>DeleteRequest</code> identifies the item to delete by a
    /// <code>Key</code> map containing <i>all</i> primary key attributes (partition
    /// key, plus sort key for composite keys). A <code>PutRequest</code> carries an
    /// <code>Item</code> map of attribute values to store; attribute values must not
    /// be null, string/binary values must be non-empty, set values must not be empty
    /// (otherwise a <code>ValidationException</code> is returned), and attributes that
    /// are part of an index key must match the types in the table's attribute
    /// definitions.</p>
    pub fn request_items(
        mut self,
        k: impl Into<std::string::String>,
        v: impl Into<std::vec::Vec<crate::model::WriteRequest>>,
    ) -> Self {
        let updated = self.inner.request_items(k, v);
        self.inner = updated;
        self
    }
    /// <p>Replaces the whole <code>RequestItems</code> map: one or more table names,
    /// each mapped to a list of <code>DeleteRequest</code> or <code>PutRequest</code>
    /// operations to perform on that table. See
    /// [`request_items`](Self::request_items) for the per-entry contract.</p>
    pub fn set_request_items(
        mut self,
        input: std::option::Option<
            std::collections::HashMap<
                std::string::String,
                std::vec::Vec<crate::model::WriteRequest>,
            >,
        >,
    ) -> Self {
        let updated = self.inner.set_request_items(input);
        self.inner = updated;
        self
    }
    /// <p>Determines the level of detail about provisioned throughput consumption that
    /// is returned in the response: <code>INDEXES</code> (aggregate
    /// <code>ConsumedCapacity</code> plus per-table and per-secondary-index detail),
    /// <code>TOTAL</code> (aggregate only), or <code>NONE</code> (no
    /// <code>ConsumedCapacity</code> details).</p>
    pub fn return_consumed_capacity(
        mut self,
        inp: crate::model::ReturnConsumedCapacity,
    ) -> Self {
        let updated = self.inner.return_consumed_capacity(inp);
        self.inner = updated;
        self
    }
    /// <p>Determines the level of detail about provisioned throughput consumption that
    /// is returned in the response: <code>INDEXES</code>, <code>TOTAL</code>, or
    /// <code>NONE</code>. See
    /// [`return_consumed_capacity`](Self::return_consumed_capacity).</p>
    pub fn set_return_consumed_capacity(
        mut self,
        input: std::option::Option<crate::model::ReturnConsumedCapacity>,
    ) -> Self {
        let updated = self.inner.set_return_consumed_capacity(input);
        self.inner = updated;
        self
    }
    /// <p>Determines whether item collection metrics are returned. If set to
    /// <code>SIZE</code>, the response includes statistics about item collections, if
    /// any, that were modified during the operation. If set to <code>NONE</code> (the
    /// default), no statistics are returned.</p>
    pub fn return_item_collection_metrics(
        mut self,
        inp: crate::model::ReturnItemCollectionMetrics,
    ) -> Self {
        let updated = self.inner.return_item_collection_metrics(inp);
        self.inner = updated;
        self
    }
    /// <p>Determines whether item collection metrics are returned
    /// (<code>SIZE</code> or <code>NONE</code>). See
    /// [`return_item_collection_metrics`](Self::return_item_collection_metrics).</p>
    pub fn set_return_item_collection_metrics(
        mut self,
        input: std::option::Option<crate::model::ReturnItemCollectionMetrics>,
    ) -> Self {
        let updated = self.inner.set_return_item_collection_metrics(input);
        self.inner = updated;
        self
    }
}
/// Fluent builder constructing a request to `CreateBackup`.
///
/// <p>Creates a backup for an existing table.</p>
/// <p> Each time you create an on-demand backup, the entire table data is backed up. There
/// is no limit to the number of on-demand backups that can be taken. </p>
/// <p> When you create an on-demand backup, a time marker of the request is cataloged, and
/// the backup is created asynchronously, by applying all changes until the time of the
/// request to the last full table snapshot. Backup requests are processed instantaneously
/// and become available for restore within minutes. </p>
/// <p>You can call <code>CreateBackup</code> at a maximum rate of 50 times per
/// second.</p>
/// <p>All backups in DynamoDB work without consuming any provisioned throughput on the
/// table.</p>
/// <p> If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to
/// contain all data committed to the table up to 14:24:00, and data committed after
/// 14:26:00 will not be. The backup might contain data modifications made between 14:24:00
/// and 14:26:00. On-demand backup does not support causal consistency. </p>
/// <p> Along with data, the following are also included on the backups: </p>
/// <ul>
/// <li>
/// <p>Global secondary indexes (GSIs)</p>
/// </li>
/// <li>
/// <p>Local secondary indexes (LSIs)</p>
/// </li>
/// <li>
/// <p>Streams</p>
/// </li>
/// <li>
/// <p>Provisioned read and write capacity</p>
/// </li>
/// </ul>
#[derive(std::fmt::Debug)]
pub struct CreateBackup<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connector, middleware, retry policy, config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the `CreateBackupInput` fields set through this builder.
    inner: crate::input::create_backup_input::Builder,
}
impl<C, M, R> CreateBackup<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `CreateBackup` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        // Start with an empty input builder; fields are set via the fluent methods.
        let inner = Default::default();
        Self { handle, inner }
    }
    /// Builds the request from the accumulated input and sends it, returning the
    /// parsed response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateBackupOutput,
        aws_smithy_http::result::SdkError<crate::error::CreateBackupError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::CreateBackupInputOperationOutputAlias,
            crate::output::CreateBackupOutput,
            crate::error::CreateBackupError,
            crate::input::CreateBackupInputOperationRetryAlias,
        >,
    {
        // Failures to assemble the request (input validation or operation
        // construction) surface as `SdkError::ConstructionFailure`.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the table.</p>
    pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.table_name(inp);
        self.inner = updated;
        self
    }
    /// <p>The name of the table.</p>
    pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_table_name(input);
        self.inner = updated;
        self
    }
    /// <p>Specified name for the backup.</p>
    pub fn backup_name(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.backup_name(inp);
        self.inner = updated;
        self
    }
    /// <p>Specified name for the backup.</p>
    pub fn set_backup_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_backup_name(input);
        self.inner = updated;
        self
    }
}
/// Fluent builder constructing a request to `CreateGlobalTable`.
///
/// <p>Creates a global table from an existing table. A global table creates a replication
/// relationship between two or more DynamoDB tables with the same table name in the
/// provided Regions. </p>
/// <note>
/// <p>This operation only applies to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html">Version
/// 2017.11.29</a> of global tables.</p>
/// </note>
///
/// <p>If you want to add a new replica table to a global table, each of the following
/// conditions must be true:</p>
/// <ul>
/// <li>
/// <p>The table must have the same primary key as all of the other replicas.</p>
/// </li>
/// <li>
/// <p>The table must have the same name as all of the other replicas.</p>
/// </li>
/// <li>
/// <p>The table must have DynamoDB Streams enabled, with the stream containing both
/// the new and the old images of the item.</p>
/// </li>
/// <li>
/// <p>None of the replica tables in the global table can contain any data.</p>
/// </li>
/// </ul>
/// <p> If global secondary indexes are specified, then the following conditions must also be
/// met: </p>
/// <ul>
/// <li>
/// <p> The global secondary indexes must have the same name. </p>
/// </li>
/// <li>
/// <p> The global secondary indexes must have the same hash key and sort key (if
/// present). </p>
/// </li>
/// </ul>
/// <p> If local secondary indexes are specified, then the following conditions must also be
/// met: </p>
/// <ul>
/// <li>
/// <p> The local secondary indexes must have the same name. </p>
/// </li>
/// <li>
/// <p> The local secondary indexes must have the same hash key and sort key (if
/// present). </p>
/// </li>
/// </ul>
///
/// <important>
/// <p> Write capacity settings should be set consistently across your replica tables and
/// secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the
/// write capacity settings for all of your global tables replicas and indexes. </p>
/// <p> If you prefer to manage write capacity settings manually, you should provision
/// equal replicated write capacity units to your replica tables. You should also
/// provision equal replicated write capacity units to matching secondary indexes across
/// your global table. </p>
/// </important>
#[derive(std::fmt::Debug)]
pub struct CreateGlobalTable<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connector, middleware, retry policy, config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the `CreateGlobalTableInput` fields set through this builder.
    inner: crate::input::create_global_table_input::Builder,
}
impl<C, M, R> CreateGlobalTable<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `CreateGlobalTable` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        // Start with an empty input builder; fields are set via the fluent methods.
        let inner = Default::default();
        Self { handle, inner }
    }
    /// Builds the request from the accumulated input and sends it, returning the
    /// parsed response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateGlobalTableOutput,
        aws_smithy_http::result::SdkError<crate::error::CreateGlobalTableError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::CreateGlobalTableInputOperationOutputAlias,
            crate::output::CreateGlobalTableOutput,
            crate::error::CreateGlobalTableError,
            crate::input::CreateGlobalTableInputOperationRetryAlias,
        >,
    {
        // Failures to assemble the request (input validation or operation
        // construction) surface as `SdkError::ConstructionFailure`.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The global table name.</p>
    pub fn global_table_name(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.global_table_name(inp);
        self.inner = updated;
        self
    }
    /// <p>The global table name.</p>
    pub fn set_global_table_name(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let updated = self.inner.set_global_table_name(input);
        self.inner = updated;
        self
    }
    /// Appends an item to `ReplicationGroup`.
    ///
    /// To override the contents of this collection use
    /// [`set_replication_group`](Self::set_replication_group).
    ///
    /// <p>The Regions where the global table needs to be created.</p>
    pub fn replication_group(mut self, inp: impl Into<crate::model::Replica>) -> Self {
        let updated = self.inner.replication_group(inp);
        self.inner = updated;
        self
    }
    /// <p>The Regions where the global table needs to be created.</p>
    pub fn set_replication_group(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::Replica>>,
    ) -> Self {
        let updated = self.inner.set_replication_group(input);
        self.inner = updated;
        self
    }
}
/// Fluent builder constructing a request to `CreateTable`.
///
/// <p>The <code>CreateTable</code> operation adds a new table to your account. In an Amazon Web Services account, table names must be unique within each Region. That is, you can
/// have two tables with same name if you create the tables in different Regions.</p>
/// <p>
/// <code>CreateTable</code> is an asynchronous operation. Upon receiving a
/// <code>CreateTable</code> request, DynamoDB immediately returns a response with a
/// <code>TableStatus</code> of <code>CREATING</code>. After the table is created,
/// DynamoDB sets the <code>TableStatus</code> to <code>ACTIVE</code>. You can perform read
/// and write operations only on an <code>ACTIVE</code> table. </p>
/// <p>You can optionally define secondary indexes on the new table, as part of the
/// <code>CreateTable</code> operation. If you want to create multiple tables with
/// secondary indexes on them, you must create the tables sequentially. Only one table with
/// secondary indexes can be in the <code>CREATING</code> state at any given time.</p>
/// <p>You can use the <code>DescribeTable</code> action to check the table status.</p>
#[derive(std::fmt::Debug)]
pub struct CreateTable<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state used to dispatch the request: the `send` method reads
    // `handle.conf` to build the operation and `handle.client` to execute it.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the `CreateTableInput` fields set through the fluent methods.
    inner: crate::input::create_table_input::Builder,
}
impl<C, M, R> CreateTable<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `CreateTable`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateTableOutput,
aws_smithy_http::result::SdkError<crate::error::CreateTableError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::CreateTableInputOperationOutputAlias,
crate::output::CreateTableOutput,
crate::error::CreateTableError,
crate::input::CreateTableInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Appends an item to `AttributeDefinitions`.
///
/// To override the contents of this collection use [`set_attribute_definitions`](Self::set_attribute_definitions).
///
/// <p>An array of attributes that describe the key schema for the table and indexes.</p>
pub fn attribute_definitions(
mut self,
inp: impl Into<crate::model::AttributeDefinition>,
) -> Self {
self.inner = self.inner.attribute_definitions(inp);
self
}
/// <p>An array of attributes that describe the key schema for the table and indexes.</p>
pub fn set_attribute_definitions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::AttributeDefinition>>,
) -> Self {
self.inner = self.inner.set_attribute_definitions(input);
self
}
/// <p>The name of the table to create.</p>
pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_name(inp);
self
}
/// <p>The name of the table to create.</p>
pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_name(input);
self
}
/// Appends an item to `KeySchema`.
///
/// To override the contents of this collection use [`set_key_schema`](Self::set_key_schema).
///
/// <p>Specifies the attributes that make up the primary key for a table or an index. The
/// attributes in <code>KeySchema</code> must also be defined in the
/// <code>AttributeDefinitions</code> array. For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html">Data
/// Model</a> in the <i>Amazon DynamoDB Developer Guide</i>.</p>
/// <p>Each <code>KeySchemaElement</code> in the array is composed of:</p>
/// <ul>
/// <li>
/// <p>
/// <code>AttributeName</code> - The name of this key attribute.</p>
/// </li>
/// <li>
/// <p>
/// <code>KeyType</code> - The role that the key attribute will assume:</p>
/// <ul>
/// <li>
/// <p>
/// <code>HASH</code> - partition key</p>
/// </li>
/// <li>
/// <p>
/// <code>RANGE</code> - sort key</p>
/// </li>
/// </ul>
/// </li>
/// </ul>
/// <note>
/// <p>The partition key of an item is also known as its <i>hash
/// attribute</i>. The term "hash attribute" derives from the DynamoDB usage
/// of an internal hash function to evenly distribute data items across partitions,
/// based on their partition key values.</p>
/// <p>The sort key of an item is also known as its <i>range attribute</i>.
/// The term "range attribute" derives from the way DynamoDB stores items with the same
/// partition key physically close together, in sorted order by the sort key
/// value.</p>
/// </note>
///
/// <p>For a simple primary key (partition key), you must provide exactly one element with a
/// <code>KeyType</code> of <code>HASH</code>.</p>
/// <p>For a composite primary key (partition key and sort key), you must provide exactly two
/// elements, in this order: The first element must have a <code>KeyType</code> of
/// <code>HASH</code>, and the second element must have a <code>KeyType</code> of
/// <code>RANGE</code>.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key">Working with Tables</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn key_schema(mut self, inp: impl Into<crate::model::KeySchemaElement>) -> Self {
self.inner = self.inner.key_schema(inp);
self
}
/// <p>Specifies the attributes that make up the primary key for a table or an index. The
/// attributes in <code>KeySchema</code> must also be defined in the
/// <code>AttributeDefinitions</code> array. For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html">Data
/// Model</a> in the <i>Amazon DynamoDB Developer Guide</i>.</p>
/// <p>Each <code>KeySchemaElement</code> in the array is composed of:</p>
/// <ul>
/// <li>
/// <p>
/// <code>AttributeName</code> - The name of this key attribute.</p>
/// </li>
/// <li>
/// <p>
/// <code>KeyType</code> - The role that the key attribute will assume:</p>
/// <ul>
/// <li>
/// <p>
/// <code>HASH</code> - partition key</p>
/// </li>
/// <li>
/// <p>
/// <code>RANGE</code> - sort key</p>
/// </li>
/// </ul>
/// </li>
/// </ul>
/// <note>
/// <p>The partition key of an item is also known as its <i>hash
/// attribute</i>. The term "hash attribute" derives from the DynamoDB usage
/// of an internal hash function to evenly distribute data items across partitions,
/// based on their partition key values.</p>
/// <p>The sort key of an item is also known as its <i>range attribute</i>.
/// The term "range attribute" derives from the way DynamoDB stores items with the same
/// partition key physically close together, in sorted order by the sort key
/// value.</p>
/// </note>
///
/// <p>For a simple primary key (partition key), you must provide exactly one element with a
/// <code>KeyType</code> of <code>HASH</code>.</p>
/// <p>For a composite primary key (partition key and sort key), you must provide exactly two
/// elements, in this order: The first element must have a <code>KeyType</code> of
/// <code>HASH</code>, and the second element must have a <code>KeyType</code> of
/// <code>RANGE</code>.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key">Working with Tables</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_key_schema(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::KeySchemaElement>>,
) -> Self {
self.inner = self.inner.set_key_schema(input);
self
}
/// Appends an item to `LocalSecondaryIndexes`.
///
/// To override the contents of this collection use [`set_local_secondary_indexes`](Self::set_local_secondary_indexes).
///
/// <p>One or more local secondary indexes (the maximum is 5) to be created on the table.
/// Each index is scoped to a given partition key value. There is a 10 GB size limit per
/// partition key value; otherwise, the size of a local secondary index is
/// unconstrained.</p>
/// <p>Each local secondary index in the array includes the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>IndexName</code> - The name of the local secondary index. Must be unique
/// only for this table.</p>
/// <p></p>
/// </li>
/// <li>
/// <p>
/// <code>KeySchema</code> - Specifies the key schema for the local secondary index.
/// The key schema must begin with the same partition key as the table.</p>
/// </li>
/// <li>
/// <p>
/// <code>Projection</code> - Specifies attributes that are copied (projected) from
/// the table into the index. These are in addition to the primary key attributes
/// and index key attributes, which are automatically projected. Each attribute
/// specification is composed of:</p>
/// <ul>
/// <li>
/// <p>
/// <code>ProjectionType</code> - One of the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>KEYS_ONLY</code> - Only the index and primary keys are
/// projected into the index.</p>
/// </li>
/// <li>
/// <p>
/// <code>INCLUDE</code> - Only the specified table attributes are
/// projected into the index. The list of projected attributes is in
/// <code>NonKeyAttributes</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL</code> - All of the table attributes are projected
/// into the index.</p>
/// </li>
/// </ul>
/// </li>
/// <li>
/// <p>
/// <code>NonKeyAttributes</code> - A list of one or more non-key attribute
/// names that are projected into the secondary index. The total count of
/// attributes provided in <code>NonKeyAttributes</code>, summed across all
/// of the secondary indexes, must not exceed 100. If you project the same
/// attribute into two different indexes, this counts as two distinct
/// attributes when determining the total.</p>
/// </li>
/// </ul>
/// </li>
/// </ul>
pub fn local_secondary_indexes(
mut self,
inp: impl Into<crate::model::LocalSecondaryIndex>,
) -> Self {
self.inner = self.inner.local_secondary_indexes(inp);
self
}
/// <p>One or more local secondary indexes (the maximum is 5) to be created on the table.
/// Each index is scoped to a given partition key value. There is a 10 GB size limit per
/// partition key value; otherwise, the size of a local secondary index is
/// unconstrained.</p>
/// <p>Each local secondary index in the array includes the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>IndexName</code> - The name of the local secondary index. Must be unique
/// only for this table.</p>
/// <p></p>
/// </li>
/// <li>
/// <p>
/// <code>KeySchema</code> - Specifies the key schema for the local secondary index.
/// The key schema must begin with the same partition key as the table.</p>
/// </li>
/// <li>
/// <p>
/// <code>Projection</code> - Specifies attributes that are copied (projected) from
/// the table into the index. These are in addition to the primary key attributes
/// and index key attributes, which are automatically projected. Each attribute
/// specification is composed of:</p>
/// <ul>
/// <li>
/// <p>
/// <code>ProjectionType</code> - One of the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>KEYS_ONLY</code> - Only the index and primary keys are
/// projected into the index.</p>
/// </li>
/// <li>
/// <p>
/// <code>INCLUDE</code> - Only the specified table attributes are
/// projected into the index. The list of projected attributes is in
/// <code>NonKeyAttributes</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL</code> - All of the table attributes are projected
/// into the index.</p>
/// </li>
/// </ul>
/// </li>
/// <li>
/// <p>
/// <code>NonKeyAttributes</code> - A list of one or more non-key attribute
/// names that are projected into the secondary index. The total count of
/// attributes provided in <code>NonKeyAttributes</code>, summed across all
/// of the secondary indexes, must not exceed 100. If you project the same
/// attribute into two different indexes, this counts as two distinct
/// attributes when determining the total.</p>
/// </li>
/// </ul>
/// </li>
/// </ul>
pub fn set_local_secondary_indexes(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::LocalSecondaryIndex>>,
) -> Self {
self.inner = self.inner.set_local_secondary_indexes(input);
self
}
/// Appends an item to `GlobalSecondaryIndexes`.
///
/// To override the contents of this collection use [`set_global_secondary_indexes`](Self::set_global_secondary_indexes).
///
/// <p>One or more global secondary indexes (the maximum is 20) to be created on the table.
/// Each global secondary index in the array includes the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>IndexName</code> - The name of the global secondary index. Must be unique
/// only for this table.</p>
/// <p></p>
/// </li>
/// <li>
/// <p>
/// <code>KeySchema</code> - Specifies the key schema for the global secondary
/// index.</p>
/// </li>
/// <li>
/// <p>
/// <code>Projection</code> - Specifies attributes that are copied (projected) from
/// the table into the index. These are in addition to the primary key attributes
/// and index key attributes, which are automatically projected. Each attribute
/// specification is composed of:</p>
/// <ul>
/// <li>
/// <p>
/// <code>ProjectionType</code> - One of the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>KEYS_ONLY</code> - Only the index and primary keys are
/// projected into the index.</p>
/// </li>
/// <li>
/// <p>
/// <code>INCLUDE</code> - Only the specified table attributes are
/// projected into the index. The list of projected attributes is in
/// <code>NonKeyAttributes</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL</code> - All of the table attributes are projected
/// into the index.</p>
/// </li>
/// </ul>
/// </li>
/// <li>
/// <p>
/// <code>NonKeyAttributes</code> - A list of one or more non-key attribute
/// names that are projected into the secondary index. The total count of
/// attributes provided in <code>NonKeyAttributes</code>, summed across all
/// of the secondary indexes, must not exceed 100. If you project the same
/// attribute into two different indexes, this counts as two distinct
/// attributes when determining the total.</p>
/// </li>
/// </ul>
/// </li>
/// <li>
/// <p>
/// <code>ProvisionedThroughput</code> - The provisioned throughput settings for the
/// global secondary index, consisting of read and write capacity units.</p>
/// </li>
/// </ul>
pub fn global_secondary_indexes(
mut self,
inp: impl Into<crate::model::GlobalSecondaryIndex>,
) -> Self {
self.inner = self.inner.global_secondary_indexes(inp);
self
}
/// <p>One or more global secondary indexes (the maximum is 20) to be created on the table.
/// Each global secondary index in the array includes the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>IndexName</code> - The name of the global secondary index. Must be unique
/// only for this table.</p>
/// <p></p>
/// </li>
/// <li>
/// <p>
/// <code>KeySchema</code> - Specifies the key schema for the global secondary
/// index.</p>
/// </li>
/// <li>
/// <p>
/// <code>Projection</code> - Specifies attributes that are copied (projected) from
/// the table into the index. These are in addition to the primary key attributes
/// and index key attributes, which are automatically projected. Each attribute
/// specification is composed of:</p>
/// <ul>
/// <li>
/// <p>
/// <code>ProjectionType</code> - One of the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>KEYS_ONLY</code> - Only the index and primary keys are
/// projected into the index.</p>
/// </li>
/// <li>
/// <p>
/// <code>INCLUDE</code> - Only the specified table attributes are
/// projected into the index. The list of projected attributes is in
/// <code>NonKeyAttributes</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL</code> - All of the table attributes are projected
/// into the index.</p>
/// </li>
/// </ul>
/// </li>
/// <li>
/// <p>
/// <code>NonKeyAttributes</code> - A list of one or more non-key attribute
/// names that are projected into the secondary index. The total count of
/// attributes provided in <code>NonKeyAttributes</code>, summed across all
/// of the secondary indexes, must not exceed 100. If you project the same
/// attribute into two different indexes, this counts as two distinct
/// attributes when determining the total.</p>
/// </li>
/// </ul>
/// </li>
/// <li>
/// <p>
/// <code>ProvisionedThroughput</code> - The provisioned throughput settings for the
/// global secondary index, consisting of read and write capacity units.</p>
/// </li>
/// </ul>
pub fn set_global_secondary_indexes(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::GlobalSecondaryIndex>>,
) -> Self {
self.inner = self.inner.set_global_secondary_indexes(input);
self
}
/// <p>Controls how you are charged for read and write throughput and how you manage
/// capacity. This setting can be changed later.</p>
/// <ul>
/// <li>
/// <p>
/// <code>PROVISIONED</code> - We recommend using <code>PROVISIONED</code> for
/// predictable workloads. <code>PROVISIONED</code> sets the billing mode to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual">Provisioned Mode</a>.</p>
/// </li>
/// <li>
/// <p>
/// <code>PAY_PER_REQUEST</code> - We recommend using <code>PAY_PER_REQUEST</code>
/// for unpredictable workloads. <code>PAY_PER_REQUEST</code> sets the billing mode
/// to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand">On-Demand Mode</a>. </p>
/// </li>
/// </ul>
pub fn billing_mode(mut self, inp: crate::model::BillingMode) -> Self {
self.inner = self.inner.billing_mode(inp);
self
}
/// <p>Controls how you are charged for read and write throughput and how you manage
/// capacity. This setting can be changed later.</p>
/// <ul>
/// <li>
/// <p>
/// <code>PROVISIONED</code> - We recommend using <code>PROVISIONED</code> for
/// predictable workloads. <code>PROVISIONED</code> sets the billing mode to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual">Provisioned Mode</a>.</p>
/// </li>
/// <li>
/// <p>
/// <code>PAY_PER_REQUEST</code> - We recommend using <code>PAY_PER_REQUEST</code>
/// for unpredictable workloads. <code>PAY_PER_REQUEST</code> sets the billing mode
/// to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand">On-Demand Mode</a>. </p>
/// </li>
/// </ul>
pub fn set_billing_mode(
mut self,
input: std::option::Option<crate::model::BillingMode>,
) -> Self {
self.inner = self.inner.set_billing_mode(input);
self
}
/// <p>Represents the provisioned throughput settings for a specified table or index. The
/// settings can be modified using the <code>UpdateTable</code> operation.</p>
/// <p> If you set BillingMode as <code>PROVISIONED</code>, you must specify this property.
/// If you set BillingMode as <code>PAY_PER_REQUEST</code>, you cannot specify this
/// property.</p>
/// <p>For current minimum and maximum provisioned throughput values, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html">Service,
/// Account, and Table Quotas</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn provisioned_throughput(mut self, inp: crate::model::ProvisionedThroughput) -> Self {
self.inner = self.inner.provisioned_throughput(inp);
self
}
/// <p>Represents the provisioned throughput settings for a specified table or index. The
/// settings can be modified using the <code>UpdateTable</code> operation.</p>
/// <p> If you set BillingMode as <code>PROVISIONED</code>, you must specify this property.
/// If you set BillingMode as <code>PAY_PER_REQUEST</code>, you cannot specify this
/// property.</p>
/// <p>For current minimum and maximum provisioned throughput values, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html">Service,
/// Account, and Table Quotas</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_provisioned_throughput(
mut self,
input: std::option::Option<crate::model::ProvisionedThroughput>,
) -> Self {
self.inner = self.inner.set_provisioned_throughput(input);
self
}
/// <p>The settings for DynamoDB Streams on the table. These settings consist of:</p>
/// <ul>
/// <li>
/// <p>
/// <code>StreamEnabled</code> - Indicates whether DynamoDB Streams is to be enabled
/// (true) or disabled (false).</p>
/// </li>
/// <li>
/// <p>
/// <code>StreamViewType</code> - When an item in the table is modified,
/// <code>StreamViewType</code> determines what information is written to the
/// table's stream. Valid values for <code>StreamViewType</code> are:</p>
/// <ul>
/// <li>
/// <p>
/// <code>KEYS_ONLY</code> - Only the key attributes of the modified item
/// are written to the stream.</p>
/// </li>
/// <li>
/// <p>
/// <code>NEW_IMAGE</code> - The entire item, as it appears after it was
/// modified, is written to the stream.</p>
/// </li>
/// <li>
/// <p>
/// <code>OLD_IMAGE</code> - The entire item, as it appeared before it was
/// modified, is written to the stream.</p>
/// </li>
/// <li>
/// <p>
/// <code>NEW_AND_OLD_IMAGES</code> - Both the new and the old item images
/// of the item are written to the stream.</p>
/// </li>
/// </ul>
/// </li>
/// </ul>
pub fn stream_specification(mut self, inp: crate::model::StreamSpecification) -> Self {
self.inner = self.inner.stream_specification(inp);
self
}
/// <p>The settings for DynamoDB Streams on the table. These settings consist of:</p>
/// <ul>
/// <li>
/// <p>
/// <code>StreamEnabled</code> - Indicates whether DynamoDB Streams is to be enabled
/// (true) or disabled (false).</p>
/// </li>
/// <li>
/// <p>
/// <code>StreamViewType</code> - When an item in the table is modified,
/// <code>StreamViewType</code> determines what information is written to the
/// table's stream. Valid values for <code>StreamViewType</code> are:</p>
/// <ul>
/// <li>
/// <p>
/// <code>KEYS_ONLY</code> - Only the key attributes of the modified item
/// are written to the stream.</p>
/// </li>
/// <li>
/// <p>
/// <code>NEW_IMAGE</code> - The entire item, as it appears after it was
/// modified, is written to the stream.</p>
/// </li>
/// <li>
/// <p>
/// <code>OLD_IMAGE</code> - The entire item, as it appeared before it was
/// modified, is written to the stream.</p>
/// </li>
/// <li>
/// <p>
/// <code>NEW_AND_OLD_IMAGES</code> - Both the new and the old item images
/// of the item are written to the stream.</p>
/// </li>
/// </ul>
/// </li>
/// </ul>
pub fn set_stream_specification(
mut self,
input: std::option::Option<crate::model::StreamSpecification>,
) -> Self {
self.inner = self.inner.set_stream_specification(input);
self
}
/// <p>Represents the settings used to enable server-side encryption.</p>
pub fn sse_specification(mut self, inp: crate::model::SseSpecification) -> Self {
self.inner = self.inner.sse_specification(inp);
self
}
/// <p>Represents the settings used to enable server-side encryption.</p>
pub fn set_sse_specification(
mut self,
input: std::option::Option<crate::model::SseSpecification>,
) -> Self {
self.inner = self.inner.set_sse_specification(input);
self
}
/// Appends an item to `Tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>A list of key-value pairs to label the table. For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html">Tagging
/// for DynamoDB</a>.</p>
pub fn tags(mut self, inp: impl Into<crate::model::Tag>) -> Self {
self.inner = self.inner.tags(inp);
self
}
/// <p>A list of key-value pairs to label the table. For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html">Tagging
/// for DynamoDB</a>.</p>
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.inner = self.inner.set_tags(input);
self
}
}
/// Fluent builder constructing a request to `DeleteBackup`.
///
/// <p>Deletes an existing backup of a table.</p>
/// <p>You can call <code>DeleteBackup</code> at a maximum rate of 10 times per
/// second.</p>
#[derive(std::fmt::Debug)]
pub struct DeleteBackup<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared handle to the configured client (connector `C`, middleware `M`,
    // retry policy `R`), held behind an `Arc` so builders can share it.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates `DeleteBackupInput` fields as the fluent setters are called.
    inner: crate::input::delete_backup_input::Builder,
}
impl<C, M, R> DeleteBackup<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DeleteBackup`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteBackupOutput,
aws_smithy_http::result::SdkError<crate::error::DeleteBackupError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DeleteBackupInputOperationOutputAlias,
crate::output::DeleteBackupOutput,
crate::error::DeleteBackupError,
crate::input::DeleteBackupInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ARN associated with the backup.</p>
pub fn backup_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.backup_arn(inp);
self
}
/// <p>The ARN associated with the backup.</p>
pub fn set_backup_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_backup_arn(input);
self
}
}
/// Fluent builder constructing a request to `DeleteItem`.
///
/// <p>Deletes a single item in a table by primary key. You can perform a conditional delete
/// operation that deletes the item if it exists, or if it has an expected attribute
/// value.</p>
/// <p>In addition to deleting an item, you can also return the item's attribute values in
/// the same operation, using the <code>ReturnValues</code> parameter.</p>
/// <p>Unless you specify conditions, the <code>DeleteItem</code> is an idempotent operation;
/// running it multiple times on the same item or attribute does <i>not</i>
/// result in an error response.</p>
/// <p>Conditional deletes are useful for deleting items only if specific conditions are met.
/// If those conditions are met, DynamoDB performs the delete. Otherwise, the item is not
/// deleted.</p>
#[derive(std::fmt::Debug)]
pub struct DeleteItem<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared handle to the configured client (connector `C`, middleware `M`,
    // retry policy `R`), held behind an `Arc` so builders can share it.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates `DeleteItemInput` fields as the fluent setters are called.
    inner: crate::input::delete_item_input::Builder,
}
impl<C, M, R> DeleteItem<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DeleteItem`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteItemOutput,
aws_smithy_http::result::SdkError<crate::error::DeleteItemError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DeleteItemInputOperationOutputAlias,
crate::output::DeleteItemOutput,
crate::error::DeleteItemError,
crate::input::DeleteItemInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the table from which to delete the item.</p>
pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_name(inp);
self
}
/// <p>The name of the table from which to delete the item.</p>
pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_name(input);
self
}
/// Adds a key-value pair to `Key`.
///
/// To override the contents of this collection use [`set_key`](Self::set_key).
///
/// <p>A map of attribute names to <code>AttributeValue</code> objects, representing the
/// primary key of the item to delete.</p>
/// <p>For the primary key, you must provide all of the attributes. For example, with a
/// simple primary key, you only need to provide a value for the partition key. For a
/// composite primary key, you must provide values for both the partition key and the sort
/// key.</p>
pub fn key(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::AttributeValue>,
) -> Self {
self.inner = self.inner.key(k, v);
self
}
/// <p>A map of attribute names to <code>AttributeValue</code> objects, representing the
/// primary key of the item to delete.</p>
/// <p>For the primary key, you must provide all of the attributes. For example, with a
/// simple primary key, you only need to provide a value for the partition key. For a
/// composite primary key, you must provide values for both the partition key and the sort
/// key.</p>
pub fn set_key(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::AttributeValue>,
>,
) -> Self {
self.inner = self.inner.set_key(input);
self
}
/// Adds a key-value pair to `Expected`.
///
/// To override the contents of this collection use [`set_expected`](Self::set_expected).
///
/// <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html">Expected</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn expected(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::ExpectedAttributeValue>,
) -> Self {
self.inner = self.inner.expected(k, v);
self
}
/// <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html">Expected</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_expected(
mut self,
input: std::option::Option<
std::collections::HashMap<
std::string::String,
crate::model::ExpectedAttributeValue,
>,
>,
) -> Self {
self.inner = self.inner.set_expected(input);
self
}
/// <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html">ConditionalOperator</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn conditional_operator(mut self, inp: crate::model::ConditionalOperator) -> Self {
self.inner = self.inner.conditional_operator(inp);
self
}
/// <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html">ConditionalOperator</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_conditional_operator(
mut self,
input: std::option::Option<crate::model::ConditionalOperator>,
) -> Self {
self.inner = self.inner.set_conditional_operator(input);
self
}
/// <p>Use <code>ReturnValues</code> if you want to get the item attributes as they appeared
/// before they were deleted. For <code>DeleteItem</code>, the valid values are:</p>
/// <ul>
/// <li>
/// <p>
/// <code>NONE</code> - If <code>ReturnValues</code> is not specified, or if its
/// value is <code>NONE</code>, then nothing is returned. (This setting is the
/// default for <code>ReturnValues</code>.)</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL_OLD</code> - The content of the old item is returned.</p>
/// </li>
/// </ul>
/// <note>
/// <p>The <code>ReturnValues</code> parameter is used by several DynamoDB operations;
/// however, <code>DeleteItem</code> does not recognize any values other than
/// <code>NONE</code> or <code>ALL_OLD</code>.</p>
/// </note>
pub fn return_values(mut self, inp: crate::model::ReturnValue) -> Self {
self.inner = self.inner.return_values(inp);
self
}
/// <p>Use <code>ReturnValues</code> if you want to get the item attributes as they appeared
/// before they were deleted. For <code>DeleteItem</code>, the valid values are:</p>
/// <ul>
/// <li>
/// <p>
/// <code>NONE</code> - If <code>ReturnValues</code> is not specified, or if its
/// value is <code>NONE</code>, then nothing is returned. (This setting is the
/// default for <code>ReturnValues</code>.)</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL_OLD</code> - The content of the old item is returned.</p>
/// </li>
/// </ul>
/// <note>
/// <p>The <code>ReturnValues</code> parameter is used by several DynamoDB operations;
/// however, <code>DeleteItem</code> does not recognize any values other than
/// <code>NONE</code> or <code>ALL_OLD</code>.</p>
/// </note>
pub fn set_return_values(
mut self,
input: std::option::Option<crate::model::ReturnValue>,
) -> Self {
self.inner = self.inner.set_return_values(input);
self
}
/// <p>Determines the level of detail about provisioned throughput consumption that is
/// returned in the response:</p>
/// <ul>
/// <li>
/// <p>
/// <code>INDEXES</code> - The response includes the aggregate
/// <code>ConsumedCapacity</code> for the operation, together with
/// <code>ConsumedCapacity</code> for each table and secondary index that was
/// accessed.</p>
/// <p>Note that some operations, such as <code>GetItem</code> and
/// <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
/// specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
/// information for table(s).</p>
/// </li>
/// <li>
/// <p>
/// <code>TOTAL</code> - The response includes only the aggregate
/// <code>ConsumedCapacity</code> for the operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
/// response.</p>
/// </li>
/// </ul>
pub fn return_consumed_capacity(
mut self,
inp: crate::model::ReturnConsumedCapacity,
) -> Self {
self.inner = self.inner.return_consumed_capacity(inp);
self
}
/// <p>Determines the level of detail about provisioned throughput consumption that is
/// returned in the response:</p>
/// <ul>
/// <li>
/// <p>
/// <code>INDEXES</code> - The response includes the aggregate
/// <code>ConsumedCapacity</code> for the operation, together with
/// <code>ConsumedCapacity</code> for each table and secondary index that was
/// accessed.</p>
/// <p>Note that some operations, such as <code>GetItem</code> and
/// <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
/// specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
/// information for table(s).</p>
/// </li>
/// <li>
/// <p>
/// <code>TOTAL</code> - The response includes only the aggregate
/// <code>ConsumedCapacity</code> for the operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
/// response.</p>
/// </li>
/// </ul>
pub fn set_return_consumed_capacity(
mut self,
input: std::option::Option<crate::model::ReturnConsumedCapacity>,
) -> Self {
self.inner = self.inner.set_return_consumed_capacity(input);
self
}
/// <p>Determines whether item collection metrics are returned. If set to <code>SIZE</code>,
/// the response includes statistics about item collections, if any, that were modified
/// during the operation are returned in the response. If set to <code>NONE</code> (the
/// default), no statistics are returned.</p>
pub fn return_item_collection_metrics(
mut self,
inp: crate::model::ReturnItemCollectionMetrics,
) -> Self {
self.inner = self.inner.return_item_collection_metrics(inp);
self
}
/// <p>Determines whether item collection metrics are returned. If set to <code>SIZE</code>,
/// the response includes statistics about item collections, if any, that were modified
/// during the operation are returned in the response. If set to <code>NONE</code> (the
/// default), no statistics are returned.</p>
pub fn set_return_item_collection_metrics(
mut self,
input: std::option::Option<crate::model::ReturnItemCollectionMetrics>,
) -> Self {
self.inner = self.inner.set_return_item_collection_metrics(input);
self
}
/// <p>A condition that must be satisfied in order for a conditional <code>DeleteItem</code>
/// to succeed.</p>
/// <p>An expression can contain any of the following:</p>
/// <ul>
/// <li>
/// <p>Functions: <code>attribute_exists | attribute_not_exists | attribute_type |
/// contains | begins_with | size</code>
/// </p>
/// <p>These function names are case-sensitive.</p>
/// </li>
/// <li>
/// <p>Comparison operators: <code>= | <> |
/// < | > | <= | >= |
/// BETWEEN | IN </code>
/// </p>
/// </li>
/// <li>
/// <p> Logical operators: <code>AND | OR | NOT</code>
/// </p>
/// </li>
/// </ul>
/// <p>For more information about condition expressions, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Condition Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn condition_expression(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.condition_expression(inp);
self
}
/// <p>A condition that must be satisfied in order for a conditional <code>DeleteItem</code>
/// to succeed.</p>
/// <p>An expression can contain any of the following:</p>
/// <ul>
/// <li>
/// <p>Functions: <code>attribute_exists | attribute_not_exists | attribute_type |
/// contains | begins_with | size</code>
/// </p>
/// <p>These function names are case-sensitive.</p>
/// </li>
/// <li>
/// <p>Comparison operators: <code>= | <> |
/// < | > | <= | >= |
/// BETWEEN | IN </code>
/// </p>
/// </li>
/// <li>
/// <p> Logical operators: <code>AND | OR | NOT</code>
/// </p>
/// </li>
/// </ul>
/// <p>For more information about condition expressions, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Condition Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_condition_expression(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_condition_expression(input);
self
}
/// Adds a key-value pair to `ExpressionAttributeNames`.
///
/// To override the contents of this collection use [`set_expression_attribute_names`](Self::set_expression_attribute_names).
///
/// <p>One or more substitution tokens for attribute names in an expression. The following
/// are some use cases for using <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>To access an attribute whose name conflicts with a DynamoDB reserved
/// word.</p>
/// </li>
/// <li>
/// <p>To create a placeholder for repeating occurrences of an attribute name in an
/// expression.</p>
/// </li>
/// <li>
/// <p>To prevent special characters in an attribute name from being misinterpreted
/// in an expression.</p>
/// </li>
/// </ul>
/// <p>Use the <b>#</b> character in an expression to dereference
/// an attribute name. For example, consider the following attribute name:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Percentile</code>
/// </p>
/// </li>
/// </ul>
/// <p>The name of this attribute conflicts with a reserved word, so it cannot be used
/// directly in an expression. (For the complete list of reserved words, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved Words</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>). To work around this, you could specify the following for
/// <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>{"#P":"Percentile"}</code>
/// </p>
/// </li>
/// </ul>
/// <p>You could then use this substitution in an expression, as in this example:</p>
/// <ul>
/// <li>
/// <p>
/// <code>#P = :val</code>
/// </p>
/// </li>
/// </ul>
/// <note>
/// <p>Tokens that begin with the <b>:</b> character are
/// <i>expression attribute values</i>, which are placeholders for the
/// actual value at runtime.</p>
/// </note>
/// <p>For more information on expression attribute names, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn expression_attribute_names(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.expression_attribute_names(k, v);
self
}
/// <p>One or more substitution tokens for attribute names in an expression. The following
/// are some use cases for using <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>To access an attribute whose name conflicts with a DynamoDB reserved
/// word.</p>
/// </li>
/// <li>
/// <p>To create a placeholder for repeating occurrences of an attribute name in an
/// expression.</p>
/// </li>
/// <li>
/// <p>To prevent special characters in an attribute name from being misinterpreted
/// in an expression.</p>
/// </li>
/// </ul>
/// <p>Use the <b>#</b> character in an expression to dereference
/// an attribute name. For example, consider the following attribute name:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Percentile</code>
/// </p>
/// </li>
/// </ul>
/// <p>The name of this attribute conflicts with a reserved word, so it cannot be used
/// directly in an expression. (For the complete list of reserved words, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved Words</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>). To work around this, you could specify the following for
/// <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>{"#P":"Percentile"}</code>
/// </p>
/// </li>
/// </ul>
/// <p>You could then use this substitution in an expression, as in this example:</p>
/// <ul>
/// <li>
/// <p>
/// <code>#P = :val</code>
/// </p>
/// </li>
/// </ul>
/// <note>
/// <p>Tokens that begin with the <b>:</b> character are
/// <i>expression attribute values</i>, which are placeholders for the
/// actual value at runtime.</p>
/// </note>
/// <p>For more information on expression attribute names, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_expression_attribute_names(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_expression_attribute_names(input);
self
}
/// Adds a key-value pair to `ExpressionAttributeValues`.
///
/// To override the contents of this collection use [`set_expression_attribute_values`](Self::set_expression_attribute_values).
///
/// <p>One or more values that can be substituted in an expression.</p>
/// <p>Use the <b>:</b> (colon) character in an expression to
/// dereference an attribute value. For example, suppose that you wanted to check whether
/// the value of the <i>ProductStatus</i> attribute was one of the following: </p>
/// <p>
/// <code>Available | Backordered | Discontinued</code>
/// </p>
/// <p>You would first need to specify <code>ExpressionAttributeValues</code> as
/// follows:</p>
/// <p>
/// <code>{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
/// ":disc":{"S":"Discontinued"} }</code>
/// </p>
/// <p>You could then use these values in an expression, such as this:</p>
/// <p>
/// <code>ProductStatus IN (:avail, :back, :disc)</code>
/// </p>
/// <p>For more information on expression attribute values, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Condition Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn expression_attribute_values(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::AttributeValue>,
) -> Self {
self.inner = self.inner.expression_attribute_values(k, v);
self
}
/// <p>One or more values that can be substituted in an expression.</p>
/// <p>Use the <b>:</b> (colon) character in an expression to
/// dereference an attribute value. For example, suppose that you wanted to check whether
/// the value of the <i>ProductStatus</i> attribute was one of the following: </p>
/// <p>
/// <code>Available | Backordered | Discontinued</code>
/// </p>
/// <p>You would first need to specify <code>ExpressionAttributeValues</code> as
/// follows:</p>
/// <p>
/// <code>{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
/// ":disc":{"S":"Discontinued"} }</code>
/// </p>
/// <p>You could then use these values in an expression, such as this:</p>
/// <p>
/// <code>ProductStatus IN (:avail, :back, :disc)</code>
/// </p>
/// <p>For more information on expression attribute values, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Condition Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_expression_attribute_values(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::AttributeValue>,
>,
) -> Self {
self.inner = self.inner.set_expression_attribute_values(input);
self
}
}
/// Fluent builder constructing a request to `DeleteTable`.
///
/// <p>The <code>DeleteTable</code> operation deletes a table and all of its items. After a
/// <code>DeleteTable</code> request, the specified table is in the
/// <code>DELETING</code> state until DynamoDB completes the deletion. If the table is
/// in the <code>ACTIVE</code> state, you can delete it. If a table is in
/// <code>CREATING</code> or <code>UPDATING</code> states, then DynamoDB returns a
/// <code>ResourceInUseException</code>. If the specified table does not exist, DynamoDB
/// returns a <code>ResourceNotFoundException</code>. If table is already in the
/// <code>DELETING</code> state, no error is returned. </p>
/// <note>
/// <p>DynamoDB might continue to accept data read and write operations, such as
/// <code>GetItem</code> and <code>PutItem</code>, on a table in the
/// <code>DELETING</code> state until the table deletion is complete.</p>
/// </note>
/// <p>When you delete a table, any indexes on that table are also deleted.</p>
/// <p>If you have DynamoDB Streams enabled on the table, then the corresponding stream on
/// that table goes into the <code>DISABLED</code> state, and the stream is automatically
/// deleted after 24 hours.</p>
///
/// <p>Use the <code>DescribeTable</code> action to check the status of the table. </p>
    #[derive(std::fmt::Debug)]
    pub struct DeleteTable<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        /// Shared handle whose `conf` and `client` are used when sending the request.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        /// Input builder that accumulates the request parameters set via the fluent methods.
        inner: crate::input::delete_table_input::Builder,
    }
impl<C, M, R> DeleteTable<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DeleteTable`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteTableOutput,
aws_smithy_http::result::SdkError<crate::error::DeleteTableError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DeleteTableInputOperationOutputAlias,
crate::output::DeleteTableOutput,
crate::error::DeleteTableError,
crate::input::DeleteTableInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the table to delete.</p>
pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_name(inp);
self
}
/// <p>The name of the table to delete.</p>
pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_name(input);
self
}
}
/// Fluent builder constructing a request to `DescribeBackup`.
///
/// <p>Describes an existing backup of a table.</p>
/// <p>You can call <code>DescribeBackup</code> at a maximum rate of 10 times per
/// second.</p>
    #[derive(std::fmt::Debug)]
    pub struct DescribeBackup<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        /// Shared handle whose `conf` and `client` are used when sending the request.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        /// Input builder that accumulates the request parameters set via the fluent methods.
        inner: crate::input::describe_backup_input::Builder,
    }
impl<C, M, R> DescribeBackup<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeBackup`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeBackupOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeBackupError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeBackupInputOperationOutputAlias,
crate::output::DescribeBackupOutput,
crate::error::DescribeBackupError,
crate::input::DescribeBackupInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Resource Name (ARN) associated with the backup.</p>
pub fn backup_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.backup_arn(inp);
self
}
/// <p>The Amazon Resource Name (ARN) associated with the backup.</p>
pub fn set_backup_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_backup_arn(input);
self
}
}
/// Fluent builder constructing a request to `DescribeContinuousBackups`.
///
/// <p>Checks the status of continuous backups and point in time recovery on the specified
/// table. Continuous backups are <code>ENABLED</code> on all tables at table creation. If
/// point in time recovery is enabled, <code>PointInTimeRecoveryStatus</code> will be set to
/// ENABLED.</p>
/// <p> After continuous backups and point in time recovery are enabled, you can restore to
/// any point in time within <code>EarliestRestorableDateTime</code> and
/// <code>LatestRestorableDateTime</code>. </p>
/// <p>
/// <code>LatestRestorableDateTime</code> is typically 5 minutes before the current time.
/// You can restore your table to any point in time during the last 35 days. </p>
/// <p>You can call <code>DescribeContinuousBackups</code> at a maximum rate of 10 times per
/// second.</p>
    #[derive(std::fmt::Debug)]
    pub struct DescribeContinuousBackups<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        /// Shared handle whose `conf` and `client` are used when sending the request.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        /// Input builder that accumulates the request parameters set via the fluent methods.
        inner: crate::input::describe_continuous_backups_input::Builder,
    }
impl<C, M, R> DescribeContinuousBackups<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeContinuousBackups`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeContinuousBackupsOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeContinuousBackupsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeContinuousBackupsInputOperationOutputAlias,
crate::output::DescribeContinuousBackupsOutput,
crate::error::DescribeContinuousBackupsError,
crate::input::DescribeContinuousBackupsInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>Name of the table for which the customer wants to check the continuous backups and
/// point in time recovery settings.</p>
pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_name(inp);
self
}
/// <p>Name of the table for which the customer wants to check the continuous backups and
/// point in time recovery settings.</p>
pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_name(input);
self
}
}
/// Fluent builder constructing a request to `DescribeContributorInsights`.
///
/// <p>Returns information about contributor insights, for a given table or global secondary
/// index.</p>
    #[derive(std::fmt::Debug)]
    pub struct DescribeContributorInsights<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        /// Shared handle whose `conf` and `client` are used when sending the request.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        /// Input builder that accumulates the request parameters set via the fluent methods.
        inner: crate::input::describe_contributor_insights_input::Builder,
    }
impl<C, M, R> DescribeContributorInsights<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeContributorInsights`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeContributorInsightsOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeContributorInsightsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeContributorInsightsInputOperationOutputAlias,
crate::output::DescribeContributorInsightsOutput,
crate::error::DescribeContributorInsightsError,
crate::input::DescribeContributorInsightsInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the table to describe.</p>
pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_name(inp);
self
}
/// <p>The name of the table to describe.</p>
pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_name(input);
self
}
/// <p>The name of the global secondary index to describe, if applicable.</p>
pub fn index_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.index_name(inp);
self
}
/// <p>The name of the global secondary index to describe, if applicable.</p>
pub fn set_index_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_index_name(input);
self
}
}
/// Fluent builder constructing a request to `DescribeEndpoints`.
///
/// <p>Returns the regional endpoint information.</p>
    #[derive(std::fmt::Debug)]
    pub struct DescribeEndpoints<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        /// Shared handle whose `conf` and `client` are used when sending the request.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        /// Input builder for this request (the operation takes no parameters).
        inner: crate::input::describe_endpoints_input::Builder,
    }
impl<C, M, R> DescribeEndpoints<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeEndpoints`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeEndpointsOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeEndpointsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeEndpointsInputOperationOutputAlias,
crate::output::DescribeEndpointsOutput,
crate::error::DescribeEndpointsError,
crate::input::DescribeEndpointsInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
}
/// Fluent builder constructing a request to `DescribeExport`.
///
/// <p>Describes an existing table export.</p>
    #[derive(std::fmt::Debug)]
    pub struct DescribeExport<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        /// Shared handle whose `conf` and `client` are used when sending the request.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        /// Input builder that accumulates the request parameters set via the fluent methods.
        inner: crate::input::describe_export_input::Builder,
    }
impl<C, M, R> DescribeExport<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeExport`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeExportOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeExportError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeExportInputOperationOutputAlias,
crate::output::DescribeExportOutput,
crate::error::DescribeExportError,
crate::input::DescribeExportInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Resource Name (ARN) associated with the export.</p>
pub fn export_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.export_arn(inp);
self
}
/// <p>The Amazon Resource Name (ARN) associated with the export.</p>
pub fn set_export_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_export_arn(input);
self
}
}
/// Fluent builder constructing a request to `DescribeGlobalTable`.
///
/// <p>Returns information about the specified global table.</p>
/// <note>
/// <p>This operation only applies to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html">Version
/// 2017.11.29</a> of global tables. If you are using global tables <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html">Version
/// 2019.11.21</a> you can use <a href="https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DescribeTable.html">DescribeTable</a> instead.</p>
/// </note>
    #[derive(std::fmt::Debug)]
    pub struct DescribeGlobalTable<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        /// Shared handle whose `conf` and `client` are used when sending the request.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        /// Input builder that accumulates the request parameters set via the fluent methods.
        inner: crate::input::describe_global_table_input::Builder,
    }
impl<C, M, R> DescribeGlobalTable<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeGlobalTable`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeGlobalTableOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeGlobalTableError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeGlobalTableInputOperationOutputAlias,
crate::output::DescribeGlobalTableOutput,
crate::error::DescribeGlobalTableError,
crate::input::DescribeGlobalTableInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the global table.</p>
pub fn global_table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.global_table_name(inp);
self
}
/// <p>The name of the global table.</p>
pub fn set_global_table_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_global_table_name(input);
self
}
}
/// Fluent builder constructing a request to `DescribeGlobalTableSettings`.
///
/// <p>Describes Region-specific settings for a global table.</p>
/// <note>
/// <p>This operation only applies to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html">Version
/// 2017.11.29</a> of global tables.</p>
/// </note>
    #[derive(std::fmt::Debug)]
    pub struct DescribeGlobalTableSettings<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        /// Shared handle whose `conf` and `client` are used when sending the request.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        /// Input builder that accumulates the request parameters set via the fluent methods.
        inner: crate::input::describe_global_table_settings_input::Builder,
    }
impl<C, M, R> DescribeGlobalTableSettings<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeGlobalTableSettings`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeGlobalTableSettingsOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeGlobalTableSettingsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeGlobalTableSettingsInputOperationOutputAlias,
crate::output::DescribeGlobalTableSettingsOutput,
crate::error::DescribeGlobalTableSettingsError,
crate::input::DescribeGlobalTableSettingsInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the global table to describe.</p>
pub fn global_table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.global_table_name(inp);
self
}
/// <p>The name of the global table to describe.</p>
pub fn set_global_table_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_global_table_name(input);
self
}
}
/// Fluent builder constructing a request to `DescribeKinesisStreamingDestination`.
///
/// <p>Returns information about the status of Kinesis streaming.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeKinesisStreamingDestination<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_kinesis_streaming_destination_input::Builder,
}
impl<C, M, R> DescribeKinesisStreamingDestination<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeKinesisStreamingDestination`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeKinesisStreamingDestinationOutput,
aws_smithy_http::result::SdkError<
crate::error::DescribeKinesisStreamingDestinationError,
>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeKinesisStreamingDestinationInputOperationOutputAlias,
crate::output::DescribeKinesisStreamingDestinationOutput,
crate::error::DescribeKinesisStreamingDestinationError,
crate::input::DescribeKinesisStreamingDestinationInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the table being described.</p>
pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_name(inp);
self
}
/// <p>The name of the table being described.</p>
pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_name(input);
self
}
}
/// Fluent builder constructing a request to `DescribeLimits`.
///
/// <p>Returns the current provisioned-capacity quotas for your Amazon Web Services account in
/// a Region, both for the Region as a whole and for any one DynamoDB table that you create
/// there.</p>
/// <p>When you establish an Amazon Web Services account, the account has initial quotas on
/// the maximum read capacity units and write capacity units that you can provision across
/// all of your DynamoDB tables in a given Region. Also, there are per-table
/// quotas that apply when you create a table there. For more information, see the <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html">Service,
/// Account, and Table Quotas</a> page in the <i>Amazon DynamoDB
/// Developer Guide</i>.</p>
///
/// <p>Although you can increase these quotas by filing a case at <a href="https://console.aws.amazon.com/support/home#/">Amazon Web Services Support Center</a>, obtaining the
/// increase is not instantaneous. The <code>DescribeLimits</code> action lets you write
/// code to compare the capacity you are currently using to those quotas imposed by your
/// account so that you have enough time to apply for an increase before you hit a
/// quota.</p>
///
/// <p>For example, you could use one of the Amazon Web Services SDKs to do the
/// following:</p>
///
/// <ol>
/// <li>
/// <p>Call <code>DescribeLimits</code> for a particular Region to obtain your
/// current account quotas on provisioned capacity there.</p>
/// </li>
/// <li>
/// <p>Create a variable to hold the aggregate read capacity units provisioned for
/// all your tables in that Region, and one to hold the aggregate write capacity
/// units. Zero them both.</p>
/// </li>
/// <li>
/// <p>Call <code>ListTables</code> to obtain a list of all your DynamoDB
/// tables.</p>
/// </li>
/// <li>
/// <p>For each table name listed by <code>ListTables</code>, do the
/// following:</p>
/// <ul>
/// <li>
/// <p>Call <code>DescribeTable</code> with the table name.</p>
/// </li>
/// <li>
/// <p>Use the data returned by <code>DescribeTable</code> to add the read
/// capacity units and write capacity units provisioned for the table itself
/// to your variables.</p>
/// </li>
/// <li>
/// <p>If the table has one or more global secondary indexes (GSIs), loop
/// over these GSIs and add their provisioned capacity values to your
/// variables as well.</p>
/// </li>
/// </ul>
/// </li>
/// <li>
/// <p>Report the account quotas for that Region returned by
/// <code>DescribeLimits</code>, along with the total current provisioned
/// capacity levels you have calculated.</p>
/// </li>
/// </ol>
///
/// <p>This will let you see whether you are getting close to your account-level
/// quotas.</p>
/// <p>The per-table quotas apply only when you are creating a new table. They restrict the
/// sum of the provisioned capacity of the new table itself and all its global secondary
/// indexes.</p>
/// <p>For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned
/// capacity extremely rapidly, but the only quota that applies is that the aggregate
/// provisioned capacity over all your tables and GSIs cannot exceed either of the
/// per-account quotas.</p>
/// <note>
/// <p>
/// <code>DescribeLimits</code> should only be called periodically. You can expect
/// throttling errors if you call it more than once in a minute.</p>
/// </note>
/// <p>The <code>DescribeLimits</code> Request element has no content.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeLimits<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state: connector, middleware, retry policy, and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the request parameters (this operation takes none).
    inner: crate::input::describe_limits_input::Builder,
}
impl<C, M, R> DescribeLimits<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a fresh `DescribeLimits` fluent builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Any failure is surfaced as an `SdkError` carrying additional details
    /// that the caller can match against.
    ///
    /// Retryable failures are retried twice by default; this is tunable via
    /// the [RetryConfig](aws_smithy_types::retry::RetryConfig) supplied when
    /// the client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeLimitsOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeLimitsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeLimitsInputOperationOutputAlias,
            crate::output::DescribeLimitsOutput,
            crate::error::DescribeLimitsError,
            crate::input::DescribeLimitsInputOperationRetryAlias,
        >,
    {
        // Build the input, convert it into an operation, then hand it to the
        // underlying client; both construction steps map failures into
        // `SdkError::ConstructionFailure`.
        let operation = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(operation).await
    }
}
/// Fluent builder constructing a request to `DescribeTable`.
///
/// <p>Returns information about the table, including the current status of the table, when
/// it was created, the primary key schema, and any indexes on the table.</p>
/// <note>
/// <p>If you issue a <code>DescribeTable</code> request immediately after a
/// <code>CreateTable</code> request, DynamoDB might return a
/// <code>ResourceNotFoundException</code>. This is because
/// <code>DescribeTable</code> uses an eventually consistent query, and the metadata
/// for your table might not be available at that moment. Wait for a few seconds, and
/// then try the <code>DescribeTable</code> request again.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct DescribeTable<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state: connector, middleware, retry policy, and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the request parameters supplied through the fluent setters.
    inner: crate::input::describe_table_input::Builder,
}
impl<C, M, R> DescribeTable<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a fresh `DescribeTable` fluent builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Any failure is surfaced as an `SdkError` carrying additional details
    /// that the caller can match against.
    ///
    /// Retryable failures are retried twice by default; this is tunable via
    /// the [RetryConfig](aws_smithy_types::retry::RetryConfig) supplied when
    /// the client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeTableOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeTableError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeTableInputOperationOutputAlias,
            crate::output::DescribeTableOutput,
            crate::error::DescribeTableError,
            crate::input::DescribeTableInputOperationRetryAlias,
        >,
    {
        // Build the input, convert it into an operation, then hand it to the
        // underlying client; both construction steps map failures into
        // `SdkError::ConstructionFailure`.
        let operation = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The name of the table to describe.</p>
    pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.table_name(inp);
        self.inner = updated;
        self
    }
    /// <p>The name of the table to describe.</p>
    pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_table_name(input);
        self.inner = updated;
        self
    }
}
/// Fluent builder constructing a request to `DescribeTableReplicaAutoScaling`.
///
/// <p>Describes auto scaling settings across replicas of the global table at once.</p>
/// <note>
/// <p>This operation only applies to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html">Version
/// 2019.11.21</a> of global tables.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct DescribeTableReplicaAutoScaling<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state: connector, middleware, retry policy, and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the request parameters supplied through the fluent setters.
    inner: crate::input::describe_table_replica_auto_scaling_input::Builder,
}
impl<C, M, R> DescribeTableReplicaAutoScaling<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a fresh `DescribeTableReplicaAutoScaling` fluent builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Any failure is surfaced as an `SdkError` carrying additional details
    /// that the caller can match against.
    ///
    /// Retryable failures are retried twice by default; this is tunable via
    /// the [RetryConfig](aws_smithy_types::retry::RetryConfig) supplied when
    /// the client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeTableReplicaAutoScalingOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeTableReplicaAutoScalingError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeTableReplicaAutoScalingInputOperationOutputAlias,
            crate::output::DescribeTableReplicaAutoScalingOutput,
            crate::error::DescribeTableReplicaAutoScalingError,
            crate::input::DescribeTableReplicaAutoScalingInputOperationRetryAlias,
        >,
    {
        // Build the input, convert it into an operation, then hand it to the
        // underlying client; both construction steps map failures into
        // `SdkError::ConstructionFailure`.
        let operation = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The name of the table.</p>
    pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.table_name(inp);
        self.inner = updated;
        self
    }
    /// <p>The name of the table.</p>
    pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_table_name(input);
        self.inner = updated;
        self
    }
}
/// Fluent builder constructing a request to `DescribeTimeToLive`.
///
/// <p>Gives a description of the Time to Live (TTL) status on the specified table. </p>
#[derive(std::fmt::Debug)]
pub struct DescribeTimeToLive<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state: connector, middleware, retry policy, and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the request parameters supplied through the fluent setters.
    inner: crate::input::describe_time_to_live_input::Builder,
}
impl<C, M, R> DescribeTimeToLive<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a fresh `DescribeTimeToLive` fluent builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Any failure is surfaced as an `SdkError` carrying additional details
    /// that the caller can match against.
    ///
    /// Retryable failures are retried twice by default; this is tunable via
    /// the [RetryConfig](aws_smithy_types::retry::RetryConfig) supplied when
    /// the client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeTimeToLiveOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeTimeToLiveError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeTimeToLiveInputOperationOutputAlias,
            crate::output::DescribeTimeToLiveOutput,
            crate::error::DescribeTimeToLiveError,
            crate::input::DescribeTimeToLiveInputOperationRetryAlias,
        >,
    {
        // Build the input, convert it into an operation, then hand it to the
        // underlying client; both construction steps map failures into
        // `SdkError::ConstructionFailure`.
        let operation = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The name of the table to be described.</p>
    pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.table_name(inp);
        self.inner = updated;
        self
    }
    /// <p>The name of the table to be described.</p>
    pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_table_name(input);
        self.inner = updated;
        self
    }
}
/// Fluent builder constructing a request to `DisableKinesisStreamingDestination`.
///
/// <p>Stops replication from the DynamoDB table to the Kinesis data stream. This is done
/// without deleting either of the resources.</p>
#[derive(std::fmt::Debug)]
pub struct DisableKinesisStreamingDestination<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::disable_kinesis_streaming_destination_input::Builder,
}
impl<C, M, R> DisableKinesisStreamingDestination<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DisableKinesisStreamingDestination`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DisableKinesisStreamingDestinationOutput,
aws_smithy_http::result::SdkError<
crate::error::DisableKinesisStreamingDestinationError,
>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DisableKinesisStreamingDestinationInputOperationOutputAlias,
crate::output::DisableKinesisStreamingDestinationOutput,
crate::error::DisableKinesisStreamingDestinationError,
crate::input::DisableKinesisStreamingDestinationInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the DynamoDB table.</p>
pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_name(inp);
self
}
/// <p>The name of the DynamoDB table.</p>
pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_name(input);
self
}
/// <p>The ARN for a Kinesis data stream.</p>
pub fn stream_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.stream_arn(inp);
self
}
/// <p>The ARN for a Kinesis data stream.</p>
pub fn set_stream_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_stream_arn(input);
self
}
}
/// Fluent builder constructing a request to `EnableKinesisStreamingDestination`.
///
/// <p>Starts table data replication to the specified Kinesis data stream at a timestamp
/// chosen during the enable workflow. If this operation doesn't return results immediately,
/// use DescribeKinesisStreamingDestination to check if streaming to the Kinesis data stream
/// is ACTIVE.</p>
#[derive(std::fmt::Debug)]
pub struct EnableKinesisStreamingDestination<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state: connector, middleware, retry policy, and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the request parameters supplied through the fluent setters.
    inner: crate::input::enable_kinesis_streaming_destination_input::Builder,
}
impl<C, M, R> EnableKinesisStreamingDestination<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a fresh `EnableKinesisStreamingDestination` fluent builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Any failure is surfaced as an `SdkError` carrying additional details
    /// that the caller can match against.
    ///
    /// Retryable failures are retried twice by default; this is tunable via
    /// the [RetryConfig](aws_smithy_types::retry::RetryConfig) supplied when
    /// the client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::EnableKinesisStreamingDestinationOutput,
        aws_smithy_http::result::SdkError<crate::error::EnableKinesisStreamingDestinationError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::EnableKinesisStreamingDestinationInputOperationOutputAlias,
            crate::output::EnableKinesisStreamingDestinationOutput,
            crate::error::EnableKinesisStreamingDestinationError,
            crate::input::EnableKinesisStreamingDestinationInputOperationRetryAlias,
        >,
    {
        // Build the input, convert it into an operation, then hand it to the
        // underlying client; both construction steps map failures into
        // `SdkError::ConstructionFailure`.
        let operation = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The name of the DynamoDB table.</p>
    pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.table_name(inp);
        self.inner = updated;
        self
    }
    /// <p>The name of the DynamoDB table.</p>
    pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_table_name(input);
        self.inner = updated;
        self
    }
    /// <p>The ARN for a Kinesis data stream.</p>
    pub fn stream_arn(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.stream_arn(inp);
        self.inner = updated;
        self
    }
    /// <p>The ARN for a Kinesis data stream.</p>
    pub fn set_stream_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_stream_arn(input);
        self.inner = updated;
        self
    }
}
/// Fluent builder constructing a request to `ExecuteStatement`.
///
/// <p>This operation allows you to perform reads and singleton writes on data stored in
/// DynamoDB, using PartiQL.</p>
#[derive(std::fmt::Debug)]
pub struct ExecuteStatement<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state: connector, middleware, retry policy, and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the request parameters supplied through the fluent setters.
    inner: crate::input::execute_statement_input::Builder,
}
impl<C, M, R> ExecuteStatement<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a fresh `ExecuteStatement` fluent builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Any failure is surfaced as an `SdkError` carrying additional details
    /// that the caller can match against.
    ///
    /// Retryable failures are retried twice by default; this is tunable via
    /// the [RetryConfig](aws_smithy_types::retry::RetryConfig) supplied when
    /// the client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ExecuteStatementOutput,
        aws_smithy_http::result::SdkError<crate::error::ExecuteStatementError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ExecuteStatementInputOperationOutputAlias,
            crate::output::ExecuteStatementOutput,
            crate::error::ExecuteStatementError,
            crate::input::ExecuteStatementInputOperationRetryAlias,
        >,
    {
        // Build the input, convert it into an operation, then hand it to the
        // underlying client; both construction steps map failures into
        // `SdkError::ConstructionFailure`.
        let operation = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The PartiQL statement representing the operation to run.</p>
    pub fn statement(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.statement(inp);
        self.inner = updated;
        self
    }
    /// <p>The PartiQL statement representing the operation to run.</p>
    pub fn set_statement(mut self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_statement(input);
        self.inner = updated;
        self
    }
    /// Appends an item to `Parameters`.
    ///
    /// To override the contents of this collection use [`set_parameters`](Self::set_parameters).
    ///
    /// <p>The parameters for the PartiQL statement, if any.</p>
    pub fn parameters(mut self, inp: impl Into<crate::model::AttributeValue>) -> Self {
        let updated = self.inner.parameters(inp);
        self.inner = updated;
        self
    }
    /// <p>The parameters for the PartiQL statement, if any.</p>
    pub fn set_parameters(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::AttributeValue>>,
    ) -> Self {
        let updated = self.inner.set_parameters(input);
        self.inner = updated;
        self
    }
    /// <p>The consistency of a read operation. If set to <code>true</code>, then a strongly
    /// consistent read is used; otherwise, an eventually consistent read is used.</p>
    pub fn consistent_read(mut self, inp: bool) -> Self {
        let updated = self.inner.consistent_read(inp);
        self.inner = updated;
        self
    }
    /// <p>The consistency of a read operation. If set to <code>true</code>, then a strongly
    /// consistent read is used; otherwise, an eventually consistent read is used.</p>
    pub fn set_consistent_read(mut self, input: std::option::Option<bool>) -> Self {
        let updated = self.inner.set_consistent_read(input);
        self.inner = updated;
        self
    }
    /// <p>Set this value to get remaining results, if <code>NextToken</code> was returned in the
    /// statement response.</p>
    pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.next_token(inp);
        self.inner = updated;
        self
    }
    /// <p>Set this value to get remaining results, if <code>NextToken</code> was returned in the
    /// statement response.</p>
    pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_next_token(input);
        self.inner = updated;
        self
    }
}
/// Fluent builder constructing a request to `ExecuteTransaction`.
///
/// <p>This operation allows you to perform transactional reads or writes on data stored in
/// DynamoDB, using PartiQL.</p>
/// <note>
/// <p>The entire transaction must consist of either read statements or write statements,
/// you cannot mix both in one transaction. The EXISTS function is an exception and can
/// be used to check the condition of specific attributes of the item in a similar
/// manner to <code>ConditionCheck</code> in the <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/transaction-apis.html#transaction-apis-txwriteitems">TransactWriteItems</a> API.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct ExecuteTransaction<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state: connector, middleware, retry policy, and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the request parameters supplied through the fluent setters.
    inner: crate::input::execute_transaction_input::Builder,
}
impl<C, M, R> ExecuteTransaction<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a fresh `ExecuteTransaction` fluent builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Any failure is surfaced as an `SdkError` carrying additional details
    /// that the caller can match against.
    ///
    /// Retryable failures are retried twice by default; this is tunable via
    /// the [RetryConfig](aws_smithy_types::retry::RetryConfig) supplied when
    /// the client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ExecuteTransactionOutput,
        aws_smithy_http::result::SdkError<crate::error::ExecuteTransactionError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ExecuteTransactionInputOperationOutputAlias,
            crate::output::ExecuteTransactionOutput,
            crate::error::ExecuteTransactionError,
            crate::input::ExecuteTransactionInputOperationRetryAlias,
        >,
    {
        // Build the input, convert it into an operation, then hand it to the
        // underlying client; both construction steps map failures into
        // `SdkError::ConstructionFailure`.
        let operation = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(operation).await
    }
    /// Appends an item to `TransactStatements`.
    ///
    /// To override the contents of this collection use [`set_transact_statements`](Self::set_transact_statements).
    ///
    /// <p>The list of PartiQL statements representing the transaction to run.</p>
    pub fn transact_statements(
        mut self,
        inp: impl Into<crate::model::ParameterizedStatement>,
    ) -> Self {
        let updated = self.inner.transact_statements(inp);
        self.inner = updated;
        self
    }
    /// <p>The list of PartiQL statements representing the transaction to run.</p>
    pub fn set_transact_statements(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::ParameterizedStatement>>,
    ) -> Self {
        let updated = self.inner.set_transact_statements(input);
        self.inner = updated;
        self
    }
    // NOTE: the upstream doc for this member was a copy-paste of the NextToken
    // pagination text; ClientRequestToken is an idempotency token per the
    // DynamoDB ExecuteTransaction API reference.
    /// <p>Providing a <code>ClientRequestToken</code> makes the call to
    /// <code>ExecuteTransaction</code> idempotent, meaning that multiple identical calls
    /// have the same effect as one single call.</p>
    pub fn client_request_token(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.client_request_token(inp);
        self.inner = updated;
        self
    }
    /// <p>Providing a <code>ClientRequestToken</code> makes the call to
    /// <code>ExecuteTransaction</code> idempotent, meaning that multiple identical calls
    /// have the same effect as one single call.</p>
    pub fn set_client_request_token(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let updated = self.inner.set_client_request_token(input);
        self.inner = updated;
        self
    }
}
/// Fluent builder constructing a request to `ExportTableToPointInTime`.
///
/// <p>Exports table data to an S3 bucket. The table must have point in time recovery
/// enabled, and you can export data from any time within the point in time recovery
/// window.</p>
#[derive(std::fmt::Debug)]
pub struct ExportTableToPointInTime<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state: connector, middleware, retry policy, and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the request parameters supplied through the fluent setters.
    inner: crate::input::export_table_to_point_in_time_input::Builder,
}
impl<C, M, R> ExportTableToPointInTime<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ExportTableToPointInTime`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ExportTableToPointInTimeOutput,
aws_smithy_http::result::SdkError<crate::error::ExportTableToPointInTimeError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ExportTableToPointInTimeInputOperationOutputAlias,
crate::output::ExportTableToPointInTimeOutput,
crate::error::ExportTableToPointInTimeError,
crate::input::ExportTableToPointInTimeInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Resource Name (ARN) associated with the table to export.</p>
pub fn table_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_arn(inp);
self
}
/// <p>The Amazon Resource Name (ARN) associated with the table to export.</p>
pub fn set_table_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_arn(input);
self
}
/// <p>Time in the past from which to export table data. The table export will be a snapshot
/// of the table's state at this point in time.</p>
pub fn export_time(mut self, inp: aws_smithy_types::DateTime) -> Self {
self.inner = self.inner.export_time(inp);
self
}
/// <p>Time in the past from which to export table data. The table export will be a snapshot
/// of the table's state at this point in time.</p>
pub fn set_export_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.inner = self.inner.set_export_time(input);
self
}
/// <p>Providing a <code>ClientToken</code> makes the call to
/// <code>ExportTableToPointInTimeInput</code> idempotent, meaning that multiple
/// identical calls have the same effect as one single call.</p>
/// <p>A client token is valid for 8 hours after the first request that uses it is completed.
/// After 8 hours, any request with the same client token is treated as a new request. Do
/// not resubmit the same request with the same client token for more than 8 hours, or the
/// result might not be idempotent.</p>
/// <p>If you submit a request with the same client token but a change in other parameters
/// within the 8-hour idempotency window, DynamoDB returns an
/// <code>IdempotentParameterMismatch</code> exception.</p>
pub fn client_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.client_token(inp);
self
}
/// <p>Providing a <code>ClientToken</code> makes the call to
/// <code>ExportTableToPointInTimeInput</code> idempotent, meaning that multiple
/// identical calls have the same effect as one single call.</p>
/// <p>A client token is valid for 8 hours after the first request that uses it is completed.
/// After 8 hours, any request with the same client token is treated as a new request. Do
/// not resubmit the same request with the same client token for more than 8 hours, or the
/// result might not be idempotent.</p>
/// <p>If you submit a request with the same client token but a change in other parameters
/// within the 8-hour idempotency window, DynamoDB returns an
/// <code>IdempotentParameterMismatch</code> exception.</p>
pub fn set_client_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_client_token(input);
self
}
/// <p>The name of the Amazon S3 bucket to export the snapshot to.</p>
pub fn s3_bucket(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.s3_bucket(inp);
self
}
/// <p>The name of the Amazon S3 bucket to export the snapshot to.</p>
pub fn set_s3_bucket(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_s3_bucket(input);
self
}
/// <p>The ID of the Amazon Web Services account that owns the bucket the export will be
/// stored in.</p>
pub fn s3_bucket_owner(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.s3_bucket_owner(inp);
self
}
/// <p>The ID of the Amazon Web Services account that owns the bucket the export will be
/// stored in.</p>
pub fn set_s3_bucket_owner(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_s3_bucket_owner(input);
self
}
/// <p>The Amazon S3 bucket prefix to use as the file name and path of the exported
/// snapshot.</p>
pub fn s3_prefix(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.s3_prefix(inp);
self
}
/// <p>The Amazon S3 bucket prefix to use as the file name and path of the exported
/// snapshot.</p>
pub fn set_s3_prefix(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_s3_prefix(input);
self
}
/// <p>Type of encryption used on the bucket where export data will be stored. Valid values
/// for <code>S3SseAlgorithm</code> are:</p>
/// <ul>
/// <li>
/// <p>
/// <code>AES256</code> - server-side encryption with Amazon S3 managed
/// keys</p>
/// </li>
/// <li>
/// <p>
/// <code>KMS</code> - server-side encryption with KMS managed
/// keys</p>
/// </li>
/// </ul>
pub fn s3_sse_algorithm(mut self, inp: crate::model::S3SseAlgorithm) -> Self {
self.inner = self.inner.s3_sse_algorithm(inp);
self
}
/// <p>Type of encryption used on the bucket where export data will be stored. Valid values
/// for <code>S3SseAlgorithm</code> are:</p>
/// <ul>
/// <li>
/// <p>
/// <code>AES256</code> - server-side encryption with Amazon S3 managed
/// keys</p>
/// </li>
/// <li>
/// <p>
/// <code>KMS</code> - server-side encryption with KMS managed
/// keys</p>
/// </li>
/// </ul>
pub fn set_s3_sse_algorithm(
mut self,
input: std::option::Option<crate::model::S3SseAlgorithm>,
) -> Self {
self.inner = self.inner.set_s3_sse_algorithm(input);
self
}
/// <p>The ID of the KMS managed key used to encrypt the S3 bucket where
/// export data will be stored (if applicable).</p>
pub fn s3_sse_kms_key_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.s3_sse_kms_key_id(inp);
self
}
/// <p>The ID of the KMS managed key used to encrypt the S3 bucket where
/// export data will be stored (if applicable).</p>
pub fn set_s3_sse_kms_key_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_s3_sse_kms_key_id(input);
self
}
/// <p>The format for the exported data. Valid values for <code>ExportFormat</code> are
/// <code>DYNAMODB_JSON</code> or <code>ION</code>.</p>
pub fn export_format(mut self, inp: crate::model::ExportFormat) -> Self {
self.inner = self.inner.export_format(inp);
self
}
/// <p>The format for the exported data. Valid values for <code>ExportFormat</code> are
/// <code>DYNAMODB_JSON</code> or <code>ION</code>.</p>
pub fn set_export_format(
mut self,
input: std::option::Option<crate::model::ExportFormat>,
) -> Self {
self.inner = self.inner.set_export_format(input);
self
}
}
/// Fluent builder constructing a request to `GetItem`.
///
/// <p>The <code>GetItem</code> operation returns a set of attributes for the item with the
/// given primary key. If there is no matching item, <code>GetItem</code> does not return
/// any data and there will be no <code>Item</code> element in the response.</p>
/// <p>
/// <code>GetItem</code> provides an eventually consistent read by default. If your
/// application requires a strongly consistent read, set <code>ConsistentRead</code> to
/// <code>true</code>. Although a strongly consistent read might take more time than an
/// eventually consistent read, it always returns the last updated value.</p>
#[derive(std::fmt::Debug)]
pub struct GetItem<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connection, middleware, retry policy, config),
    // cloned cheaply into every fluent builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the operation input as fluent setters are called.
    inner: crate::input::get_item_input::Builder,
}
impl<C, M, R> GetItem<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs the fluent `GetItem` builder from a shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Failures to build, serialize, or sign the request surface as
    /// `SdkError::ConstructionFailure`; transport and service failures are
    /// reported through the other `SdkError` variants, which can be matched
    /// against for details.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetItemOutput,
        aws_smithy_http::result::SdkError<crate::error::GetItemError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetItemInputOperationOutputAlias,
            crate::output::GetItemOutput,
            crate::error::GetItemError,
            crate::input::GetItemInputOperationRetryAlias,
        >,
    {
        // Both pre-send failure modes (input validation and operation
        // construction) map onto the same ConstructionFailure variant.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the table containing the requested item.</p>
    pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
        let builder = self.inner.table_name(inp);
        self.inner = builder;
        self
    }
    /// <p>The name of the table containing the requested item.</p>
    pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        let builder = self.inner.set_table_name(input);
        self.inner = builder;
        self
    }
    /// Adds one attribute-name/<code>AttributeValue</code> pair to `Key`.
    ///
    /// To override the contents of this collection use [`set_key`](Self::set_key).
    ///
    /// <p><code>Key</code> identifies the primary key of the item to retrieve, and
    /// every primary-key attribute must be provided: just the partition key for a
    /// simple primary key, or both the partition key and the sort key for a
    /// composite primary key.</p>
    pub fn key(
        mut self,
        k: impl Into<std::string::String>,
        v: impl Into<crate::model::AttributeValue>,
    ) -> Self {
        let builder = self.inner.key(k, v);
        self.inner = builder;
        self
    }
    /// <p><code>Key</code> maps attribute names to <code>AttributeValue</code>
    /// objects identifying the primary key of the item to retrieve; every
    /// primary-key attribute must be provided (partition key, plus sort key for a
    /// composite primary key).</p>
    pub fn set_key(
        mut self,
        input: std::option::Option<
            std::collections::HashMap<std::string::String, crate::model::AttributeValue>,
        >,
    ) -> Self {
        let builder = self.inner.set_key(input);
        self.inner = builder;
        self
    }
    /// Appends one attribute name to `AttributesToGet`.
    ///
    /// To override the contents of this collection use [`set_attributes_to_get`](Self::set_attributes_to_get).
    ///
    /// <p>Legacy parameter; prefer <code>ProjectionExpression</code>. See <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html">AttributesToGet</a> in the <i>Amazon DynamoDB Developer
    /// Guide</i>.</p>
    pub fn attributes_to_get(mut self, inp: impl Into<std::string::String>) -> Self {
        let builder = self.inner.attributes_to_get(inp);
        self.inner = builder;
        self
    }
    /// <p>Legacy parameter; prefer <code>ProjectionExpression</code>. See <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html">AttributesToGet</a> in the <i>Amazon DynamoDB Developer
    /// Guide</i>.</p>
    pub fn set_attributes_to_get(
        mut self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        let builder = self.inner.set_attributes_to_get(input);
        self.inner = builder;
        self
    }
    /// <p>Read consistency model: <code>true</code> selects strongly consistent
    /// reads, otherwise the operation uses eventually consistent reads.</p>
    pub fn consistent_read(mut self, inp: bool) -> Self {
        let builder = self.inner.consistent_read(inp);
        self.inner = builder;
        self
    }
    /// <p>Read consistency model: <code>true</code> selects strongly consistent
    /// reads, otherwise the operation uses eventually consistent reads.</p>
    pub fn set_consistent_read(mut self, input: std::option::Option<bool>) -> Self {
        let builder = self.inner.set_consistent_read(input);
        self.inner = builder;
        self
    }
    /// <p>Level of detail about provisioned throughput consumption reported in the
    /// response:</p>
    /// <ul>
    /// <li>
    /// <p><code>INDEXES</code> - aggregate <code>ConsumedCapacity</code> plus
    /// per-table and per-index figures for everything accessed. (Operations such as
    /// <code>GetItem</code> and <code>BatchGetItem</code> touch no indexes, so only
    /// table-level figures are returned.)</p>
    /// </li>
    /// <li>
    /// <p><code>TOTAL</code> - aggregate <code>ConsumedCapacity</code> only.</p>
    /// </li>
    /// <li>
    /// <p><code>NONE</code> - no <code>ConsumedCapacity</code> details.</p>
    /// </li>
    /// </ul>
    pub fn return_consumed_capacity(
        mut self,
        inp: crate::model::ReturnConsumedCapacity,
    ) -> Self {
        let builder = self.inner.return_consumed_capacity(inp);
        self.inner = builder;
        self
    }
    /// <p>Level of detail about provisioned throughput consumption reported in the
    /// response: <code>INDEXES</code> (aggregate plus per-table/per-index),
    /// <code>TOTAL</code> (aggregate only), or <code>NONE</code>.</p>
    pub fn set_return_consumed_capacity(
        mut self,
        input: std::option::Option<crate::model::ReturnConsumedCapacity>,
    ) -> Self {
        let builder = self.inner.set_return_consumed_capacity(input);
        self.inner = builder;
        self
    }
    /// <p>Comma-separated list of attributes to retrieve from the table; these can
    /// be scalars, sets, or elements of a JSON document. If omitted, all attributes
    /// are returned; requested attributes that are not found simply do not appear
    /// in the result.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
    /// Guide</i>.</p>
    pub fn projection_expression(mut self, inp: impl Into<std::string::String>) -> Self {
        let builder = self.inner.projection_expression(inp);
        self.inner = builder;
        self
    }
    /// <p>Comma-separated list of attributes to retrieve from the table; these can
    /// be scalars, sets, or elements of a JSON document. If omitted, all attributes
    /// are returned; requested attributes that are not found simply do not appear
    /// in the result.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
    /// Guide</i>.</p>
    pub fn set_projection_expression(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let builder = self.inner.set_projection_expression(input);
        self.inner = builder;
        self
    }
    /// Adds one placeholder/attribute-name pair to `ExpressionAttributeNames`.
    ///
    /// To override the contents of this collection use [`set_expression_attribute_names`](Self::set_expression_attribute_names).
    ///
    /// <p>Substitution tokens for attribute names in an expression. Use them to
    /// reference an attribute whose name conflicts with a DynamoDB reserved word,
    /// to create a placeholder for a name repeated in an expression, or to keep
    /// special characters in a name from being misinterpreted.</p>
    /// <p>Dereference a placeholder with the <b>#</b> character. For example,
    /// <code>Percentile</code> is a reserved word and cannot appear directly in an
    /// expression (see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved Words</a> in the <i>Amazon DynamoDB Developer
    /// Guide</i>); supplying <code>{"#P":"Percentile"}</code> here lets the
    /// expression use <code>#P = :val</code> instead.</p>
    /// <note>
    /// <p>Tokens that begin with the <b>:</b> character are
    /// <i>expression attribute values</i> - placeholders for the actual value at
    /// runtime.</p>
    /// </note>
    /// <p>For more information on expression attribute names, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
    /// Guide</i>.</p>
    pub fn expression_attribute_names(
        mut self,
        k: impl Into<std::string::String>,
        v: impl Into<std::string::String>,
    ) -> Self {
        let builder = self.inner.expression_attribute_names(k, v);
        self.inner = builder;
        self
    }
    /// <p>Substitution tokens for attribute names in an expression. Use them to
    /// reference an attribute whose name conflicts with a DynamoDB reserved word,
    /// to create a placeholder for a name repeated in an expression, or to keep
    /// special characters in a name from being misinterpreted. Dereference a
    /// placeholder with the <b>#</b> character, e.g. supply
    /// <code>{"#P":"Percentile"}</code> and write <code>#P = :val</code>.</p>
    /// <p>For more information on expression attribute names, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
    /// Guide</i>.</p>
    pub fn set_expression_attribute_names(
        mut self,
        input: std::option::Option<
            std::collections::HashMap<std::string::String, std::string::String>,
        >,
    ) -> Self {
        let builder = self.inner.set_expression_attribute_names(input);
        self.inner = builder;
        self
    }
}
/// Fluent builder constructing a request to `ListBackups`.
///
/// <p>List backups associated with an Amazon Web Services account. To list backups for a
/// given table, specify <code>TableName</code>. <code>ListBackups</code> returns a
/// paginated list of results with at most 1 MB worth of items in a page. You can also
/// specify a maximum number of entries to be returned in a page.</p>
/// <p>In the request, start time is inclusive, but end time is exclusive. Note that these
/// boundaries are for the time at which the original backup was requested.</p>
/// <p>You can call <code>ListBackups</code> a maximum of five times per second.</p>
#[derive(std::fmt::Debug)]
pub struct ListBackups<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connection, middleware, retry policy, config),
    // cloned cheaply into every fluent builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the operation input as fluent setters are called.
    inner: crate::input::list_backups_input::Builder,
}
impl<C, M, R> ListBackups<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs the fluent `ListBackups` builder from a shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Failures to build, serialize, or sign the request surface as
    /// `SdkError::ConstructionFailure`; transport and service failures are
    /// reported through the other `SdkError` variants, which can be matched
    /// against for details.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListBackupsOutput,
        aws_smithy_http::result::SdkError<crate::error::ListBackupsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ListBackupsInputOperationOutputAlias,
            crate::output::ListBackupsOutput,
            crate::error::ListBackupsError,
            crate::input::ListBackupsInputOperationRetryAlias,
        >,
    {
        // Both pre-send failure modes (input validation and operation
        // construction) map onto the same ConstructionFailure variant.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>Restricts the listing to backups of the table named by
    /// <code>TableName</code>.</p>
    pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
        let builder = self.inner.table_name(inp);
        self.inner = builder;
        self
    }
    /// <p>Restricts the listing to backups of the table named by
    /// <code>TableName</code>.</p>
    pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        let builder = self.inner.set_table_name(input);
        self.inner = builder;
        self
    }
    /// <p>Maximum number of backups to return at once.</p>
    pub fn limit(mut self, inp: i32) -> Self {
        let builder = self.inner.limit(inp);
        self.inner = builder;
        self
    }
    /// <p>Maximum number of backups to return at once.</p>
    pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self {
        let builder = self.inner.set_limit(input);
        self.inner = builder;
        self
    }
    /// <p>Only backups created after this time are listed;
    /// <code>TimeRangeLowerBound</code> is inclusive.</p>
    pub fn time_range_lower_bound(mut self, inp: aws_smithy_types::DateTime) -> Self {
        let builder = self.inner.time_range_lower_bound(inp);
        self.inner = builder;
        self
    }
    /// <p>Only backups created after this time are listed;
    /// <code>TimeRangeLowerBound</code> is inclusive.</p>
    pub fn set_time_range_lower_bound(
        mut self,
        input: std::option::Option<aws_smithy_types::DateTime>,
    ) -> Self {
        let builder = self.inner.set_time_range_lower_bound(input);
        self.inner = builder;
        self
    }
    /// <p>Only backups created before this time are listed;
    /// <code>TimeRangeUpperBound</code> is exclusive.</p>
    pub fn time_range_upper_bound(mut self, inp: aws_smithy_types::DateTime) -> Self {
        let builder = self.inner.time_range_upper_bound(inp);
        self.inner = builder;
        self
    }
    /// <p>Only backups created before this time are listed;
    /// <code>TimeRangeUpperBound</code> is exclusive.</p>
    pub fn set_time_range_upper_bound(
        mut self,
        input: std::option::Option<aws_smithy_types::DateTime>,
    ) -> Self {
        let builder = self.inner.set_time_range_upper_bound(input);
        self.inner = builder;
        self
    }
    /// <p>Pagination cursor: pass the <code>LastEvaluatedBackupArn</code> returned
    /// with the previous page of results (the ARN of the backup last evaluated,
    /// inclusive of that page) to fetch the next page.</p>
    pub fn exclusive_start_backup_arn(mut self, inp: impl Into<std::string::String>) -> Self {
        let builder = self.inner.exclusive_start_backup_arn(inp);
        self.inner = builder;
        self
    }
    /// <p>Pagination cursor: pass the <code>LastEvaluatedBackupArn</code> returned
    /// with the previous page of results (the ARN of the backup last evaluated,
    /// inclusive of that page) to fetch the next page.</p>
    pub fn set_exclusive_start_backup_arn(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let builder = self.inner.set_exclusive_start_backup_arn(input);
        self.inner = builder;
        self
    }
    /// <p>Restricts the listing by <code>BackupType</code>:
    /// <code>USER</code> (on-demand backups created by you),
    /// <code>SYSTEM</code> (on-demand backups created automatically by DynamoDB),
    /// or <code>ALL</code> (both kinds).</p>
    pub fn backup_type(mut self, inp: crate::model::BackupTypeFilter) -> Self {
        let builder = self.inner.backup_type(inp);
        self.inner = builder;
        self
    }
    /// <p>Restricts the listing by <code>BackupType</code>:
    /// <code>USER</code> (on-demand backups created by you),
    /// <code>SYSTEM</code> (on-demand backups created automatically by DynamoDB),
    /// or <code>ALL</code> (both kinds).</p>
    pub fn set_backup_type(
        mut self,
        input: std::option::Option<crate::model::BackupTypeFilter>,
    ) -> Self {
        let builder = self.inner.set_backup_type(input);
        self.inner = builder;
        self
    }
}
/// Fluent builder constructing a request to `ListContributorInsights`.
///
/// <p>Returns a list of ContributorInsightsSummary for a table and all its global secondary
/// indexes.</p>
#[derive(std::fmt::Debug)]
pub struct ListContributorInsights<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connection, middleware, retry policy, config),
    // cloned cheaply into every fluent builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the operation input as fluent setters are called.
    inner: crate::input::list_contributor_insights_input::Builder,
}
impl<C, M, R> ListContributorInsights<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs the fluent `ListContributorInsights` builder from a shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Failures to build, serialize, or sign the request surface as
    /// `SdkError::ConstructionFailure`; transport and service failures are
    /// reported through the other `SdkError` variants, which can be matched
    /// against for details.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListContributorInsightsOutput,
        aws_smithy_http::result::SdkError<crate::error::ListContributorInsightsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ListContributorInsightsInputOperationOutputAlias,
            crate::output::ListContributorInsightsOutput,
            crate::error::ListContributorInsightsError,
            crate::input::ListContributorInsightsInputOperationRetryAlias,
        >,
    {
        // Both pre-send failure modes (input validation and operation
        // construction) map onto the same ConstructionFailure variant.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the table.</p>
    pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
        let builder = self.inner.table_name(inp);
        self.inner = builder;
        self
    }
    /// <p>The name of the table.</p>
    pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        let builder = self.inner.set_table_name(input);
        self.inner = builder;
        self
    }
    /// <p>A token to for the desired page, if there is one.</p>
    pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
        let builder = self.inner.next_token(inp);
        self.inner = builder;
        self
    }
    /// <p>A token to for the desired page, if there is one.</p>
    pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        let builder = self.inner.set_next_token(input);
        self.inner = builder;
        self
    }
    /// <p>Maximum number of results to return per page.</p>
    pub fn max_results(mut self, inp: i32) -> Self {
        let builder = self.inner.max_results(inp);
        self.inner = builder;
        self
    }
    /// <p>Maximum number of results to return per page.</p>
    pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
        let builder = self.inner.set_max_results(input);
        self.inner = builder;
        self
    }
}
/// Fluent builder constructing a request to `ListExports`.
///
/// <p>Lists completed exports within the past 90 days.</p>
#[derive(std::fmt::Debug)]
pub struct ListExports<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connection, middleware, retry policy, config),
    // cloned cheaply into every fluent builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the operation input as fluent setters are called.
    inner: crate::input::list_exports_input::Builder,
}
impl<C, M, R> ListExports<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs the fluent `ListExports` builder from a shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Failures to build, serialize, or sign the request surface as
    /// `SdkError::ConstructionFailure`; transport and service failures are
    /// reported through the other `SdkError` variants, which can be matched
    /// against for details.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListExportsOutput,
        aws_smithy_http::result::SdkError<crate::error::ListExportsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ListExportsInputOperationOutputAlias,
            crate::output::ListExportsOutput,
            crate::error::ListExportsError,
            crate::input::ListExportsInputOperationRetryAlias,
        >,
    {
        // Both pre-send failure modes (input validation and operation
        // construction) map onto the same ConstructionFailure variant.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The Amazon Resource Name (ARN) associated with the exported table.</p>
    pub fn table_arn(mut self, inp: impl Into<std::string::String>) -> Self {
        let builder = self.inner.table_arn(inp);
        self.inner = builder;
        self
    }
    /// <p>The Amazon Resource Name (ARN) associated with the exported table.</p>
    pub fn set_table_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
        let builder = self.inner.set_table_arn(input);
        self.inner = builder;
        self
    }
    /// <p>Maximum number of results to return per page.</p>
    pub fn max_results(mut self, inp: i32) -> Self {
        let builder = self.inner.max_results(inp);
        self.inner = builder;
        self
    }
    /// <p>Maximum number of results to return per page.</p>
    pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
        let builder = self.inner.set_max_results(input);
        self.inner = builder;
        self
    }
    /// <p>Pagination cursor: an optional string copied from the output of a
    /// previous <code>ListExports</code> call; when supplied, the API fetches the
    /// next page of results.</p>
    pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
        let builder = self.inner.next_token(inp);
        self.inner = builder;
        self
    }
    /// <p>Pagination cursor: an optional string copied from the output of a
    /// previous <code>ListExports</code> call; when supplied, the API fetches the
    /// next page of results.</p>
    pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        let builder = self.inner.set_next_token(input);
        self.inner = builder;
        self
    }
}
/// Fluent builder constructing a request to `ListGlobalTables`.
///
/// <p>Lists all global tables that have a replica in the specified Region.</p>
/// <note>
/// <p>This operation only applies to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html">Version
/// 2017.11.29</a> of global tables.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct ListGlobalTables<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (connector, middleware, retry policy, and config)
    /// used by `send` to dispatch the request.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the operation input as the fluent setters are called.
    inner: crate::input::list_global_tables_input::Builder,
}
impl<C, M, R> ListGlobalTables<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `ListGlobalTables` with an empty input builder.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::list_global_tables_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListGlobalTablesOutput,
        aws_smithy_http::result::SdkError<crate::error::ListGlobalTablesError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ListGlobalTablesInputOperationOutputAlias,
            crate::output::ListGlobalTablesOutput,
            crate::error::ListGlobalTablesError,
            crate::input::ListGlobalTablesInputOperationRetryAlias,
        >,
    {
        // Input finalization and operation assembly both surface as
        // `ConstructionFailure`; only the dispatch below can produce
        // service or transport errors.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The first global table name that this operation will evaluate.</p>
    pub fn exclusive_start_global_table_name(
        self,
        inp: impl Into<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.exclusive_start_global_table_name(inp),
        }
    }
    /// <p>The first global table name that this operation will evaluate.</p>
    pub fn set_exclusive_start_global_table_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let updated = self.inner.set_exclusive_start_global_table_name(input);
        Self {
            inner: updated,
            handle: self.handle,
        }
    }
    /// <p>The maximum number of table names to return, if the parameter is not specified
    /// DynamoDB defaults to 100.</p>
    /// <p>If the number of global tables DynamoDB finds reaches this limit, it stops the
    /// operation and returns the table names collected up to that point, with a table name in
    /// the <code>LastEvaluatedGlobalTableName</code> to apply in a subsequent operation to the
    /// <code>ExclusiveStartGlobalTableName</code> parameter.</p>
    pub fn limit(self, inp: i32) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.limit(inp),
        }
    }
    /// <p>The maximum number of table names to return, if the parameter is not specified
    /// DynamoDB defaults to 100.</p>
    /// <p>If the number of global tables DynamoDB finds reaches this limit, it stops the
    /// operation and returns the table names collected up to that point, with a table name in
    /// the <code>LastEvaluatedGlobalTableName</code> to apply in a subsequent operation to the
    /// <code>ExclusiveStartGlobalTableName</code> parameter.</p>
    pub fn set_limit(self, input: std::option::Option<i32>) -> Self {
        let updated = self.inner.set_limit(input);
        Self {
            inner: updated,
            handle: self.handle,
        }
    }
    /// <p>Lists the global tables in a specific Region.</p>
    pub fn region_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.region_name(inp),
        }
    }
    /// <p>Lists the global tables in a specific Region.</p>
    pub fn set_region_name(self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_region_name(input);
        Self {
            inner: updated,
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `ListTables`.
///
/// <p>Returns an array of table names associated with the current account and endpoint. The
/// output from <code>ListTables</code> is paginated, with each page returning a maximum of
/// 100 table names.</p>
#[derive(std::fmt::Debug)]
pub struct ListTables<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (connector, middleware, retry policy, and config)
    /// used by `send` to dispatch the request.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the operation input as the fluent setters are called.
    inner: crate::input::list_tables_input::Builder,
}
impl<C, M, R> ListTables<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `ListTables` with an empty input builder.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::list_tables_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListTablesOutput,
        aws_smithy_http::result::SdkError<crate::error::ListTablesError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ListTablesInputOperationOutputAlias,
            crate::output::ListTablesOutput,
            crate::error::ListTablesError,
            crate::input::ListTablesInputOperationRetryAlias,
        >,
    {
        // Input finalization and operation assembly both surface as
        // `ConstructionFailure`; only the dispatch below can produce
        // service or transport errors.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The first table name that this operation will evaluate. Use the value that was
    /// returned for <code>LastEvaluatedTableName</code> in a previous operation, so that you
    /// can obtain the next page of results.</p>
    pub fn exclusive_start_table_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.exclusive_start_table_name(inp),
        }
    }
    /// <p>The first table name that this operation will evaluate. Use the value that was
    /// returned for <code>LastEvaluatedTableName</code> in a previous operation, so that you
    /// can obtain the next page of results.</p>
    pub fn set_exclusive_start_table_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let updated = self.inner.set_exclusive_start_table_name(input);
        Self {
            inner: updated,
            handle: self.handle,
        }
    }
    /// <p>A maximum number of table names to return. If this parameter is not specified, the
    /// limit is 100.</p>
    pub fn limit(self, inp: i32) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.limit(inp),
        }
    }
    /// <p>A maximum number of table names to return. If this parameter is not specified, the
    /// limit is 100.</p>
    pub fn set_limit(self, input: std::option::Option<i32>) -> Self {
        let updated = self.inner.set_limit(input);
        Self {
            inner: updated,
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `ListTagsOfResource`.
///
/// <p>List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource up to 10
/// times per second, per account.</p>
/// <p>For an overview on tagging DynamoDB resources, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html">Tagging for DynamoDB</a>
/// in the <i>Amazon DynamoDB Developer Guide</i>.</p>
#[derive(std::fmt::Debug)]
pub struct ListTagsOfResource<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (connector, middleware, retry policy, and config)
    /// used by `send` to dispatch the request.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the operation input as the fluent setters are called.
    inner: crate::input::list_tags_of_resource_input::Builder,
}
impl<C, M, R> ListTagsOfResource<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `ListTagsOfResource` with an empty input builder.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::list_tags_of_resource_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListTagsOfResourceOutput,
        aws_smithy_http::result::SdkError<crate::error::ListTagsOfResourceError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ListTagsOfResourceInputOperationOutputAlias,
            crate::output::ListTagsOfResourceOutput,
            crate::error::ListTagsOfResourceError,
            crate::input::ListTagsOfResourceInputOperationRetryAlias,
        >,
    {
        // Input finalization and operation assembly both surface as
        // `ConstructionFailure`; only the dispatch below can produce
        // service or transport errors.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The Amazon DynamoDB resource with tags to be listed. This value is an Amazon Resource
    /// Name (ARN).</p>
    pub fn resource_arn(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.resource_arn(inp),
        }
    }
    /// <p>The Amazon DynamoDB resource with tags to be listed. This value is an Amazon Resource
    /// Name (ARN).</p>
    pub fn set_resource_arn(self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_resource_arn(input);
        Self {
            inner: updated,
            handle: self.handle,
        }
    }
    /// <p>An optional string that, if supplied, must be copied from the output of a previous
    /// call to ListTagOfResource. When provided in this manner, this API fetches the next page
    /// of results.</p>
    pub fn next_token(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.next_token(inp),
        }
    }
    /// <p>An optional string that, if supplied, must be copied from the output of a previous
    /// call to ListTagOfResource. When provided in this manner, this API fetches the next page
    /// of results.</p>
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_next_token(input);
        Self {
            inner: updated,
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to `PutItem`.
///
/// <p>Creates a new item, or replaces an old item with a new item. If an item that has the
/// same primary key as the new item already exists in the specified table, the new item
/// completely replaces the existing item. You can perform a conditional put operation (add
/// a new item if one with the specified primary key doesn't exist), or replace an existing
/// item if it has certain attribute values. You can return the item's attribute values in
/// the same operation, using the <code>ReturnValues</code> parameter.</p>
/// <important>
/// <p>This topic provides general information about the <code>PutItem</code> API.</p>
/// <p>For information on how to call the <code>PutItem</code> API using the Amazon Web Services SDK in specific languages, see the following:</p>
/// <ul>
/// <li>
/// <p>
/// <a href="http://docs.aws.amazon.com/goto/aws-cli/dynamodb-2012-08-10/PutItem"> PutItem in the Command Line Interface</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a href="http://docs.aws.amazon.com/goto/DotNetSDKV3/dynamodb-2012-08-10/PutItem"> PutItem in the SDK for .NET</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a href="http://docs.aws.amazon.com/goto/SdkForCpp/dynamodb-2012-08-10/PutItem"> PutItem in the SDK for C++</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a href="http://docs.aws.amazon.com/goto/SdkForGoV1/dynamodb-2012-08-10/PutItem"> PutItem in the SDK for Go</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a href="http://docs.aws.amazon.com/goto/SdkForJava/dynamodb-2012-08-10/PutItem"> PutItem in the SDK for Java</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a href="http://docs.aws.amazon.com/goto/AWSJavaScriptSDK/dynamodb-2012-08-10/PutItem"> PutItem in the SDK for JavaScript</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a href="http://docs.aws.amazon.com/goto/SdkForPHPV3/dynamodb-2012-08-10/PutItem"> PutItem in the SDK for PHP V3</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a href="http://docs.aws.amazon.com/goto/boto3/dynamodb-2012-08-10/PutItem">
/// PutItem in the SDK for Python (Boto)</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a href="http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem"> PutItem in the SDK for Ruby V2</a>
/// </p>
/// </li>
/// </ul>
/// </important>
///
/// <p>When you add an item, the primary key attributes are the only required attributes.
/// Attribute values cannot be null.</p>
/// <p>Empty String and Binary attribute values are allowed. Attribute values of type String
/// and Binary must have a length greater than zero if the attribute is used as a key
/// attribute for a table or index. Set type attributes cannot be empty. </p>
/// <p>Invalid Requests with empty values will be rejected with a
/// <code>ValidationException</code> exception.</p>
/// <note>
/// <p>To prevent a new item from replacing an existing item, use a conditional
/// expression that contains the <code>attribute_not_exists</code> function with the
/// name of the attribute being used as the partition key for the table. Since every
/// record must contain that attribute, the <code>attribute_not_exists</code> function
/// will only succeed if no matching item exists.</p>
/// </note>
/// <p>For more information about <code>PutItem</code>, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html">Working with
/// Items</a> in the <i>Amazon DynamoDB Developer Guide</i>.</p>
#[derive(std::fmt::Debug)]
pub struct PutItem<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client state (connector, middleware, retry policy, and config)
    /// used by `send` to dispatch the request.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the operation input as the fluent setters are called.
    inner: crate::input::put_item_input::Builder,
}
impl<C, M, R> PutItem<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `PutItem`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::PutItemOutput,
aws_smithy_http::result::SdkError<crate::error::PutItemError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::PutItemInputOperationOutputAlias,
crate::output::PutItemOutput,
crate::error::PutItemError,
crate::input::PutItemInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the table to contain the item.</p>
pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_name(inp);
self
}
/// <p>The name of the table to contain the item.</p>
pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_name(input);
self
}
/// Adds a key-value pair to `Item`.
///
/// To override the contents of this collection use [`set_item`](Self::set_item).
///
/// <p>A map of attribute name/value pairs, one for each attribute. Only the primary key
/// attributes are required; you can optionally provide other attribute name-value pairs for
/// the item.</p>
/// <p>You must provide all of the attributes for the primary key. For example, with a simple
/// primary key, you only need to provide a value for the partition key. For a composite
/// primary key, you must provide both values for both the partition key and the sort
/// key.</p>
/// <p>If you specify any attributes that are part of an index key, then the data types for
/// those attributes must match those of the schema in the table's attribute
/// definition.</p>
/// <p>Empty String and Binary attribute values are allowed. Attribute values of type String
/// and Binary must have a length greater than zero if the attribute is used as a key
/// attribute for a table or index.</p>
///
/// <p>For more information about primary keys, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey">Primary Key</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
/// <p>Each element in the <code>Item</code> map is an <code>AttributeValue</code>
/// object.</p>
pub fn item(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::AttributeValue>,
) -> Self {
self.inner = self.inner.item(k, v);
self
}
/// <p>A map of attribute name/value pairs, one for each attribute. Only the primary key
/// attributes are required; you can optionally provide other attribute name-value pairs for
/// the item.</p>
/// <p>You must provide all of the attributes for the primary key. For example, with a simple
/// primary key, you only need to provide a value for the partition key. For a composite
/// primary key, you must provide both values for both the partition key and the sort
/// key.</p>
/// <p>If you specify any attributes that are part of an index key, then the data types for
/// those attributes must match those of the schema in the table's attribute
/// definition.</p>
/// <p>Empty String and Binary attribute values are allowed. Attribute values of type String
/// and Binary must have a length greater than zero if the attribute is used as a key
/// attribute for a table or index.</p>
///
/// <p>For more information about primary keys, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey">Primary Key</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
/// <p>Each element in the <code>Item</code> map is an <code>AttributeValue</code>
/// object.</p>
pub fn set_item(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::AttributeValue>,
>,
) -> Self {
self.inner = self.inner.set_item(input);
self
}
/// Adds a key-value pair to `Expected`.
///
/// To override the contents of this collection use [`set_expected`](Self::set_expected).
///
/// <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html">Expected</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn expected(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::ExpectedAttributeValue>,
) -> Self {
self.inner = self.inner.expected(k, v);
self
}
/// <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html">Expected</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_expected(
mut self,
input: std::option::Option<
std::collections::HashMap<
std::string::String,
crate::model::ExpectedAttributeValue,
>,
>,
) -> Self {
self.inner = self.inner.set_expected(input);
self
}
/// <p>Use <code>ReturnValues</code> if you want to get the item attributes as they appeared
/// before they were updated with the <code>PutItem</code> request. For
/// <code>PutItem</code>, the valid values are:</p>
/// <ul>
/// <li>
/// <p>
/// <code>NONE</code> - If <code>ReturnValues</code> is not specified, or if its
/// value is <code>NONE</code>, then nothing is returned. (This setting is the
/// default for <code>ReturnValues</code>.)</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL_OLD</code> - If <code>PutItem</code> overwrote an attribute name-value
/// pair, then the content of the old item is returned.</p>
/// </li>
/// </ul>
/// <p>The values returned are strongly consistent.</p>
/// <note>
/// <p>The <code>ReturnValues</code> parameter is used by several DynamoDB operations;
/// however, <code>PutItem</code> does not recognize any values other than
/// <code>NONE</code> or <code>ALL_OLD</code>.</p>
/// </note>
pub fn return_values(mut self, inp: crate::model::ReturnValue) -> Self {
self.inner = self.inner.return_values(inp);
self
}
/// <p>Use <code>ReturnValues</code> if you want to get the item attributes as they appeared
/// before they were updated with the <code>PutItem</code> request. For
/// <code>PutItem</code>, the valid values are:</p>
/// <ul>
/// <li>
/// <p>
/// <code>NONE</code> - If <code>ReturnValues</code> is not specified, or if its
/// value is <code>NONE</code>, then nothing is returned. (This setting is the
/// default for <code>ReturnValues</code>.)</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL_OLD</code> - If <code>PutItem</code> overwrote an attribute name-value
/// pair, then the content of the old item is returned.</p>
/// </li>
/// </ul>
/// <p>The values returned are strongly consistent.</p>
/// <note>
/// <p>The <code>ReturnValues</code> parameter is used by several DynamoDB operations;
/// however, <code>PutItem</code> does not recognize any values other than
/// <code>NONE</code> or <code>ALL_OLD</code>.</p>
/// </note>
pub fn set_return_values(
mut self,
input: std::option::Option<crate::model::ReturnValue>,
) -> Self {
self.inner = self.inner.set_return_values(input);
self
}
/// <p>Determines the level of detail about provisioned throughput consumption that is
/// returned in the response:</p>
/// <ul>
/// <li>
/// <p>
/// <code>INDEXES</code> - The response includes the aggregate
/// <code>ConsumedCapacity</code> for the operation, together with
/// <code>ConsumedCapacity</code> for each table and secondary index that was
/// accessed.</p>
/// <p>Note that some operations, such as <code>GetItem</code> and
/// <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
/// specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
/// information for table(s).</p>
/// </li>
/// <li>
/// <p>
/// <code>TOTAL</code> - The response includes only the aggregate
/// <code>ConsumedCapacity</code> for the operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
/// response.</p>
/// </li>
/// </ul>
pub fn return_consumed_capacity(
mut self,
inp: crate::model::ReturnConsumedCapacity,
) -> Self {
self.inner = self.inner.return_consumed_capacity(inp);
self
}
/// <p>Determines the level of detail about provisioned throughput consumption that is
/// returned in the response:</p>
/// <ul>
/// <li>
/// <p>
/// <code>INDEXES</code> - The response includes the aggregate
/// <code>ConsumedCapacity</code> for the operation, together with
/// <code>ConsumedCapacity</code> for each table and secondary index that was
/// accessed.</p>
/// <p>Note that some operations, such as <code>GetItem</code> and
/// <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
/// specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
/// information for table(s).</p>
/// </li>
/// <li>
/// <p>
/// <code>TOTAL</code> - The response includes only the aggregate
/// <code>ConsumedCapacity</code> for the operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
/// response.</p>
/// </li>
/// </ul>
pub fn set_return_consumed_capacity(
mut self,
input: std::option::Option<crate::model::ReturnConsumedCapacity>,
) -> Self {
self.inner = self.inner.set_return_consumed_capacity(input);
self
}
/// <p>Determines whether item collection metrics are returned. If set to <code>SIZE</code>,
/// the response includes statistics about item collections, if any, that were modified
/// during the operation are returned in the response. If set to <code>NONE</code> (the
/// default), no statistics are returned.</p>
pub fn return_item_collection_metrics(
mut self,
inp: crate::model::ReturnItemCollectionMetrics,
) -> Self {
self.inner = self.inner.return_item_collection_metrics(inp);
self
}
/// <p>Determines whether item collection metrics are returned. If set to <code>SIZE</code>,
/// the response includes statistics about item collections, if any, that were modified
/// during the operation are returned in the response. If set to <code>NONE</code> (the
/// default), no statistics are returned.</p>
pub fn set_return_item_collection_metrics(
mut self,
input: std::option::Option<crate::model::ReturnItemCollectionMetrics>,
) -> Self {
self.inner = self.inner.set_return_item_collection_metrics(input);
self
}
/// <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html">ConditionalOperator</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn conditional_operator(mut self, inp: crate::model::ConditionalOperator) -> Self {
self.inner = self.inner.conditional_operator(inp);
self
}
/// <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html">ConditionalOperator</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_conditional_operator(
mut self,
input: std::option::Option<crate::model::ConditionalOperator>,
) -> Self {
self.inner = self.inner.set_conditional_operator(input);
self
}
/// <p>A condition that must be satisfied in order for a conditional <code>PutItem</code>
/// operation to succeed.</p>
/// <p>An expression can contain any of the following:</p>
/// <ul>
/// <li>
/// <p>Functions: <code>attribute_exists | attribute_not_exists | attribute_type |
/// contains | begins_with | size</code>
/// </p>
/// <p>These function names are case-sensitive.</p>
/// </li>
/// <li>
/// <p>Comparison operators: <code>= | <> |
/// < | > | <= | >= |
/// BETWEEN | IN </code>
/// </p>
/// </li>
/// <li>
/// <p> Logical operators: <code>AND | OR | NOT</code>
/// </p>
/// </li>
/// </ul>
/// <p>For more information on condition expressions, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Condition Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn condition_expression(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.condition_expression(inp);
self
}
/// <p>A condition that must be satisfied in order for a conditional <code>PutItem</code>
/// operation to succeed.</p>
/// <p>An expression can contain any of the following:</p>
/// <ul>
/// <li>
/// <p>Functions: <code>attribute_exists | attribute_not_exists | attribute_type |
/// contains | begins_with | size</code>
/// </p>
/// <p>These function names are case-sensitive.</p>
/// </li>
/// <li>
/// <p>Comparison operators: <code>= | <> |
/// < | > | <= | >= |
/// BETWEEN | IN </code>
/// </p>
/// </li>
/// <li>
/// <p> Logical operators: <code>AND | OR | NOT</code>
/// </p>
/// </li>
/// </ul>
/// <p>For more information on condition expressions, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Condition Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_condition_expression(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_condition_expression(input);
self
}
/// Adds a key-value pair to `ExpressionAttributeNames`.
///
/// To override the contents of this collection use [`set_expression_attribute_names`](Self::set_expression_attribute_names).
///
/// <p>One or more substitution tokens for attribute names in an expression. The following
/// are some use cases for using <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>To access an attribute whose name conflicts with a DynamoDB reserved
/// word.</p>
/// </li>
/// <li>
/// <p>To create a placeholder for repeating occurrences of an attribute name in an
/// expression.</p>
/// </li>
/// <li>
/// <p>To prevent special characters in an attribute name from being misinterpreted
/// in an expression.</p>
/// </li>
/// </ul>
/// <p>Use the <b>#</b> character in an expression to dereference
/// an attribute name. For example, consider the following attribute name:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Percentile</code>
/// </p>
/// </li>
/// </ul>
/// <p>The name of this attribute conflicts with a reserved word, so it cannot be used
/// directly in an expression. (For the complete list of reserved words, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved Words</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>). To work around this, you could specify the following for
/// <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>{"#P":"Percentile"}</code>
/// </p>
/// </li>
/// </ul>
/// <p>You could then use this substitution in an expression, as in this example:</p>
/// <ul>
/// <li>
/// <p>
/// <code>#P = :val</code>
/// </p>
/// </li>
/// </ul>
/// <note>
/// <p>Tokens that begin with the <b>:</b> character are
/// <i>expression attribute values</i>, which are placeholders for the
/// actual value at runtime.</p>
/// </note>
/// <p>For more information on expression attribute names, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn expression_attribute_names(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.expression_attribute_names(k, v);
self
}
/// <p>One or more substitution tokens for attribute names in an expression. The following
/// are some use cases for using <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>To access an attribute whose name conflicts with a DynamoDB reserved
/// word.</p>
/// </li>
/// <li>
/// <p>To create a placeholder for repeating occurrences of an attribute name in an
/// expression.</p>
/// </li>
/// <li>
/// <p>To prevent special characters in an attribute name from being misinterpreted
/// in an expression.</p>
/// </li>
/// </ul>
/// <p>Use the <b>#</b> character in an expression to dereference
/// an attribute name. For example, consider the following attribute name:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Percentile</code>
/// </p>
/// </li>
/// </ul>
/// <p>The name of this attribute conflicts with a reserved word, so it cannot be used
/// directly in an expression. (For the complete list of reserved words, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved Words</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>). To work around this, you could specify the following for
/// <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>{"#P":"Percentile"}</code>
/// </p>
/// </li>
/// </ul>
/// <p>You could then use this substitution in an expression, as in this example:</p>
/// <ul>
/// <li>
/// <p>
/// <code>#P = :val</code>
/// </p>
/// </li>
/// </ul>
/// <note>
/// <p>Tokens that begin with the <b>:</b> character are
/// <i>expression attribute values</i>, which are placeholders for the
/// actual value at runtime.</p>
/// </note>
/// <p>For more information on expression attribute names, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_expression_attribute_names(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_expression_attribute_names(input);
self
}
/// Adds a key-value pair to `ExpressionAttributeValues`.
///
/// To override the contents of this collection use [`set_expression_attribute_values`](Self::set_expression_attribute_values).
///
/// <p>One or more values that can be substituted in an expression.</p>
/// <p>Use the <b>:</b> (colon) character in an expression to
/// dereference an attribute value. For example, suppose that you wanted to check whether
/// the value of the <i>ProductStatus</i> attribute was one of the following: </p>
/// <p>
/// <code>Available | Backordered | Discontinued</code>
/// </p>
/// <p>You would first need to specify <code>ExpressionAttributeValues</code> as
/// follows:</p>
/// <p>
/// <code>{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
/// ":disc":{"S":"Discontinued"} }</code>
/// </p>
/// <p>You could then use these values in an expression, such as this:</p>
/// <p>
/// <code>ProductStatus IN (:avail, :back, :disc)</code>
/// </p>
/// <p>For more information on expression attribute values, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Condition Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn expression_attribute_values(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::AttributeValue>,
) -> Self {
self.inner = self.inner.expression_attribute_values(k, v);
self
}
/// <p>One or more values that can be substituted in an expression.</p>
/// <p>Use the <b>:</b> (colon) character in an expression to
/// dereference an attribute value. For example, suppose that you wanted to check whether
/// the value of the <i>ProductStatus</i> attribute was one of the following: </p>
/// <p>
/// <code>Available | Backordered | Discontinued</code>
/// </p>
/// <p>You would first need to specify <code>ExpressionAttributeValues</code> as
/// follows:</p>
/// <p>
/// <code>{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
/// ":disc":{"S":"Discontinued"} }</code>
/// </p>
/// <p>You could then use these values in an expression, such as this:</p>
/// <p>
/// <code>ProductStatus IN (:avail, :back, :disc)</code>
/// </p>
/// <p>For more information on expression attribute values, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Condition Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_expression_attribute_values(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::AttributeValue>,
>,
) -> Self {
self.inner = self.inner.set_expression_attribute_values(input);
self
}
}
/// Fluent builder constructing a request to `Query`.
///
/// <p>You must provide the name of the partition key attribute and a single value for that
/// attribute. <code>Query</code> returns all items with that partition key value.
/// Optionally, you can provide a sort key attribute and use a comparison operator to refine
/// the search results.</p>
///
/// <p>Use the <code>KeyConditionExpression</code> parameter to provide a specific value for
/// the partition key. The <code>Query</code> operation will return all of the items from
/// the table or index with that partition key value. You can optionally narrow the scope of
/// the <code>Query</code> operation by specifying a sort key value and a comparison
/// operator in <code>KeyConditionExpression</code>. To further refine the
/// <code>Query</code> results, you can optionally provide a
/// <code>FilterExpression</code>. A <code>FilterExpression</code> determines which
/// items within the results should be returned to you. All of the other results are
/// discarded. </p>
/// <p> A <code>Query</code> operation always returns a result set. If no matching items are
/// found, the result set will be empty. Queries that do not return results consume the
/// minimum number of read capacity units for that type of read operation. </p>
/// <note>
/// <p> DynamoDB calculates the number of read capacity units consumed based on item
/// size, not on the amount of data that is returned to an application. The number of
/// capacity units consumed will be the same whether you request all of the attributes
/// (the default behavior) or just some of them (using a projection expression). The
/// number will also be the same whether or not you use a <code>FilterExpression</code>.
/// </p>
/// </note>
/// <p>
/// <code>Query</code> results are always sorted by the sort key value. If the data type of
/// the sort key is Number, the results are returned in numeric order; otherwise, the
/// results are returned in order of UTF-8 bytes. By default, the sort order is ascending.
/// To reverse the order, set the <code>ScanIndexForward</code> parameter to false. </p>
/// <p> A single <code>Query</code> operation will read up to the maximum number of items set
/// (if using the <code>Limit</code> parameter) or a maximum of 1 MB of data and then apply
/// any filtering to the results using <code>FilterExpression</code>. If
/// <code>LastEvaluatedKey</code> is present in the response, you will need to paginate
/// the result set. For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination">Paginating
/// the Results</a> in the <i>Amazon DynamoDB Developer Guide</i>. </p>
/// <p>
/// <code>FilterExpression</code> is applied after a <code>Query</code> finishes, but before
/// the results are returned. A <code>FilterExpression</code> cannot contain partition key
/// or sort key attributes. You need to specify those attributes in the
/// <code>KeyConditionExpression</code>. </p>
/// <note>
/// <p> A <code>Query</code> operation can return an empty result set and a
/// <code>LastEvaluatedKey</code> if all the items read for the page of results are
/// filtered out. </p>
/// </note>
/// <p>You can query a table, a local secondary index, or a global secondary index. For a
/// query on a table or on a local secondary index, you can set the
/// <code>ConsistentRead</code> parameter to <code>true</code> and obtain a strongly
/// consistent result. Global secondary indexes support eventually consistent reads only, so
/// do not specify <code>ConsistentRead</code> when querying a global secondary
/// index.</p>
#[derive(std::fmt::Debug)]
pub struct Query<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared handle holding the client and configuration that `send` dispatches through.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates `QueryInput` fields as the fluent setter methods are called.
    inner: crate::input::query_input::Builder,
}
impl<C, M, R> Query<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `Query`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::QueryOutput,
        aws_smithy_http::result::SdkError<crate::error::QueryError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::QueryInputOperationOutputAlias,
            crate::output::QueryOutput,
            crate::error::QueryError,
            crate::input::QueryInputOperationRetryAlias,
        >,
    {
        // Materialize the accumulated builder state into a QueryInput;
        // builder validation errors surface as ConstructionFailure.
        let input = self.inner.build().map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
        // Turn the input into a dispatchable operation using the stored
        // client configuration; failures here are also construction errors.
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        // Dispatch the operation through the shared client and await the result.
        self.handle.client.call(op).await
    }
/// <p>The name of the table containing the requested items.</p>
pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_name(inp);
self
}
/// <p>The name of the table containing the requested items.</p>
pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_name(input);
self
}
/// <p>The name of an index to query. This index can be any local secondary index or global
/// secondary index on the table. Note that if you use the <code>IndexName</code> parameter,
/// you must also provide <code>TableName.</code>
/// </p>
pub fn index_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.index_name(inp);
self
}
/// <p>The name of an index to query. This index can be any local secondary index or global
/// secondary index on the table. Note that if you use the <code>IndexName</code> parameter,
/// you must also provide <code>TableName.</code>
/// </p>
pub fn set_index_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_index_name(input);
self
}
/// <p>The attributes to be returned in the result. You can retrieve all item attributes,
/// specific item attributes, the count of matching items, or in the case of an index, some
/// or all of the attributes projected into the index.</p>
/// <ul>
/// <li>
/// <p>
/// <code>ALL_ATTRIBUTES</code> - Returns all of the item attributes from the
/// specified table or index. If you query a local secondary index, then for each
/// matching item in the index, DynamoDB fetches the entire item from the parent
/// table. If the index is configured to project all item attributes, then all of
/// the data can be obtained from the local secondary index, and no fetching is
/// required.</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL_PROJECTED_ATTRIBUTES</code> - Allowed only when querying an index.
/// Retrieves all attributes that have been projected into the index. If the index
/// is configured to project all attributes, this return value is equivalent to
/// specifying <code>ALL_ATTRIBUTES</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>COUNT</code> - Returns the number of matching items, rather than the
/// matching items themselves.</p>
/// </li>
/// <li>
/// <p>
/// <code>SPECIFIC_ATTRIBUTES</code> - Returns only the attributes listed in
/// <code>AttributesToGet</code>. This return value is equivalent to specifying
/// <code>AttributesToGet</code> without specifying any value for
/// <code>Select</code>.</p>
/// <p>If you query or scan a local secondary index and request only attributes that
/// are projected into that index, the operation will read only the index and not
/// the table. If any of the requested attributes are not projected into the local
/// secondary index, DynamoDB fetches each of these attributes from the parent
/// table. This extra fetching incurs additional throughput cost and latency.</p>
/// <p>If you query or scan a global secondary index, you can only request attributes
/// that are projected into the index. Global secondary index queries cannot fetch
/// attributes from the parent table.</p>
/// </li>
/// </ul>
/// <p>If neither <code>Select</code> nor <code>AttributesToGet</code> are specified,
/// DynamoDB defaults to <code>ALL_ATTRIBUTES</code> when accessing a table, and
/// <code>ALL_PROJECTED_ATTRIBUTES</code> when accessing an index. You cannot use both
/// <code>Select</code> and <code>AttributesToGet</code> together in a single request,
/// unless the value for <code>Select</code> is <code>SPECIFIC_ATTRIBUTES</code>. (This
/// usage is equivalent to specifying <code>AttributesToGet</code> without any value for
/// <code>Select</code>.)</p>
/// <note>
/// <p>If you use the <code>ProjectionExpression</code> parameter, then the value for
/// <code>Select</code> can only be <code>SPECIFIC_ATTRIBUTES</code>. Any other
/// value for <code>Select</code> will return an error.</p>
/// </note>
pub fn select(mut self, inp: crate::model::Select) -> Self {
self.inner = self.inner.select(inp);
self
}
/// <p>The attributes to be returned in the result. You can retrieve all item attributes,
/// specific item attributes, the count of matching items, or in the case of an index, some
/// or all of the attributes projected into the index.</p>
/// <ul>
/// <li>
/// <p>
/// <code>ALL_ATTRIBUTES</code> - Returns all of the item attributes from the
/// specified table or index. If you query a local secondary index, then for each
/// matching item in the index, DynamoDB fetches the entire item from the parent
/// table. If the index is configured to project all item attributes, then all of
/// the data can be obtained from the local secondary index, and no fetching is
/// required.</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL_PROJECTED_ATTRIBUTES</code> - Allowed only when querying an index.
/// Retrieves all attributes that have been projected into the index. If the index
/// is configured to project all attributes, this return value is equivalent to
/// specifying <code>ALL_ATTRIBUTES</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>COUNT</code> - Returns the number of matching items, rather than the
/// matching items themselves.</p>
/// </li>
/// <li>
/// <p>
/// <code>SPECIFIC_ATTRIBUTES</code> - Returns only the attributes listed in
/// <code>AttributesToGet</code>. This return value is equivalent to specifying
/// <code>AttributesToGet</code> without specifying any value for
/// <code>Select</code>.</p>
/// <p>If you query or scan a local secondary index and request only attributes that
/// are projected into that index, the operation will read only the index and not
/// the table. If any of the requested attributes are not projected into the local
/// secondary index, DynamoDB fetches each of these attributes from the parent
/// table. This extra fetching incurs additional throughput cost and latency.</p>
/// <p>If you query or scan a global secondary index, you can only request attributes
/// that are projected into the index. Global secondary index queries cannot fetch
/// attributes from the parent table.</p>
/// </li>
/// </ul>
/// <p>If neither <code>Select</code> nor <code>AttributesToGet</code> are specified,
/// DynamoDB defaults to <code>ALL_ATTRIBUTES</code> when accessing a table, and
/// <code>ALL_PROJECTED_ATTRIBUTES</code> when accessing an index. You cannot use both
/// <code>Select</code> and <code>AttributesToGet</code> together in a single request,
/// unless the value for <code>Select</code> is <code>SPECIFIC_ATTRIBUTES</code>. (This
/// usage is equivalent to specifying <code>AttributesToGet</code> without any value for
/// <code>Select</code>.)</p>
/// <note>
/// <p>If you use the <code>ProjectionExpression</code> parameter, then the value for
/// <code>Select</code> can only be <code>SPECIFIC_ATTRIBUTES</code>. Any other
/// value for <code>Select</code> will return an error.</p>
/// </note>
pub fn set_select(mut self, input: std::option::Option<crate::model::Select>) -> Self {
self.inner = self.inner.set_select(input);
self
}
/// Appends an item to `AttributesToGet`.
///
/// To override the contents of this collection use [`set_attributes_to_get`](Self::set_attributes_to_get).
///
/// <p>This is a legacy parameter. Use <code>ProjectionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html">AttributesToGet</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn attributes_to_get(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.attributes_to_get(inp);
self
}
/// <p>This is a legacy parameter. Use <code>ProjectionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html">AttributesToGet</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_attributes_to_get(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_attributes_to_get(input);
self
}
/// <p>The maximum number of items to evaluate (not necessarily the number of matching
/// items). If DynamoDB processes the number of items up to the limit while processing the
/// results, it stops the operation and returns the matching values up to that point, and a
/// key in <code>LastEvaluatedKey</code> to apply in a subsequent operation, so that you can
/// pick up where you left off. Also, if the processed dataset size exceeds 1 MB before
/// DynamoDB reaches this limit, it stops the operation and returns the matching values up
/// to the limit, and a key in <code>LastEvaluatedKey</code> to apply in a subsequent
/// operation to continue the operation. For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html">Query and Scan</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn limit(mut self, inp: i32) -> Self {
self.inner = self.inner.limit(inp);
self
}
/// <p>The maximum number of items to evaluate (not necessarily the number of matching
/// items). If DynamoDB processes the number of items up to the limit while processing the
/// results, it stops the operation and returns the matching values up to that point, and a
/// key in <code>LastEvaluatedKey</code> to apply in a subsequent operation, so that you can
/// pick up where you left off. Also, if the processed dataset size exceeds 1 MB before
/// DynamoDB reaches this limit, it stops the operation and returns the matching values up
/// to the limit, and a key in <code>LastEvaluatedKey</code> to apply in a subsequent
/// operation to continue the operation. For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html">Query and Scan</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_limit(input);
self
}
/// <p>Determines the read consistency model: If set to <code>true</code>, then the operation
/// uses strongly consistent reads; otherwise, the operation uses eventually consistent
/// reads.</p>
/// <p>Strongly consistent reads are not supported on global secondary indexes. If you query
/// a global secondary index with <code>ConsistentRead</code> set to <code>true</code>, you
/// will receive a <code>ValidationException</code>.</p>
pub fn consistent_read(mut self, inp: bool) -> Self {
self.inner = self.inner.consistent_read(inp);
self
}
/// <p>Determines the read consistency model: If set to <code>true</code>, then the operation
/// uses strongly consistent reads; otherwise, the operation uses eventually consistent
/// reads.</p>
/// <p>Strongly consistent reads are not supported on global secondary indexes. If you query
/// a global secondary index with <code>ConsistentRead</code> set to <code>true</code>, you
/// will receive a <code>ValidationException</code>.</p>
pub fn set_consistent_read(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_consistent_read(input);
self
}
/// Adds a key-value pair to `KeyConditions`.
///
/// To override the contents of this collection use [`set_key_conditions`](Self::set_key_conditions).
///
/// <p>This is a legacy parameter. Use <code>KeyConditionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html">KeyConditions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn key_conditions(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::Condition>,
) -> Self {
self.inner = self.inner.key_conditions(k, v);
self
}
/// <p>This is a legacy parameter. Use <code>KeyConditionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html">KeyConditions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_key_conditions(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::Condition>,
>,
) -> Self {
self.inner = self.inner.set_key_conditions(input);
self
}
/// Adds a key-value pair to `QueryFilter`.
///
/// To override the contents of this collection use [`set_query_filter`](Self::set_query_filter).
///
/// <p>This is a legacy parameter. Use <code>FilterExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html">QueryFilter</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn query_filter(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::Condition>,
) -> Self {
self.inner = self.inner.query_filter(k, v);
self
}
/// <p>This is a legacy parameter. Use <code>FilterExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html">QueryFilter</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_query_filter(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::Condition>,
>,
) -> Self {
self.inner = self.inner.set_query_filter(input);
self
}
/// <p>This is a legacy parameter. Use <code>FilterExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html">ConditionalOperator</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn conditional_operator(mut self, inp: crate::model::ConditionalOperator) -> Self {
self.inner = self.inner.conditional_operator(inp);
self
}
/// <p>This is a legacy parameter. Use <code>FilterExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html">ConditionalOperator</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_conditional_operator(
mut self,
input: std::option::Option<crate::model::ConditionalOperator>,
) -> Self {
self.inner = self.inner.set_conditional_operator(input);
self
}
/// <p>Specifies the order for index traversal: If <code>true</code> (default), the traversal
/// is performed in ascending order; if <code>false</code>, the traversal is performed in
/// descending order. </p>
/// <p>Items with the same partition key value are stored in sorted order by sort key. If the
/// sort key data type is Number, the results are stored in numeric order. For type String,
/// the results are stored in order of UTF-8 bytes. For type Binary, DynamoDB treats each
/// byte of the binary data as unsigned.</p>
/// <p>If <code>ScanIndexForward</code> is <code>true</code>, DynamoDB returns the results in
/// the order in which they are stored (by sort key value). This is the default behavior. If
/// <code>ScanIndexForward</code> is <code>false</code>, DynamoDB reads the results in
/// reverse order by sort key value, and then returns the results to the client.</p>
pub fn scan_index_forward(mut self, inp: bool) -> Self {
self.inner = self.inner.scan_index_forward(inp);
self
}
/// <p>Specifies the order for index traversal: If <code>true</code> (default), the traversal
/// is performed in ascending order; if <code>false</code>, the traversal is performed in
/// descending order. </p>
/// <p>Items with the same partition key value are stored in sorted order by sort key. If the
/// sort key data type is Number, the results are stored in numeric order. For type String,
/// the results are stored in order of UTF-8 bytes. For type Binary, DynamoDB treats each
/// byte of the binary data as unsigned.</p>
/// <p>If <code>ScanIndexForward</code> is <code>true</code>, DynamoDB returns the results in
/// the order in which they are stored (by sort key value). This is the default behavior. If
/// <code>ScanIndexForward</code> is <code>false</code>, DynamoDB reads the results in
/// reverse order by sort key value, and then returns the results to the client.</p>
pub fn set_scan_index_forward(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_scan_index_forward(input);
self
}
/// Adds a key-value pair to `ExclusiveStartKey`.
///
/// To override the contents of this collection use [`set_exclusive_start_key`](Self::set_exclusive_start_key).
///
/// <p>The primary key of the first item that this operation will evaluate. Use the value
/// that was returned for <code>LastEvaluatedKey</code> in the previous operation.</p>
/// <p>The data type for <code>ExclusiveStartKey</code> must be String, Number, or Binary. No
/// set data types are allowed.</p>
pub fn exclusive_start_key(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::AttributeValue>,
) -> Self {
self.inner = self.inner.exclusive_start_key(k, v);
self
}
/// <p>The primary key of the first item that this operation will evaluate. Use the value
/// that was returned for <code>LastEvaluatedKey</code> in the previous operation.</p>
/// <p>The data type for <code>ExclusiveStartKey</code> must be String, Number, or Binary. No
/// set data types are allowed.</p>
pub fn set_exclusive_start_key(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::AttributeValue>,
>,
) -> Self {
self.inner = self.inner.set_exclusive_start_key(input);
self
}
/// <p>Determines the level of detail about provisioned throughput consumption that is
/// returned in the response:</p>
/// <ul>
/// <li>
/// <p>
/// <code>INDEXES</code> - The response includes the aggregate
/// <code>ConsumedCapacity</code> for the operation, together with
/// <code>ConsumedCapacity</code> for each table and secondary index that was
/// accessed.</p>
/// <p>Note that some operations, such as <code>GetItem</code> and
/// <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
/// specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
/// information for table(s).</p>
/// </li>
/// <li>
/// <p>
/// <code>TOTAL</code> - The response includes only the aggregate
/// <code>ConsumedCapacity</code> for the operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
/// response.</p>
/// </li>
/// </ul>
pub fn return_consumed_capacity(
mut self,
inp: crate::model::ReturnConsumedCapacity,
) -> Self {
self.inner = self.inner.return_consumed_capacity(inp);
self
}
/// <p>Determines the level of detail about provisioned throughput consumption that is
/// returned in the response:</p>
/// <ul>
/// <li>
/// <p>
/// <code>INDEXES</code> - The response includes the aggregate
/// <code>ConsumedCapacity</code> for the operation, together with
/// <code>ConsumedCapacity</code> for each table and secondary index that was
/// accessed.</p>
/// <p>Note that some operations, such as <code>GetItem</code> and
/// <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
/// specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
/// information for table(s).</p>
/// </li>
/// <li>
/// <p>
/// <code>TOTAL</code> - The response includes only the aggregate
/// <code>ConsumedCapacity</code> for the operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
/// response.</p>
/// </li>
/// </ul>
pub fn set_return_consumed_capacity(
mut self,
input: std::option::Option<crate::model::ReturnConsumedCapacity>,
) -> Self {
self.inner = self.inner.set_return_consumed_capacity(input);
self
}
/// <p>A string that identifies one or more attributes to retrieve from the table. These
/// attributes can include scalars, sets, or elements of a JSON document. The attributes in
/// the expression must be separated by commas.</p>
/// <p>If no attribute names are specified, then all attributes will be returned. If any of
/// the requested attributes are not found, they will not appear in the result.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Accessing Item Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn projection_expression(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.projection_expression(inp);
self
}
/// <p>A string that identifies one or more attributes to retrieve from the table. These
/// attributes can include scalars, sets, or elements of a JSON document. The attributes in
/// the expression must be separated by commas.</p>
/// <p>If no attribute names are specified, then all attributes will be returned. If any of
/// the requested attributes are not found, they will not appear in the result.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Accessing Item Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_projection_expression(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_projection_expression(input);
self
}
/// <p>A string that contains conditions that DynamoDB applies after the <code>Query</code>
/// operation, but before the data is returned to you. Items that do not satisfy the
/// <code>FilterExpression</code> criteria are not returned.</p>
/// <p>A <code>FilterExpression</code> does not allow key attributes. You cannot define a
/// filter expression based on a partition key or a sort key.</p>
/// <note>
/// <p>A <code>FilterExpression</code> is applied after the items have already been read;
/// the process of filtering does not consume any additional read capacity units.</p>
/// </note>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults">Filter
/// Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn filter_expression(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.filter_expression(inp);
self
}
/// <p>A string that contains conditions that DynamoDB applies after the <code>Query</code>
/// operation, but before the data is returned to you. Items that do not satisfy the
/// <code>FilterExpression</code> criteria are not returned.</p>
/// <p>A <code>FilterExpression</code> does not allow key attributes. You cannot define a
/// filter expression based on a partition key or a sort key.</p>
/// <note>
/// <p>A <code>FilterExpression</code> is applied after the items have already been read;
/// the process of filtering does not consume any additional read capacity units.</p>
/// </note>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults">Filter
/// Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_filter_expression(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_filter_expression(input);
self
}
/// <p>The condition that specifies the key values for items to be retrieved by the
/// <code>Query</code> action.</p>
///
/// <p>The condition must perform an equality test on a single partition key value.</p>
/// <p>The condition can optionally perform one of several comparison tests on a single sort
/// key value. This allows <code>Query</code> to retrieve one item with a given partition
/// key value and sort key value, or several items that have the same partition key value
/// but different sort key values.</p>
///
/// <p>The partition key equality test is required, and must be specified in the following
/// format:</p>
///
/// <p>
/// <code>partitionKeyName</code>
/// <i>=</i>
/// <code>:partitionkeyval</code>
/// </p>
///
/// <p>If you also want to provide a condition for the sort key, it must be combined using
/// <code>AND</code> with the condition for the sort key. Following is an example, using
/// the <b>=</b> comparison operator for the sort key:</p>
///
/// <p>
/// <code>partitionKeyName</code>
/// <code>=</code>
/// <code>:partitionkeyval</code>
/// <code>AND</code>
/// <code>sortKeyName</code>
/// <code>=</code>
/// <code>:sortkeyval</code>
/// </p>
/// <p>Valid comparisons for the sort key condition are as follows:</p>
/// <ul>
/// <li>
/// <p>
/// <code>sortKeyName</code>
/// <code>=</code>
/// <code>:sortkeyval</code> - true if the sort key value is equal to
/// <code>:sortkeyval</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>sortKeyName</code>
/// <code><</code>
/// <code>:sortkeyval</code> - true if the sort key value is less than
/// <code>:sortkeyval</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>sortKeyName</code>
/// <code><=</code>
/// <code>:sortkeyval</code> - true if the sort key value is less than or equal to
/// <code>:sortkeyval</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>sortKeyName</code>
/// <code>></code>
/// <code>:sortkeyval</code> - true if the sort key value is greater than
/// <code>:sortkeyval</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>sortKeyName</code>
/// <code>>= </code>
/// <code>:sortkeyval</code> - true if the sort key value is greater than or equal
/// to <code>:sortkeyval</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>sortKeyName</code>
/// <code>BETWEEN</code>
/// <code>:sortkeyval1</code>
/// <code>AND</code>
/// <code>:sortkeyval2</code> - true if the sort key value is greater than or equal
/// to <code>:sortkeyval1</code>, and less than or equal to
/// <code>:sortkeyval2</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>begins_with (</code>
/// <code>sortKeyName</code>, <code>:sortkeyval</code>
/// <code>)</code> - true if the sort key value begins with a particular operand.
/// (You cannot use this function with a sort key that is of type Number.) Note that
/// the function name <code>begins_with</code> is case-sensitive.</p>
///
/// </li>
/// </ul>
///
/// <p>Use the <code>ExpressionAttributeValues</code> parameter to replace tokens such as
/// <code>:partitionval</code> and <code>:sortval</code> with actual values at
/// runtime.</p>
///
/// <p>You can optionally use the <code>ExpressionAttributeNames</code> parameter to replace
/// the names of the partition key and sort key with placeholder tokens. This option might
/// be necessary if an attribute name conflicts with a DynamoDB reserved word. For example,
/// the following <code>KeyConditionExpression</code> parameter causes an error because
/// <i>Size</i> is a reserved word:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Size = :myval</code>
/// </p>
/// </li>
/// </ul>
/// <p>To work around this, define a placeholder (such a <code>#S</code>) to represent the
/// attribute name <i>Size</i>. <code>KeyConditionExpression</code> then is as
/// follows:</p>
/// <ul>
/// <li>
/// <p>
/// <code>#S = :myval</code>
/// </p>
/// </li>
/// </ul>
/// <p>For a list of reserved words, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved Words</a>
/// in the <i>Amazon DynamoDB Developer Guide</i>.</p>
///
/// <p>For more information on <code>ExpressionAttributeNames</code> and
/// <code>ExpressionAttributeValues</code>, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html">Using
/// Placeholders for Attribute Names and Values</a> in the <i>Amazon DynamoDB
/// Developer Guide</i>.</p>
pub fn key_condition_expression(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.key_condition_expression(inp);
self
}
/// <p>The condition that specifies the key values for items to be retrieved by the
/// <code>Query</code> action.</p>
///
/// <p>The condition must perform an equality test on a single partition key value.</p>
/// <p>The condition can optionally perform one of several comparison tests on a single sort
/// key value. This allows <code>Query</code> to retrieve one item with a given partition
/// key value and sort key value, or several items that have the same partition key value
/// but different sort key values.</p>
///
/// <p>The partition key equality test is required, and must be specified in the following
/// format:</p>
///
/// <p>
/// <code>partitionKeyName</code>
/// <i>=</i>
/// <code>:partitionkeyval</code>
/// </p>
///
/// <p>If you also want to provide a condition for the sort key, it must be combined using
/// <code>AND</code> with the condition for the sort key. Following is an example, using
/// the <b>=</b> comparison operator for the sort key:</p>
///
/// <p>
/// <code>partitionKeyName</code>
/// <code>=</code>
/// <code>:partitionkeyval</code>
/// <code>AND</code>
/// <code>sortKeyName</code>
/// <code>=</code>
/// <code>:sortkeyval</code>
/// </p>
/// <p>Valid comparisons for the sort key condition are as follows:</p>
/// <ul>
/// <li>
/// <p>
/// <code>sortKeyName</code>
/// <code>=</code>
/// <code>:sortkeyval</code> - true if the sort key value is equal to
/// <code>:sortkeyval</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>sortKeyName</code>
/// <code><</code>
/// <code>:sortkeyval</code> - true if the sort key value is less than
/// <code>:sortkeyval</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>sortKeyName</code>
/// <code><=</code>
/// <code>:sortkeyval</code> - true if the sort key value is less than or equal to
/// <code>:sortkeyval</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>sortKeyName</code>
/// <code>></code>
/// <code>:sortkeyval</code> - true if the sort key value is greater than
/// <code>:sortkeyval</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>sortKeyName</code>
/// <code>>= </code>
/// <code>:sortkeyval</code> - true if the sort key value is greater than or equal
/// to <code>:sortkeyval</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>sortKeyName</code>
/// <code>BETWEEN</code>
/// <code>:sortkeyval1</code>
/// <code>AND</code>
/// <code>:sortkeyval2</code> - true if the sort key value is greater than or equal
/// to <code>:sortkeyval1</code>, and less than or equal to
/// <code>:sortkeyval2</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>begins_with (</code>
/// <code>sortKeyName</code>, <code>:sortkeyval</code>
/// <code>)</code> - true if the sort key value begins with a particular operand.
/// (You cannot use this function with a sort key that is of type Number.) Note that
/// the function name <code>begins_with</code> is case-sensitive.</p>
///
/// </li>
/// </ul>
///
/// <p>Use the <code>ExpressionAttributeValues</code> parameter to replace tokens such as
/// <code>:partitionval</code> and <code>:sortval</code> with actual values at
/// runtime.</p>
///
/// <p>You can optionally use the <code>ExpressionAttributeNames</code> parameter to replace
/// the names of the partition key and sort key with placeholder tokens. This option might
/// be necessary if an attribute name conflicts with a DynamoDB reserved word. For example,
/// the following <code>KeyConditionExpression</code> parameter causes an error because
/// <i>Size</i> is a reserved word:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Size = :myval</code>
/// </p>
/// </li>
/// </ul>
/// <p>To work around this, define a placeholder (such a <code>#S</code>) to represent the
/// attribute name <i>Size</i>. <code>KeyConditionExpression</code> then is as
/// follows:</p>
/// <ul>
/// <li>
/// <p>
/// <code>#S = :myval</code>
/// </p>
/// </li>
/// </ul>
/// <p>For a list of reserved words, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved Words</a>
/// in the <i>Amazon DynamoDB Developer Guide</i>.</p>
///
/// <p>For more information on <code>ExpressionAttributeNames</code> and
/// <code>ExpressionAttributeValues</code>, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html">Using
/// Placeholders for Attribute Names and Values</a> in the <i>Amazon DynamoDB
/// Developer Guide</i>.</p>
pub fn set_key_condition_expression(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_key_condition_expression(input);
self
}
/// Adds a key-value pair to `ExpressionAttributeNames`.
///
/// To override the contents of this collection use [`set_expression_attribute_names`](Self::set_expression_attribute_names).
///
/// <p>One or more substitution tokens for attribute names in an expression. The following
/// are some use cases for using <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>To access an attribute whose name conflicts with a DynamoDB reserved
/// word.</p>
/// </li>
/// <li>
/// <p>To create a placeholder for repeating occurrences of an attribute name in an
/// expression.</p>
/// </li>
/// <li>
/// <p>To prevent special characters in an attribute name from being misinterpreted
/// in an expression.</p>
/// </li>
/// </ul>
/// <p>Use the <b>#</b> character in an expression to dereference
/// an attribute name. For example, consider the following attribute name:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Percentile</code>
/// </p>
/// </li>
/// </ul>
/// <p>The name of this attribute conflicts with a reserved word, so it cannot be used
/// directly in an expression. (For the complete list of reserved words, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved Words</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>). To work around this, you could specify the following for
/// <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>{"#P":"Percentile"}</code>
/// </p>
/// </li>
/// </ul>
/// <p>You could then use this substitution in an expression, as in this example:</p>
/// <ul>
/// <li>
/// <p>
/// <code>#P = :val</code>
/// </p>
/// </li>
/// </ul>
/// <note>
/// <p>Tokens that begin with the <b>:</b> character are
/// <i>expression attribute values</i>, which are placeholders for the
/// actual value at runtime.</p>
/// </note>
/// <p>For more information on expression attribute names, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn expression_attribute_names(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.expression_attribute_names(k, v);
self
}
/// <p>One or more substitution tokens for attribute names in an expression. The following
/// are some use cases for using <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>To access an attribute whose name conflicts with a DynamoDB reserved
/// word.</p>
/// </li>
/// <li>
/// <p>To create a placeholder for repeating occurrences of an attribute name in an
/// expression.</p>
/// </li>
/// <li>
/// <p>To prevent special characters in an attribute name from being misinterpreted
/// in an expression.</p>
/// </li>
/// </ul>
/// <p>Use the <b>#</b> character in an expression to dereference
/// an attribute name. For example, consider the following attribute name:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Percentile</code>
/// </p>
/// </li>
/// </ul>
/// <p>The name of this attribute conflicts with a reserved word, so it cannot be used
/// directly in an expression. (For the complete list of reserved words, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved Words</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>). To work around this, you could specify the following for
/// <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>{"#P":"Percentile"}</code>
/// </p>
/// </li>
/// </ul>
/// <p>You could then use this substitution in an expression, as in this example:</p>
/// <ul>
/// <li>
/// <p>
/// <code>#P = :val</code>
/// </p>
/// </li>
/// </ul>
/// <note>
/// <p>Tokens that begin with the <b>:</b> character are
/// <i>expression attribute values</i>, which are placeholders for the
/// actual value at runtime.</p>
/// </note>
/// <p>For more information on expression attribute names, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_expression_attribute_names(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_expression_attribute_names(input);
self
}
/// Adds a key-value pair to `ExpressionAttributeValues`.
///
/// To override the contents of this collection use [`set_expression_attribute_values`](Self::set_expression_attribute_values).
///
/// <p>One or more values that can be substituted in an expression.</p>
/// <p>Use the <b>:</b> (colon) character in an expression to
/// dereference an attribute value. For example, suppose that you wanted to check whether
/// the value of the <i>ProductStatus</i> attribute was one of the following: </p>
/// <p>
/// <code>Available | Backordered | Discontinued</code>
/// </p>
/// <p>You would first need to specify <code>ExpressionAttributeValues</code> as
/// follows:</p>
/// <p>
/// <code>{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
/// ":disc":{"S":"Discontinued"} }</code>
/// </p>
/// <p>You could then use these values in an expression, such as this:</p>
/// <p>
/// <code>ProductStatus IN (:avail, :back, :disc)</code>
/// </p>
/// <p>For more information on expression attribute values, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Specifying Conditions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn expression_attribute_values(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::AttributeValue>,
) -> Self {
self.inner = self.inner.expression_attribute_values(k, v);
self
}
/// <p>One or more values that can be substituted in an expression.</p>
/// <p>Use the <b>:</b> (colon) character in an expression to
/// dereference an attribute value. For example, suppose that you wanted to check whether
/// the value of the <i>ProductStatus</i> attribute was one of the following: </p>
/// <p>
/// <code>Available | Backordered | Discontinued</code>
/// </p>
/// <p>You would first need to specify <code>ExpressionAttributeValues</code> as
/// follows:</p>
/// <p>
/// <code>{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
/// ":disc":{"S":"Discontinued"} }</code>
/// </p>
/// <p>You could then use these values in an expression, such as this:</p>
/// <p>
/// <code>ProductStatus IN (:avail, :back, :disc)</code>
/// </p>
/// <p>For more information on expression attribute values, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Specifying Conditions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_expression_attribute_values(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::AttributeValue>,
>,
) -> Self {
self.inner = self.inner.set_expression_attribute_values(input);
self
}
}
/// Fluent builder constructing a request to `RestoreTableFromBackup`.
///
/// <p>Creates a new table from an existing backup. Any number of users can execute up to 4
/// concurrent restores (any type of restore) in a given account. </p>
/// <p>You can call <code>RestoreTableFromBackup</code> at a maximum rate of 10 times per
/// second.</p>
/// <p>You must manually set up the following on the restored table:</p>
/// <ul>
/// <li>
/// <p>Auto scaling policies</p>
/// </li>
/// <li>
/// <p>IAM policies</p>
/// </li>
/// <li>
/// <p>Amazon CloudWatch metrics and alarms</p>
/// </li>
/// <li>
/// <p>Tags</p>
/// </li>
/// <li>
/// <p>Stream settings</p>
/// </li>
/// <li>
/// <p>Time to Live (TTL) settings</p>
/// </li>
/// </ul>
#[derive(std::fmt::Debug)]
pub struct RestoreTableFromBackup<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state; `send()` reads the config via `handle.conf` and
    // dispatches the assembled operation via `handle.client`.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters set by the fluent methods; consumed by
    // `send()` through `inner.build()`.
    inner: crate::input::restore_table_from_backup_input::Builder,
}
impl<C, M, R> RestoreTableFromBackup<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `RestoreTableFromBackup` fluent builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::restore_table_from_backup_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::RestoreTableFromBackupOutput,
        aws_smithy_http::result::SdkError<crate::error::RestoreTableFromBackupError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::RestoreTableFromBackupInputOperationOutputAlias,
            crate::output::RestoreTableFromBackupOutput,
            crate::error::RestoreTableFromBackupError,
            crate::input::RestoreTableFromBackupInputOperationRetryAlias,
        >,
    {
        // Failures while building the input or assembling the operation are
        // reported as `ConstructionFailure`; only the final dispatch can
        // produce other `SdkError` variants.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the new table to which the backup must be restored.</p>
    pub fn target_table_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.target_table_name(inp),
        }
    }
    /// <p>The name of the new table to which the backup must be restored.</p>
    pub fn set_target_table_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_target_table_name(input),
        }
    }
    /// <p>The Amazon Resource Name (ARN) associated with the backup.</p>
    pub fn backup_arn(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.backup_arn(inp),
        }
    }
    /// <p>The Amazon Resource Name (ARN) associated with the backup.</p>
    pub fn set_backup_arn(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_backup_arn(input),
        }
    }
    /// <p>The billing mode of the restored table.</p>
    pub fn billing_mode_override(self, inp: crate::model::BillingMode) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.billing_mode_override(inp),
        }
    }
    /// <p>The billing mode of the restored table.</p>
    pub fn set_billing_mode_override(
        self,
        input: std::option::Option<crate::model::BillingMode>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_billing_mode_override(input),
        }
    }
    /// Appends an item to `GlobalSecondaryIndexOverride`.
    ///
    /// To override the contents of this collection use [`set_global_secondary_index_override`](Self::set_global_secondary_index_override).
    ///
    /// <p>List of global secondary indexes for the restored table. The indexes provided should
    /// match existing secondary indexes. You can choose to exclude some or all of the indexes
    /// at the time of restore.</p>
    pub fn global_secondary_index_override(
        self,
        inp: impl Into<crate::model::GlobalSecondaryIndex>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.global_secondary_index_override(inp),
        }
    }
    /// <p>List of global secondary indexes for the restored table. The indexes provided should
    /// match existing secondary indexes. You can choose to exclude some or all of the indexes
    /// at the time of restore.</p>
    pub fn set_global_secondary_index_override(
        self,
        input: std::option::Option<std::vec::Vec<crate::model::GlobalSecondaryIndex>>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_global_secondary_index_override(input),
        }
    }
    /// Appends an item to `LocalSecondaryIndexOverride`.
    ///
    /// To override the contents of this collection use [`set_local_secondary_index_override`](Self::set_local_secondary_index_override).
    ///
    /// <p>List of local secondary indexes for the restored table. The indexes provided should
    /// match existing secondary indexes. You can choose to exclude some or all of the indexes
    /// at the time of restore.</p>
    pub fn local_secondary_index_override(
        self,
        inp: impl Into<crate::model::LocalSecondaryIndex>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.local_secondary_index_override(inp),
        }
    }
    /// <p>List of local secondary indexes for the restored table. The indexes provided should
    /// match existing secondary indexes. You can choose to exclude some or all of the indexes
    /// at the time of restore.</p>
    pub fn set_local_secondary_index_override(
        self,
        input: std::option::Option<std::vec::Vec<crate::model::LocalSecondaryIndex>>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_local_secondary_index_override(input),
        }
    }
    /// <p>Provisioned throughput settings for the restored table.</p>
    pub fn provisioned_throughput_override(
        self,
        inp: crate::model::ProvisionedThroughput,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.provisioned_throughput_override(inp),
        }
    }
    /// <p>Provisioned throughput settings for the restored table.</p>
    pub fn set_provisioned_throughput_override(
        self,
        input: std::option::Option<crate::model::ProvisionedThroughput>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_provisioned_throughput_override(input),
        }
    }
    /// <p>The new server-side encryption settings for the restored table.</p>
    pub fn sse_specification_override(self, inp: crate::model::SseSpecification) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.sse_specification_override(inp),
        }
    }
    /// <p>The new server-side encryption settings for the restored table.</p>
    pub fn set_sse_specification_override(
        self,
        input: std::option::Option<crate::model::SseSpecification>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_sse_specification_override(input),
        }
    }
}
/// Fluent builder constructing a request to `RestoreTableToPointInTime`.
///
/// <p>Restores the specified table to the specified point in time within
/// <code>EarliestRestorableDateTime</code> and <code>LatestRestorableDateTime</code>.
/// You can restore your table to any point in time during the last 35 days. Any number of
/// users can execute up to 4 concurrent restores (any type of restore) in a given account. </p>
/// <p> When you restore using point in time recovery, DynamoDB restores your table data to
/// the state based on the selected date and time (day:hour:minute:second) to a new table. </p>
/// <p> Along with data, the following are also included on the new restored table using
/// point in time recovery: </p>
/// <ul>
/// <li>
/// <p>Global secondary indexes (GSIs)</p>
/// </li>
/// <li>
/// <p>Local secondary indexes (LSIs)</p>
/// </li>
/// <li>
/// <p>Provisioned read and write capacity</p>
/// </li>
/// <li>
/// <p>Encryption settings</p>
/// <important>
/// <p> All these settings come from the current settings of the source table at
/// the time of restore. </p>
/// </important>
/// </li>
/// </ul>
///
/// <p>You must manually set up the following on the restored table:</p>
/// <ul>
/// <li>
/// <p>Auto scaling policies</p>
/// </li>
/// <li>
/// <p>IAM policies</p>
/// </li>
/// <li>
/// <p>Amazon CloudWatch metrics and alarms</p>
/// </li>
/// <li>
/// <p>Tags</p>
/// </li>
/// <li>
/// <p>Stream settings</p>
/// </li>
/// <li>
/// <p>Time to Live (TTL) settings</p>
/// </li>
/// <li>
/// <p>Point in time recovery settings</p>
/// </li>
/// </ul>
#[derive(std::fmt::Debug)]
pub struct RestoreTableToPointInTime<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state; `send()` reads the config via `handle.conf` and
    // dispatches the assembled operation via `handle.client`.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters set by the fluent methods; consumed by
    // `send()` through `inner.build()`.
    inner: crate::input::restore_table_to_point_in_time_input::Builder,
}
impl<C, M, R> RestoreTableToPointInTime<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `RestoreTableToPointInTime` fluent builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::restore_table_to_point_in_time_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::RestoreTableToPointInTimeOutput,
        aws_smithy_http::result::SdkError<crate::error::RestoreTableToPointInTimeError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::RestoreTableToPointInTimeInputOperationOutputAlias,
            crate::output::RestoreTableToPointInTimeOutput,
            crate::error::RestoreTableToPointInTimeError,
            crate::input::RestoreTableToPointInTimeInputOperationRetryAlias,
        >,
    {
        // Failures while building the input or assembling the operation are
        // reported as `ConstructionFailure`; only the final dispatch can
        // produce other `SdkError` variants.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The DynamoDB table that will be restored. This value is an Amazon Resource Name
    /// (ARN).</p>
    pub fn source_table_arn(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.source_table_arn(inp),
        }
    }
    /// <p>The DynamoDB table that will be restored. This value is an Amazon Resource Name
    /// (ARN).</p>
    pub fn set_source_table_arn(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_source_table_arn(input),
        }
    }
    /// <p>Name of the source table that is being restored.</p>
    pub fn source_table_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.source_table_name(inp),
        }
    }
    /// <p>Name of the source table that is being restored.</p>
    pub fn set_source_table_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_source_table_name(input),
        }
    }
    /// <p>The name of the new table to which it must be restored to.</p>
    pub fn target_table_name(self, inp: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.target_table_name(inp),
        }
    }
    /// <p>The name of the new table to which it must be restored to.</p>
    pub fn set_target_table_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_target_table_name(input),
        }
    }
    /// <p>Restore the table to the latest possible time. <code>LatestRestorableDateTime</code>
    /// is typically 5 minutes before the current time. </p>
    pub fn use_latest_restorable_time(self, inp: bool) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.use_latest_restorable_time(inp),
        }
    }
    /// <p>Restore the table to the latest possible time. <code>LatestRestorableDateTime</code>
    /// is typically 5 minutes before the current time. </p>
    pub fn set_use_latest_restorable_time(self, input: std::option::Option<bool>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_use_latest_restorable_time(input),
        }
    }
    /// <p>Time in the past to restore the table to.</p>
    pub fn restore_date_time(self, inp: aws_smithy_types::DateTime) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.restore_date_time(inp),
        }
    }
    /// <p>Time in the past to restore the table to.</p>
    pub fn set_restore_date_time(
        self,
        input: std::option::Option<aws_smithy_types::DateTime>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_restore_date_time(input),
        }
    }
    /// <p>The billing mode of the restored table.</p>
    pub fn billing_mode_override(self, inp: crate::model::BillingMode) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.billing_mode_override(inp),
        }
    }
    /// <p>The billing mode of the restored table.</p>
    pub fn set_billing_mode_override(
        self,
        input: std::option::Option<crate::model::BillingMode>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_billing_mode_override(input),
        }
    }
    /// Appends an item to `GlobalSecondaryIndexOverride`.
    ///
    /// To override the contents of this collection use [`set_global_secondary_index_override`](Self::set_global_secondary_index_override).
    ///
    /// <p>List of global secondary indexes for the restored table. The indexes provided should
    /// match existing secondary indexes. You can choose to exclude some or all of the indexes
    /// at the time of restore.</p>
    pub fn global_secondary_index_override(
        self,
        inp: impl Into<crate::model::GlobalSecondaryIndex>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.global_secondary_index_override(inp),
        }
    }
    /// <p>List of global secondary indexes for the restored table. The indexes provided should
    /// match existing secondary indexes. You can choose to exclude some or all of the indexes
    /// at the time of restore.</p>
    pub fn set_global_secondary_index_override(
        self,
        input: std::option::Option<std::vec::Vec<crate::model::GlobalSecondaryIndex>>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_global_secondary_index_override(input),
        }
    }
    /// Appends an item to `LocalSecondaryIndexOverride`.
    ///
    /// To override the contents of this collection use [`set_local_secondary_index_override`](Self::set_local_secondary_index_override).
    ///
    /// <p>List of local secondary indexes for the restored table. The indexes provided should
    /// match existing secondary indexes. You can choose to exclude some or all of the indexes
    /// at the time of restore.</p>
    pub fn local_secondary_index_override(
        self,
        inp: impl Into<crate::model::LocalSecondaryIndex>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.local_secondary_index_override(inp),
        }
    }
    /// <p>List of local secondary indexes for the restored table. The indexes provided should
    /// match existing secondary indexes. You can choose to exclude some or all of the indexes
    /// at the time of restore.</p>
    pub fn set_local_secondary_index_override(
        self,
        input: std::option::Option<std::vec::Vec<crate::model::LocalSecondaryIndex>>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_local_secondary_index_override(input),
        }
    }
    /// <p>Provisioned throughput settings for the restored table.</p>
    pub fn provisioned_throughput_override(
        self,
        inp: crate::model::ProvisionedThroughput,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.provisioned_throughput_override(inp),
        }
    }
    /// <p>Provisioned throughput settings for the restored table.</p>
    pub fn set_provisioned_throughput_override(
        self,
        input: std::option::Option<crate::model::ProvisionedThroughput>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_provisioned_throughput_override(input),
        }
    }
    /// <p>The new server-side encryption settings for the restored table.</p>
    pub fn sse_specification_override(self, inp: crate::model::SseSpecification) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.sse_specification_override(inp),
        }
    }
    /// <p>The new server-side encryption settings for the restored table.</p>
    pub fn set_sse_specification_override(
        self,
        input: std::option::Option<crate::model::SseSpecification>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_sse_specification_override(input),
        }
    }
}
/// Fluent builder constructing a request to `Scan`.
///
/// <p>The <code>Scan</code> operation returns one or more items and item attributes by
/// accessing every item in a table or a secondary index. To have DynamoDB return fewer
/// items, you can provide a <code>FilterExpression</code> operation.</p>
/// <p>If the total number of scanned items exceeds the maximum dataset size limit of 1 MB,
/// the scan stops and results are returned to the user as a <code>LastEvaluatedKey</code>
/// value to continue the scan in a subsequent operation. The results also include the
/// number of items exceeding the limit. A scan can result in no table data meeting the
/// filter criteria. </p>
/// <p>A single <code>Scan</code> operation reads up to the maximum number of items set (if
/// using the <code>Limit</code> parameter) or a maximum of 1 MB of data and then apply any
/// filtering to the results using <code>FilterExpression</code>. If
/// <code>LastEvaluatedKey</code> is present in the response, you need to paginate the
/// result set. For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination">Paginating the
/// Results</a> in the <i>Amazon DynamoDB Developer Guide</i>. </p>
/// <p>
/// <code>Scan</code> operations proceed sequentially; however, for faster performance on
/// a large table or secondary index, applications can request a parallel <code>Scan</code>
/// operation by providing the <code>Segment</code> and <code>TotalSegments</code>
/// parameters. For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan">Parallel
/// Scan</a> in the <i>Amazon DynamoDB Developer Guide</i>.</p>
/// <p>
/// <code>Scan</code> uses eventually consistent reads when accessing the data in a table;
/// therefore, the result set might not include the changes to data in the table immediately
/// before the operation began. If you need a consistent copy of the data, as of the time
/// that the <code>Scan</code> begins, you can set the <code>ConsistentRead</code> parameter
/// to <code>true</code>.</p>
#[derive(std::fmt::Debug)]
pub struct Scan<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared handle (client + configuration) used to dispatch the request in `send`.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the `ScanInput` fields set through the fluent setter methods.
    inner: crate::input::scan_input::Builder,
}
impl<C, M, R> Scan<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `Scan`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ScanOutput,
aws_smithy_http::result::SdkError<crate::error::ScanError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ScanInputOperationOutputAlias,
crate::output::ScanOutput,
crate::error::ScanError,
crate::input::ScanInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the table containing the requested items; or, if you provide
/// <code>IndexName</code>, the name of the table to which that index belongs.</p>
pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_name(inp);
self
}
/// <p>The name of the table containing the requested items; or, if you provide
/// <code>IndexName</code>, the name of the table to which that index belongs.</p>
pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_name(input);
self
}
/// <p>The name of a secondary index to scan. This index can be any local secondary index or
/// global secondary index. Note that if you use the <code>IndexName</code> parameter, you
/// must also provide <code>TableName</code>.</p>
pub fn index_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.index_name(inp);
self
}
/// <p>The name of a secondary index to scan. This index can be any local secondary index or
/// global secondary index. Note that if you use the <code>IndexName</code> parameter, you
/// must also provide <code>TableName</code>.</p>
pub fn set_index_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_index_name(input);
self
}
/// Appends an item to `AttributesToGet`.
///
/// To override the contents of this collection use [`set_attributes_to_get`](Self::set_attributes_to_get).
///
/// <p>This is a legacy parameter. Use <code>ProjectionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html">AttributesToGet</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn attributes_to_get(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.attributes_to_get(inp);
self
}
/// <p>This is a legacy parameter. Use <code>ProjectionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html">AttributesToGet</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_attributes_to_get(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_attributes_to_get(input);
self
}
/// <p>The maximum number of items to evaluate (not necessarily the number of matching
/// items). If DynamoDB processes the number of items up to the limit while processing the
/// results, it stops the operation and returns the matching values up to that point, and a
/// key in <code>LastEvaluatedKey</code> to apply in a subsequent operation, so that you can
/// pick up where you left off. Also, if the processed dataset size exceeds 1 MB before
/// DynamoDB reaches this limit, it stops the operation and returns the matching values up
/// to the limit, and a key in <code>LastEvaluatedKey</code> to apply in a subsequent
/// operation to continue the operation. For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html">Working with Queries</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn limit(mut self, inp: i32) -> Self {
self.inner = self.inner.limit(inp);
self
}
/// <p>The maximum number of items to evaluate (not necessarily the number of matching
/// items). If DynamoDB processes the number of items up to the limit while processing the
/// results, it stops the operation and returns the matching values up to that point, and a
/// key in <code>LastEvaluatedKey</code> to apply in a subsequent operation, so that you can
/// pick up where you left off. Also, if the processed dataset size exceeds 1 MB before
/// DynamoDB reaches this limit, it stops the operation and returns the matching values up
/// to the limit, and a key in <code>LastEvaluatedKey</code> to apply in a subsequent
/// operation to continue the operation. For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html">Working with Queries</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_limit(input);
self
}
/// <p>The attributes to be returned in the result. You can retrieve all item attributes,
/// specific item attributes, the count of matching items, or in the case of an index, some
/// or all of the attributes projected into the index.</p>
/// <ul>
/// <li>
/// <p>
/// <code>ALL_ATTRIBUTES</code> - Returns all of the item attributes from the
/// specified table or index. If you query a local secondary index, then for each
/// matching item in the index, DynamoDB fetches the entire item from the parent
/// table. If the index is configured to project all item attributes, then all of
/// the data can be obtained from the local secondary index, and no fetching is
/// required.</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL_PROJECTED_ATTRIBUTES</code> - Allowed only when querying an index.
/// Retrieves all attributes that have been projected into the index. If the index
/// is configured to project all attributes, this return value is equivalent to
/// specifying <code>ALL_ATTRIBUTES</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>COUNT</code> - Returns the number of matching items, rather than the
/// matching items themselves.</p>
/// </li>
/// <li>
/// <p>
/// <code>SPECIFIC_ATTRIBUTES</code> - Returns only the attributes listed in
/// <code>AttributesToGet</code>. This return value is equivalent to specifying
/// <code>AttributesToGet</code> without specifying any value for
/// <code>Select</code>.</p>
/// <p>If you query or scan a local secondary index and request only attributes that
/// are projected into that index, the operation reads only the index and not the
/// table. If any of the requested attributes are not projected into the local
/// secondary index, DynamoDB fetches each of these attributes from the parent
/// table. This extra fetching incurs additional throughput cost and latency.</p>
/// <p>If you query or scan a global secondary index, you can only request attributes
/// that are projected into the index. Global secondary index queries cannot fetch
/// attributes from the parent table.</p>
/// </li>
/// </ul>
/// <p>If neither <code>Select</code> nor <code>AttributesToGet</code> are specified,
/// DynamoDB defaults to <code>ALL_ATTRIBUTES</code> when accessing a table, and
/// <code>ALL_PROJECTED_ATTRIBUTES</code> when accessing an index. You cannot use both
/// <code>Select</code> and <code>AttributesToGet</code> together in a single request,
/// unless the value for <code>Select</code> is <code>SPECIFIC_ATTRIBUTES</code>. (This
/// usage is equivalent to specifying <code>AttributesToGet</code> without any value for
/// <code>Select</code>.)</p>
/// <note>
/// <p>If you use the <code>ProjectionExpression</code> parameter, then the value for
/// <code>Select</code> can only be <code>SPECIFIC_ATTRIBUTES</code>. Any other
/// value for <code>Select</code> will return an error.</p>
/// </note>
pub fn select(mut self, inp: crate::model::Select) -> Self {
self.inner = self.inner.select(inp);
self
}
/// <p>The attributes to be returned in the result. You can retrieve all item attributes,
/// specific item attributes, the count of matching items, or in the case of an index, some
/// or all of the attributes projected into the index.</p>
/// <ul>
/// <li>
/// <p>
/// <code>ALL_ATTRIBUTES</code> - Returns all of the item attributes from the
/// specified table or index. If you query a local secondary index, then for each
/// matching item in the index, DynamoDB fetches the entire item from the parent
/// table. If the index is configured to project all item attributes, then all of
/// the data can be obtained from the local secondary index, and no fetching is
/// required.</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL_PROJECTED_ATTRIBUTES</code> - Allowed only when querying an index.
/// Retrieves all attributes that have been projected into the index. If the index
/// is configured to project all attributes, this return value is equivalent to
/// specifying <code>ALL_ATTRIBUTES</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>COUNT</code> - Returns the number of matching items, rather than the
/// matching items themselves.</p>
/// </li>
/// <li>
/// <p>
/// <code>SPECIFIC_ATTRIBUTES</code> - Returns only the attributes listed in
/// <code>AttributesToGet</code>. This return value is equivalent to specifying
/// <code>AttributesToGet</code> without specifying any value for
/// <code>Select</code>.</p>
/// <p>If you query or scan a local secondary index and request only attributes that
/// are projected into that index, the operation reads only the index and not the
/// table. If any of the requested attributes are not projected into the local
/// secondary index, DynamoDB fetches each of these attributes from the parent
/// table. This extra fetching incurs additional throughput cost and latency.</p>
/// <p>If you query or scan a global secondary index, you can only request attributes
/// that are projected into the index. Global secondary index queries cannot fetch
/// attributes from the parent table.</p>
/// </li>
/// </ul>
/// <p>If neither <code>Select</code> nor <code>AttributesToGet</code> are specified,
/// DynamoDB defaults to <code>ALL_ATTRIBUTES</code> when accessing a table, and
/// <code>ALL_PROJECTED_ATTRIBUTES</code> when accessing an index. You cannot use both
/// <code>Select</code> and <code>AttributesToGet</code> together in a single request,
/// unless the value for <code>Select</code> is <code>SPECIFIC_ATTRIBUTES</code>. (This
/// usage is equivalent to specifying <code>AttributesToGet</code> without any value for
/// <code>Select</code>.)</p>
/// <note>
/// <p>If you use the <code>ProjectionExpression</code> parameter, then the value for
/// <code>Select</code> can only be <code>SPECIFIC_ATTRIBUTES</code>. Any other
/// value for <code>Select</code> will return an error.</p>
/// </note>
pub fn set_select(mut self, input: std::option::Option<crate::model::Select>) -> Self {
self.inner = self.inner.set_select(input);
self
}
/// Adds a key-value pair to `ScanFilter`.
///
/// To override the contents of this collection use [`set_scan_filter`](Self::set_scan_filter).
///
/// <p>This is a legacy parameter. Use <code>FilterExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html">ScanFilter</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn scan_filter(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::Condition>,
) -> Self {
self.inner = self.inner.scan_filter(k, v);
self
}
/// <p>This is a legacy parameter. Use <code>FilterExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html">ScanFilter</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_scan_filter(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::Condition>,
>,
) -> Self {
self.inner = self.inner.set_scan_filter(input);
self
}
/// <p>This is a legacy parameter. Use <code>FilterExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html">ConditionalOperator</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn conditional_operator(mut self, inp: crate::model::ConditionalOperator) -> Self {
self.inner = self.inner.conditional_operator(inp);
self
}
/// <p>This is a legacy parameter. Use <code>FilterExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html">ConditionalOperator</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_conditional_operator(
mut self,
input: std::option::Option<crate::model::ConditionalOperator>,
) -> Self {
self.inner = self.inner.set_conditional_operator(input);
self
}
/// Adds a key-value pair to `ExclusiveStartKey`.
///
/// To override the contents of this collection use [`set_exclusive_start_key`](Self::set_exclusive_start_key).
///
/// <p>The primary key of the first item that this operation will evaluate. Use the value
/// that was returned for <code>LastEvaluatedKey</code> in the previous operation.</p>
/// <p>The data type for <code>ExclusiveStartKey</code> must be String, Number or Binary. No
/// set data types are allowed.</p>
/// <p>In a parallel scan, a <code>Scan</code> request that includes
/// <code>ExclusiveStartKey</code> must specify the same segment whose previous
/// <code>Scan</code> returned the corresponding value of
/// <code>LastEvaluatedKey</code>.</p>
pub fn exclusive_start_key(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::AttributeValue>,
) -> Self {
self.inner = self.inner.exclusive_start_key(k, v);
self
}
/// <p>The primary key of the first item that this operation will evaluate. Use the value
/// that was returned for <code>LastEvaluatedKey</code> in the previous operation.</p>
/// <p>The data type for <code>ExclusiveStartKey</code> must be String, Number or Binary. No
/// set data types are allowed.</p>
/// <p>In a parallel scan, a <code>Scan</code> request that includes
/// <code>ExclusiveStartKey</code> must specify the same segment whose previous
/// <code>Scan</code> returned the corresponding value of
/// <code>LastEvaluatedKey</code>.</p>
pub fn set_exclusive_start_key(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::AttributeValue>,
>,
) -> Self {
self.inner = self.inner.set_exclusive_start_key(input);
self
}
/// <p>Determines the level of detail about provisioned throughput consumption that is
/// returned in the response:</p>
/// <ul>
/// <li>
/// <p>
/// <code>INDEXES</code> - The response includes the aggregate
/// <code>ConsumedCapacity</code> for the operation, together with
/// <code>ConsumedCapacity</code> for each table and secondary index that was
/// accessed.</p>
/// <p>Note that some operations, such as <code>GetItem</code> and
/// <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
/// specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
/// information for table(s).</p>
/// </li>
/// <li>
/// <p>
/// <code>TOTAL</code> - The response includes only the aggregate
/// <code>ConsumedCapacity</code> for the operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
/// response.</p>
/// </li>
/// </ul>
pub fn return_consumed_capacity(
mut self,
inp: crate::model::ReturnConsumedCapacity,
) -> Self {
self.inner = self.inner.return_consumed_capacity(inp);
self
}
/// <p>Determines the level of detail about provisioned throughput consumption that is
/// returned in the response:</p>
/// <ul>
/// <li>
/// <p>
/// <code>INDEXES</code> - The response includes the aggregate
/// <code>ConsumedCapacity</code> for the operation, together with
/// <code>ConsumedCapacity</code> for each table and secondary index that was
/// accessed.</p>
/// <p>Note that some operations, such as <code>GetItem</code> and
/// <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
/// specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
/// information for table(s).</p>
/// </li>
/// <li>
/// <p>
/// <code>TOTAL</code> - The response includes only the aggregate
/// <code>ConsumedCapacity</code> for the operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
/// response.</p>
/// </li>
/// </ul>
pub fn set_return_consumed_capacity(
mut self,
input: std::option::Option<crate::model::ReturnConsumedCapacity>,
) -> Self {
self.inner = self.inner.set_return_consumed_capacity(input);
self
}
/// <p>For a parallel <code>Scan</code> request, <code>TotalSegments</code> represents the
/// total number of segments into which the <code>Scan</code> operation will be divided. The
/// value of <code>TotalSegments</code> corresponds to the number of application workers
/// that will perform the parallel scan. For example, if you want to use four application
/// threads to scan a table or an index, specify a <code>TotalSegments</code> value of
/// 4.</p>
/// <p>The value for <code>TotalSegments</code> must be greater than or equal to 1, and less
/// than or equal to 1000000. If you specify a <code>TotalSegments</code> value of 1, the
/// <code>Scan</code> operation will be sequential rather than parallel.</p>
/// <p>If you specify <code>TotalSegments</code>, you must also specify
/// <code>Segment</code>.</p>
pub fn total_segments(mut self, inp: i32) -> Self {
self.inner = self.inner.total_segments(inp);
self
}
/// <p>For a parallel <code>Scan</code> request, <code>TotalSegments</code> represents the
/// total number of segments into which the <code>Scan</code> operation will be divided. The
/// value of <code>TotalSegments</code> corresponds to the number of application workers
/// that will perform the parallel scan. For example, if you want to use four application
/// threads to scan a table or an index, specify a <code>TotalSegments</code> value of
/// 4.</p>
/// <p>The value for <code>TotalSegments</code> must be greater than or equal to 1, and less
/// than or equal to 1000000. If you specify a <code>TotalSegments</code> value of 1, the
/// <code>Scan</code> operation will be sequential rather than parallel.</p>
/// <p>If you specify <code>TotalSegments</code>, you must also specify
/// <code>Segment</code>.</p>
pub fn set_total_segments(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_total_segments(input);
self
}
/// <p>For a parallel <code>Scan</code> request, <code>Segment</code> identifies an
/// individual segment to be scanned by an application worker.</p>
/// <p>Segment IDs are zero-based, so the first segment is always 0. For example, if you want
/// to use four application threads to scan a table or an index, then the first thread
/// specifies a <code>Segment</code> value of 0, the second thread specifies 1, and so
/// on.</p>
/// <p>The value of <code>LastEvaluatedKey</code> returned from a parallel <code>Scan</code>
/// request must be used as <code>ExclusiveStartKey</code> with the same segment ID in a
/// subsequent <code>Scan</code> operation.</p>
/// <p>The value for <code>Segment</code> must be greater than or equal to 0, and less than
/// the value provided for <code>TotalSegments</code>.</p>
/// <p>If you provide <code>Segment</code>, you must also provide
/// <code>TotalSegments</code>.</p>
pub fn segment(mut self, inp: i32) -> Self {
self.inner = self.inner.segment(inp);
self
}
/// <p>For a parallel <code>Scan</code> request, <code>Segment</code> identifies an
/// individual segment to be scanned by an application worker.</p>
/// <p>Segment IDs are zero-based, so the first segment is always 0. For example, if you want
/// to use four application threads to scan a table or an index, then the first thread
/// specifies a <code>Segment</code> value of 0, the second thread specifies 1, and so
/// on.</p>
/// <p>The value of <code>LastEvaluatedKey</code> returned from a parallel <code>Scan</code>
/// request must be used as <code>ExclusiveStartKey</code> with the same segment ID in a
/// subsequent <code>Scan</code> operation.</p>
/// <p>The value for <code>Segment</code> must be greater than or equal to 0, and less than
/// the value provided for <code>TotalSegments</code>.</p>
/// <p>If you provide <code>Segment</code>, you must also provide
/// <code>TotalSegments</code>.</p>
pub fn set_segment(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_segment(input);
self
}
/// <p>A string that identifies one or more attributes to retrieve from the specified table
/// or index. These attributes can include scalars, sets, or elements of a JSON document.
/// The attributes in the expression must be separated by commas.</p>
/// <p>If no attribute names are specified, then all attributes will be returned. If any of
/// the requested attributes are not found, they will not appear in the result.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn projection_expression(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.projection_expression(inp);
self
}
/// <p>A string that identifies one or more attributes to retrieve from the specified table
/// or index. These attributes can include scalars, sets, or elements of a JSON document.
/// The attributes in the expression must be separated by commas.</p>
/// <p>If no attribute names are specified, then all attributes will be returned. If any of
/// the requested attributes are not found, they will not appear in the result.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_projection_expression(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_projection_expression(input);
self
}
/// <p>A string that contains conditions that DynamoDB applies after the <code>Scan</code>
/// operation, but before the data is returned to you. Items that do not satisfy the
/// <code>FilterExpression</code> criteria are not returned.</p>
/// <note>
/// <p>A <code>FilterExpression</code> is applied after the items have already been read;
/// the process of filtering does not consume any additional read capacity units.</p>
/// </note>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults">Filter
/// Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn filter_expression(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.filter_expression(inp);
self
}
/// <p>A string that contains conditions that DynamoDB applies after the <code>Scan</code>
/// operation, but before the data is returned to you. Items that do not satisfy the
/// <code>FilterExpression</code> criteria are not returned.</p>
/// <note>
/// <p>A <code>FilterExpression</code> is applied after the items have already been read;
/// the process of filtering does not consume any additional read capacity units.</p>
/// </note>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults">Filter
/// Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_filter_expression(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_filter_expression(input);
self
}
/// Adds a key-value pair to `ExpressionAttributeNames`.
///
/// To override the contents of this collection use [`set_expression_attribute_names`](Self::set_expression_attribute_names).
///
/// <p>One or more substitution tokens for attribute names in an expression. The following
/// are some use cases for using <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>To access an attribute whose name conflicts with a DynamoDB reserved
/// word.</p>
/// </li>
/// <li>
/// <p>To create a placeholder for repeating occurrences of an attribute name in an
/// expression.</p>
/// </li>
/// <li>
/// <p>To prevent special characters in an attribute name from being misinterpreted
/// in an expression.</p>
/// </li>
/// </ul>
/// <p>Use the <b>#</b> character in an expression to dereference
/// an attribute name. For example, consider the following attribute name:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Percentile</code>
/// </p>
/// </li>
/// </ul>
/// <p>The name of this attribute conflicts with a reserved word, so it cannot be used
/// directly in an expression. (For the complete list of reserved words, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved Words</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>). To work around this, you could specify the following for
/// <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>{"#P":"Percentile"}</code>
/// </p>
/// </li>
/// </ul>
/// <p>You could then use this substitution in an expression, as in this example:</p>
/// <ul>
/// <li>
/// <p>
/// <code>#P = :val</code>
/// </p>
/// </li>
/// </ul>
/// <note>
/// <p>Tokens that begin with the <b>:</b> character are
/// <i>expression attribute values</i>, which are placeholders for the
/// actual value at runtime.</p>
/// </note>
/// <p>For more information on expression attribute names, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn expression_attribute_names(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.expression_attribute_names(k, v);
self
}
/// <p>One or more substitution tokens for attribute names in an expression. The following
/// are some use cases for using <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>To access an attribute whose name conflicts with a DynamoDB reserved
/// word.</p>
/// </li>
/// <li>
/// <p>To create a placeholder for repeating occurrences of an attribute name in an
/// expression.</p>
/// </li>
/// <li>
/// <p>To prevent special characters in an attribute name from being misinterpreted
/// in an expression.</p>
/// </li>
/// </ul>
/// <p>Use the <b>#</b> character in an expression to dereference
/// an attribute name. For example, consider the following attribute name:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Percentile</code>
/// </p>
/// </li>
/// </ul>
/// <p>The name of this attribute conflicts with a reserved word, so it cannot be used
/// directly in an expression. (For the complete list of reserved words, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved Words</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>). To work around this, you could specify the following for
/// <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>{"#P":"Percentile"}</code>
/// </p>
/// </li>
/// </ul>
/// <p>You could then use this substitution in an expression, as in this example:</p>
/// <ul>
/// <li>
/// <p>
/// <code>#P = :val</code>
/// </p>
/// </li>
/// </ul>
/// <note>
/// <p>Tokens that begin with the <b>:</b> character are
/// <i>expression attribute values</i>, which are placeholders for the
/// actual value at runtime.</p>
/// </note>
/// <p>For more information on expression attribute names, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_expression_attribute_names(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_expression_attribute_names(input);
self
}
/// Adds a key-value pair to `ExpressionAttributeValues`.
///
/// To override the contents of this collection use [`set_expression_attribute_values`](Self::set_expression_attribute_values).
///
/// <p>One or more values that can be substituted in an expression.</p>
/// <p>Use the <b>:</b> (colon) character in an expression to
/// dereference an attribute value. For example, suppose that you wanted to check whether
/// the value of the <code>ProductStatus</code> attribute was one of the following: </p>
/// <p>
/// <code>Available | Backordered | Discontinued</code>
/// </p>
/// <p>You would first need to specify <code>ExpressionAttributeValues</code> as
/// follows:</p>
/// <p>
/// <code>{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
/// ":disc":{"S":"Discontinued"} }</code>
/// </p>
/// <p>You could then use these values in an expression, such as this:</p>
/// <p>
/// <code>ProductStatus IN (:avail, :back, :disc)</code>
/// </p>
/// <p>For more information on expression attribute values, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Condition Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn expression_attribute_values(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::AttributeValue>,
) -> Self {
self.inner = self.inner.expression_attribute_values(k, v);
self
}
/// <p>One or more values that can be substituted in an expression.</p>
/// <p>Use the <b>:</b> (colon) character in an expression to
/// dereference an attribute value. For example, suppose that you wanted to check whether
/// the value of the <code>ProductStatus</code> attribute was one of the following: </p>
/// <p>
/// <code>Available | Backordered | Discontinued</code>
/// </p>
/// <p>You would first need to specify <code>ExpressionAttributeValues</code> as
/// follows:</p>
/// <p>
/// <code>{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
/// ":disc":{"S":"Discontinued"} }</code>
/// </p>
/// <p>You could then use these values in an expression, such as this:</p>
/// <p>
/// <code>ProductStatus IN (:avail, :back, :disc)</code>
/// </p>
/// <p>For more information on expression attribute values, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Condition Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_expression_attribute_values(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::AttributeValue>,
>,
) -> Self {
self.inner = self.inner.set_expression_attribute_values(input);
self
}
/// <p>A Boolean value that determines the read consistency model during the scan:</p>
/// <ul>
/// <li>
/// <p>If <code>ConsistentRead</code> is <code>false</code>, then the data returned
/// from <code>Scan</code> might not contain the results from other recently
/// completed write operations (<code>PutItem</code>, <code>UpdateItem</code>, or
/// <code>DeleteItem</code>).</p>
/// </li>
/// <li>
/// <p>If <code>ConsistentRead</code> is <code>true</code>, then all of the write
/// operations that completed before the <code>Scan</code> began are guaranteed to
/// be contained in the <code>Scan</code> response.</p>
/// </li>
/// </ul>
/// <p>The default setting for <code>ConsistentRead</code> is <code>false</code>.</p>
/// <p>The <code>ConsistentRead</code> parameter is not supported on global secondary
/// indexes. If you scan a global secondary index with <code>ConsistentRead</code> set to
/// true, you will receive a <code>ValidationException</code>.</p>
pub fn consistent_read(mut self, inp: bool) -> Self {
self.inner = self.inner.consistent_read(inp);
self
}
/// <p>A Boolean value that determines the read consistency model during the scan:</p>
/// <ul>
/// <li>
/// <p>If <code>ConsistentRead</code> is <code>false</code>, then the data returned
/// from <code>Scan</code> might not contain the results from other recently
/// completed write operations (<code>PutItem</code>, <code>UpdateItem</code>, or
/// <code>DeleteItem</code>).</p>
/// </li>
/// <li>
/// <p>If <code>ConsistentRead</code> is <code>true</code>, then all of the write
/// operations that completed before the <code>Scan</code> began are guaranteed to
/// be contained in the <code>Scan</code> response.</p>
/// </li>
/// </ul>
/// <p>The default setting for <code>ConsistentRead</code> is <code>false</code>.</p>
/// <p>The <code>ConsistentRead</code> parameter is not supported on global secondary
/// indexes. If you scan a global secondary index with <code>ConsistentRead</code> set to
/// true, you will receive a <code>ValidationException</code>.</p>
pub fn set_consistent_read(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_consistent_read(input);
self
}
}
/// Fluent builder constructing a request to `TagResource`.
///
/// <p>Associate a set of tags with an Amazon DynamoDB resource. You can then activate these
/// user-defined tags so that they appear on the Billing and Cost Management console for
/// cost allocation tracking. You can call TagResource up to five times per second, per
/// account. </p>
/// <p>For an overview on tagging DynamoDB resources, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html">Tagging for DynamoDB</a>
/// in the <i>Amazon DynamoDB Developer Guide</i>.</p>
#[derive(std::fmt::Debug)]
pub struct TagResource<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connector `C`, middleware `M`, retry policy `R`, and config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters set through the fluent setter methods.
    inner: crate::input::tag_resource_input::Builder,
}
impl<C, M, R> TagResource<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `TagResource` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// On failure an `SdkError` with additional details that can be matched against
    /// is returned. Retryable failures are retried twice by default; this is
    /// configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig) set
    /// when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::TagResourceOutput,
        aws_smithy_http::result::SdkError<crate::error::TagResourceError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::TagResourceInputOperationOutputAlias,
            crate::output::TagResourceOutput,
            crate::error::TagResourceError,
            crate::input::TagResourceInputOperationRetryAlias,
        >,
    {
        // Failures while assembling the request surface as construction errors,
        // not service errors.
        let input = self.inner.build().map_err(|source| {
            aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>Identifies the Amazon DynamoDB resource to which tags should be added. This value is
    /// an Amazon Resource Name (ARN).</p>
    pub fn resource_arn(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.resource_arn(inp);
        self.inner = updated;
        self
    }
    /// <p>Identifies the Amazon DynamoDB resource to which tags should be added. This value is
    /// an Amazon Resource Name (ARN).</p>
    pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_resource_arn(input);
        self.inner = updated;
        self
    }
    /// Appends an item to `Tags`.
    ///
    /// Use [`set_tags`](Self::set_tags) to replace the whole collection instead.
    ///
    /// <p>The tags to be assigned to the Amazon DynamoDB resource.</p>
    pub fn tags(mut self, inp: impl Into<crate::model::Tag>) -> Self {
        let updated = self.inner.tags(inp);
        self.inner = updated;
        self
    }
    /// <p>The tags to be assigned to the Amazon DynamoDB resource.</p>
    pub fn set_tags(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    ) -> Self {
        let updated = self.inner.set_tags(input);
        self.inner = updated;
        self
    }
}
/// Fluent builder constructing a request to `TransactGetItems`.
///
/// <p>
/// <code>TransactGetItems</code> is a synchronous operation that atomically retrieves
/// multiple items from one or more tables (but not from indexes) in a single account and
/// Region. A <code>TransactGetItems</code> call can contain up to 25
/// <code>TransactGetItem</code> objects, each of which contains a <code>Get</code>
/// structure that specifies an item to retrieve from a table in the account and Region. A
/// call to <code>TransactGetItems</code> cannot retrieve items from tables in more than one
/// Amazon Web Services account or Region. The aggregate size of the items in the
/// transaction cannot exceed 4 MB.</p>
/// <p>DynamoDB rejects the entire <code>TransactGetItems</code> request if any of
/// the following is true:</p>
/// <ul>
/// <li>
/// <p>A conflicting operation is in the process of updating an item to be
/// read.</p>
/// </li>
/// <li>
/// <p>There is insufficient provisioned capacity for the transaction to be
/// completed.</p>
/// </li>
/// <li>
/// <p>There is a user error, such as an invalid data format.</p>
/// </li>
/// <li>
/// <p>The aggregate size of the items in the transaction cannot exceed 4 MB.</p>
/// </li>
/// </ul>
#[derive(std::fmt::Debug)]
pub struct TransactGetItems<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connector `C`, middleware `M`, retry policy `R`, and config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters set through the fluent setter methods.
    inner: crate::input::transact_get_items_input::Builder,
}
impl<C, M, R> TransactGetItems<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `TransactGetItems` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// On failure an `SdkError` with additional details that can be matched against
    /// is returned. Retryable failures are retried twice by default; this is
    /// configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig) set
    /// when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::TransactGetItemsOutput,
        aws_smithy_http::result::SdkError<crate::error::TransactGetItemsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::TransactGetItemsInputOperationOutputAlias,
            crate::output::TransactGetItemsOutput,
            crate::error::TransactGetItemsError,
            crate::input::TransactGetItemsInputOperationRetryAlias,
        >,
    {
        // Failures while assembling the request surface as construction errors,
        // not service errors.
        let input = self.inner.build().map_err(|source| {
            aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?;
        self.handle.client.call(op).await
    }
    /// Appends an item to `TransactItems`.
    ///
    /// Use [`set_transact_items`](Self::set_transact_items) to replace the whole
    /// collection instead.
    ///
    /// <p>An ordered array of up to 25 <code>TransactGetItem</code> objects, each of which
    /// contains a <code>Get</code> structure.</p>
    pub fn transact_items(mut self, inp: impl Into<crate::model::TransactGetItem>) -> Self {
        let updated = self.inner.transact_items(inp);
        self.inner = updated;
        self
    }
    /// <p>An ordered array of up to 25 <code>TransactGetItem</code> objects, each of which
    /// contains a <code>Get</code> structure.</p>
    pub fn set_transact_items(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::TransactGetItem>>,
    ) -> Self {
        let updated = self.inner.set_transact_items(input);
        self.inner = updated;
        self
    }
    /// <p>A value of <code>TOTAL</code> causes consumed capacity information to be returned, and
    /// a value of <code>NONE</code> prevents that information from being returned. No other
    /// value is valid.</p>
    pub fn return_consumed_capacity(mut self, inp: crate::model::ReturnConsumedCapacity) -> Self {
        let updated = self.inner.return_consumed_capacity(inp);
        self.inner = updated;
        self
    }
    /// <p>A value of <code>TOTAL</code> causes consumed capacity information to be returned, and
    /// a value of <code>NONE</code> prevents that information from being returned. No other
    /// value is valid.</p>
    pub fn set_return_consumed_capacity(
        mut self,
        input: std::option::Option<crate::model::ReturnConsumedCapacity>,
    ) -> Self {
        let updated = self.inner.set_return_consumed_capacity(input);
        self.inner = updated;
        self
    }
}
/// Fluent builder constructing a request to `TransactWriteItems`.
///
/// <p>
/// <code>TransactWriteItems</code> is a synchronous write operation that groups up to 25
/// action requests. These actions can target items in different tables, but not in
/// different Amazon Web Services accounts or Regions, and no two actions can target the same
/// item. For example, you cannot both <code>ConditionCheck</code> and <code>Update</code>
/// the same item. The aggregate size of the items in the transaction cannot exceed 4
/// MB.</p>
///
/// <p>The actions are completed atomically so that either all of them succeed, or all of
/// them fail. They are defined by the following objects:</p>
///
/// <ul>
/// <li>
/// <p>
/// <code>Put</code> — Initiates a <code>PutItem</code>
/// operation to write a new item. This structure specifies the primary key of the
/// item to be written, the name of the table to write it in, an optional condition
/// expression that must be satisfied for the write to succeed, a list of the item's
/// attributes, and a field indicating whether to retrieve the item's attributes if
/// the condition is not met.</p>
/// </li>
/// <li>
/// <p>
/// <code>Update</code> — Initiates an <code>UpdateItem</code>
/// operation to update an existing item. This structure specifies the primary key
/// of the item to be updated, the name of the table where it resides, an optional
/// condition expression that must be satisfied for the update to succeed, an
/// expression that defines one or more attributes to be updated, and a field
/// indicating whether to retrieve the item's attributes if the condition is not
/// met.</p>
/// </li>
/// <li>
/// <p>
/// <code>Delete</code> — Initiates a <code>DeleteItem</code>
/// operation to delete an existing item. This structure specifies the primary key
/// of the item to be deleted, the name of the table where it resides, an optional
/// condition expression that must be satisfied for the deletion to succeed, and a
/// field indicating whether to retrieve the item's attributes if the condition is
/// not met.</p>
/// </li>
/// <li>
/// <p>
/// <code>ConditionCheck</code> — Applies a condition to an item
/// that is not being modified by the transaction. This structure specifies the
/// primary key of the item to be checked, the name of the table where it resides, a
/// condition expression that must be satisfied for the transaction to succeed, and
/// a field indicating whether to retrieve the item's attributes if the condition is
/// not met.</p>
/// </li>
/// </ul>
///
/// <p>DynamoDB rejects the entire <code>TransactWriteItems</code> request if any of the
/// following is true:</p>
/// <ul>
/// <li>
/// <p>A condition in one of the condition expressions is not met.</p>
/// </li>
/// <li>
/// <p>An ongoing operation is in the process of updating the same item.</p>
/// </li>
/// <li>
/// <p>There is insufficient provisioned capacity for the transaction to be
/// completed.</p>
/// </li>
/// <li>
/// <p>An item size becomes too large (bigger than 400 KB), a local secondary index
/// (LSI) becomes too large, or a similar validation error occurs because of changes
/// made by the transaction.</p>
/// </li>
/// <li>
/// <p>The aggregate size of the items in the transaction exceeds 4 MB.</p>
/// </li>
/// <li>
/// <p>There is a user error, such as an invalid data format.</p>
/// </li>
/// </ul>
#[derive(std::fmt::Debug)]
pub struct TransactWriteItems<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connector `C`, middleware `M`, retry policy `R`, and config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters set through the fluent setter methods.
    inner: crate::input::transact_write_items_input::Builder,
}
impl<C, M, R> TransactWriteItems<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `TransactWriteItems` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// On failure an `SdkError` with additional details that can be matched against
    /// is returned. Retryable failures are retried twice by default; this is
    /// configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig) set
    /// when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::TransactWriteItemsOutput,
        aws_smithy_http::result::SdkError<crate::error::TransactWriteItemsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::TransactWriteItemsInputOperationOutputAlias,
            crate::output::TransactWriteItemsOutput,
            crate::error::TransactWriteItemsError,
            crate::input::TransactWriteItemsInputOperationRetryAlias,
        >,
    {
        // Failures while assembling the request surface as construction errors,
        // not service errors.
        let input = self.inner.build().map_err(|source| {
            aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?;
        self.handle.client.call(op).await
    }
    /// Appends an item to `TransactItems`.
    ///
    /// Use [`set_transact_items`](Self::set_transact_items) to replace the whole
    /// collection instead.
    ///
    /// <p>An ordered array of up to 25 <code>TransactWriteItem</code> objects, each of which
    /// contains a <code>ConditionCheck</code>, <code>Put</code>, <code>Update</code>, or
    /// <code>Delete</code> object. These can operate on items in different tables, but the
    /// tables must reside in the same Amazon Web Services account and Region, and no two of them
    /// can operate on the same item. </p>
    pub fn transact_items(mut self, inp: impl Into<crate::model::TransactWriteItem>) -> Self {
        let updated = self.inner.transact_items(inp);
        self.inner = updated;
        self
    }
    /// <p>An ordered array of up to 25 <code>TransactWriteItem</code> objects, each of which
    /// contains a <code>ConditionCheck</code>, <code>Put</code>, <code>Update</code>, or
    /// <code>Delete</code> object. These can operate on items in different tables, but the
    /// tables must reside in the same Amazon Web Services account and Region, and no two of them
    /// can operate on the same item. </p>
    pub fn set_transact_items(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::TransactWriteItem>>,
    ) -> Self {
        let updated = self.inner.set_transact_items(input);
        self.inner = updated;
        self
    }
    /// <p>Determines the level of detail about provisioned throughput consumption that is
    /// returned in the response: <code>INDEXES</code> (aggregate plus per-table and
    /// per-index <code>ConsumedCapacity</code>), <code>TOTAL</code> (aggregate only), or
    /// <code>NONE</code> (no details). Operations that access no indexes, such as
    /// <code>GetItem</code> and <code>BatchGetItem</code>, return table-level information
    /// only even when <code>INDEXES</code> is specified.</p>
    pub fn return_consumed_capacity(mut self, inp: crate::model::ReturnConsumedCapacity) -> Self {
        let updated = self.inner.return_consumed_capacity(inp);
        self.inner = updated;
        self
    }
    /// <p>Determines the level of detail about provisioned throughput consumption that is
    /// returned in the response: <code>INDEXES</code> (aggregate plus per-table and
    /// per-index <code>ConsumedCapacity</code>), <code>TOTAL</code> (aggregate only), or
    /// <code>NONE</code> (no details). Operations that access no indexes, such as
    /// <code>GetItem</code> and <code>BatchGetItem</code>, return table-level information
    /// only even when <code>INDEXES</code> is specified.</p>
    pub fn set_return_consumed_capacity(
        mut self,
        input: std::option::Option<crate::model::ReturnConsumedCapacity>,
    ) -> Self {
        let updated = self.inner.set_return_consumed_capacity(input);
        self.inner = updated;
        self
    }
    /// <p>Determines whether item collection metrics are returned. If set to <code>SIZE</code>,
    /// the response includes statistics about item collections (if any) that were modified
    /// during the operation. If set to <code>NONE</code> (the default), no statistics are
    /// returned.</p>
    pub fn return_item_collection_metrics(
        mut self,
        inp: crate::model::ReturnItemCollectionMetrics,
    ) -> Self {
        let updated = self.inner.return_item_collection_metrics(inp);
        self.inner = updated;
        self
    }
    /// <p>Determines whether item collection metrics are returned. If set to <code>SIZE</code>,
    /// the response includes statistics about item collections (if any) that were modified
    /// during the operation. If set to <code>NONE</code> (the default), no statistics are
    /// returned.</p>
    pub fn set_return_item_collection_metrics(
        mut self,
        input: std::option::Option<crate::model::ReturnItemCollectionMetrics>,
    ) -> Self {
        let updated = self.inner.set_return_item_collection_metrics(input);
        self.inner = updated;
        self
    }
    /// <p>Providing a <code>ClientRequestToken</code> makes the call to
    /// <code>TransactWriteItems</code> idempotent: multiple identical calls have the same
    /// effect as one single call.</p>
    /// <p>Identical calls with the same token produce the same result on the server (no side
    /// effects), but the responses may differ: if the <code>ReturnConsumedCapacity</code>
    /// parameter is set, the initial call returns the write capacity units consumed in
    /// making the changes, while subsequent calls with the same token return the read
    /// capacity units consumed in reading the item.</p>
    /// <p>A client request token is valid for 10 minutes after the first request that uses it
    /// completes; after that, a request with the same token is treated as new. Do not
    /// resubmit the same request with the same token for more than 10 minutes, or the
    /// result might not be idempotent.</p>
    /// <p>Submitting a request with the same token but different parameters within the
    /// 10-minute idempotency window causes DynamoDB to return an
    /// <code>IdempotentParameterMismatch</code> exception.</p>
    pub fn client_request_token(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.client_request_token(inp);
        self.inner = updated;
        self
    }
    /// <p>Providing a <code>ClientRequestToken</code> makes the call to
    /// <code>TransactWriteItems</code> idempotent: multiple identical calls have the same
    /// effect as one single call.</p>
    /// <p>Identical calls with the same token produce the same result on the server (no side
    /// effects), but the responses may differ: if the <code>ReturnConsumedCapacity</code>
    /// parameter is set, the initial call returns the write capacity units consumed in
    /// making the changes, while subsequent calls with the same token return the read
    /// capacity units consumed in reading the item.</p>
    /// <p>A client request token is valid for 10 minutes after the first request that uses it
    /// completes; after that, a request with the same token is treated as new. Do not
    /// resubmit the same request with the same token for more than 10 minutes, or the
    /// result might not be idempotent.</p>
    /// <p>Submitting a request with the same token but different parameters within the
    /// 10-minute idempotency window causes DynamoDB to return an
    /// <code>IdempotentParameterMismatch</code> exception.</p>
    pub fn set_client_request_token(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let updated = self.inner.set_client_request_token(input);
        self.inner = updated;
        self
    }
}
/// Fluent builder constructing a request to `UntagResource`.
///
/// <p>Removes the association of tags from an Amazon DynamoDB resource. You can call
/// <code>UntagResource</code> up to five times per second, per account. </p>
/// <p>For an overview on tagging DynamoDB resources, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html">Tagging for DynamoDB</a>
/// in the <i>Amazon DynamoDB Developer Guide</i>.</p>
#[derive(std::fmt::Debug)]
pub struct UntagResource<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connector `C`, middleware `M`, retry policy `R`, and config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters set through the fluent setter methods.
    inner: crate::input::untag_resource_input::Builder,
}
impl<C, M, R> UntagResource<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `UntagResource` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// On failure an `SdkError` with additional details that can be matched against
    /// is returned. Retryable failures are retried twice by default; this is
    /// configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig) set
    /// when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UntagResourceOutput,
        aws_smithy_http::result::SdkError<crate::error::UntagResourceError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UntagResourceInputOperationOutputAlias,
            crate::output::UntagResourceOutput,
            crate::error::UntagResourceError,
            crate::input::UntagResourceInputOperationRetryAlias,
        >,
    {
        // Failures while assembling the request surface as construction errors,
        // not service errors.
        let input = self.inner.build().map_err(|source| {
            aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The DynamoDB resource that the tags will be removed from. This value is an Amazon
    /// Resource Name (ARN).</p>
    pub fn resource_arn(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.resource_arn(inp);
        self.inner = updated;
        self
    }
    /// <p>The DynamoDB resource that the tags will be removed from. This value is an Amazon
    /// Resource Name (ARN).</p>
    pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_resource_arn(input);
        self.inner = updated;
        self
    }
    /// Appends an item to `TagKeys`.
    ///
    /// Use [`set_tag_keys`](Self::set_tag_keys) to replace the whole collection instead.
    ///
    /// <p>A list of tag keys. Existing tags of the resource whose keys are members of this list
    /// will be removed from the DynamoDB resource.</p>
    pub fn tag_keys(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.tag_keys(inp);
        self.inner = updated;
        self
    }
    /// <p>A list of tag keys. Existing tags of the resource whose keys are members of this list
    /// will be removed from the DynamoDB resource.</p>
    pub fn set_tag_keys(
        mut self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        let updated = self.inner.set_tag_keys(input);
        self.inner = updated;
        self
    }
}
/// Fluent builder constructing a request to `UpdateContinuousBackups`.
///
/// <p>
/// <code>UpdateContinuousBackups</code> enables or disables point in time recovery for
/// the specified table. A successful <code>UpdateContinuousBackups</code> call returns the
/// current <code>ContinuousBackupsDescription</code>. Continuous backups are
/// <code>ENABLED</code> on all tables at table creation. If point in time recovery is
/// enabled, <code>PointInTimeRecoveryStatus</code> will be set to ENABLED.</p>
/// <p> Once continuous backups and point in time recovery are enabled, you can restore to
/// any point in time within <code>EarliestRestorableDateTime</code> and
/// <code>LatestRestorableDateTime</code>. </p>
/// <p>
/// <code>LatestRestorableDateTime</code> is typically 5 minutes before the current time.
/// You can restore your table to any point in time during the last 35 days. </p>
#[derive(std::fmt::Debug)]
pub struct UpdateContinuousBackups<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connector `C`, middleware `M`, retry policy `R`, and config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters set through the fluent setter methods.
    inner: crate::input::update_continuous_backups_input::Builder,
}
impl<C, M, R> UpdateContinuousBackups<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `UpdateContinuousBackups` fluent builder backed by the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// On failure an `SdkError` with additional details that can be matched against
    /// is returned. Retryable failures are retried twice by default; this is
    /// configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig) set
    /// when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateContinuousBackupsOutput,
        aws_smithy_http::result::SdkError<crate::error::UpdateContinuousBackupsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UpdateContinuousBackupsInputOperationOutputAlias,
            crate::output::UpdateContinuousBackupsOutput,
            crate::error::UpdateContinuousBackupsError,
            crate::input::UpdateContinuousBackupsInputOperationRetryAlias,
        >,
    {
        // Failures while assembling the request surface as construction errors,
        // not service errors.
        let input = self.inner.build().map_err(|source| {
            aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|source| {
                aws_smithy_http::result::SdkError::ConstructionFailure(source.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the table.</p>
    pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
        let updated = self.inner.table_name(inp);
        self.inner = updated;
        self
    }
    /// <p>The name of the table.</p>
    pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        let updated = self.inner.set_table_name(input);
        self.inner = updated;
        self
    }
    /// <p>Represents the settings used to enable point in time recovery.</p>
    pub fn point_in_time_recovery_specification(
        mut self,
        inp: crate::model::PointInTimeRecoverySpecification,
    ) -> Self {
        let updated = self.inner.point_in_time_recovery_specification(inp);
        self.inner = updated;
        self
    }
    /// <p>Represents the settings used to enable point in time recovery.</p>
    pub fn set_point_in_time_recovery_specification(
        mut self,
        input: std::option::Option<crate::model::PointInTimeRecoverySpecification>,
    ) -> Self {
        let updated = self.inner.set_point_in_time_recovery_specification(input);
        self.inner = updated;
        self
    }
}
/// Fluent builder constructing a request to `UpdateContributorInsights`.
///
/// <p>Updates the status for contributor insights for a specific table or index. CloudWatch
/// Contributor Insights for DynamoDB graphs display the partition key and (if applicable)
/// sort key of frequently accessed items and frequently throttled items in plaintext. If
/// you require the use of AWS Key Management Service (KMS) to encrypt this table’s
/// partition key and sort key data with an AWS managed key or customer managed key, you
/// should not enable CloudWatch Contributor Insights for DynamoDB for this table.</p>
#[derive(std::fmt::Debug)]
pub struct UpdateContributorInsights<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connection, middleware stack, retry policy, config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters until `send` builds the operation.
    inner: crate::input::update_contributor_insights_input::Builder,
}
impl<C, M, R> UpdateContributorInsights<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a fresh `UpdateContributorInsights` fluent builder from a
    /// shared client handle; all request fields start unset.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = Default::default();
        Self { handle, inner }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Returns an `SdkError` on failure; errors raised while building the input
    /// or the operation surface as `SdkError::ConstructionFailure`.
    ///
    /// Retryable failures are retried twice by default; this is configurable via
    /// the [RetryConfig](aws_smithy_types::retry::RetryConfig) set on the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateContributorInsightsOutput,
        aws_smithy_http::result::SdkError<crate::error::UpdateContributorInsightsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UpdateContributorInsightsInputOperationOutputAlias,
            crate::output::UpdateContributorInsightsOutput,
            crate::error::UpdateContributorInsightsError,
            crate::input::UpdateContributorInsightsInputOperationRetryAlias,
        >,
    {
        // Build the input, convert it into an HTTP operation, then hand it to
        // the client; both fallible steps map into `ConstructionFailure`.
        let op = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the table.</p>
    pub fn table_name(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.table_name(value);
        self
    }
    /// <p>The name of the table.</p>
    pub fn set_table_name(mut self, value: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_table_name(value);
        self
    }
    /// <p>The global secondary index name, if applicable.</p>
    pub fn index_name(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.index_name(value);
        self
    }
    /// <p>The global secondary index name, if applicable.</p>
    pub fn set_index_name(mut self, value: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_index_name(value);
        self
    }
    /// <p>Represents the contributor insights action.</p>
    pub fn contributor_insights_action(
        mut self,
        value: crate::model::ContributorInsightsAction,
    ) -> Self {
        self.inner = self.inner.contributor_insights_action(value);
        self
    }
    /// <p>Represents the contributor insights action.</p>
    pub fn set_contributor_insights_action(
        mut self,
        value: std::option::Option<crate::model::ContributorInsightsAction>,
    ) -> Self {
        self.inner = self.inner.set_contributor_insights_action(value);
        self
    }
}
/// Fluent builder constructing a request to `UpdateGlobalTable`.
///
/// <p>Adds or removes replicas in the specified global table. The global table must already
/// exist to be able to use this operation. Any replica to be added must be empty, have the
/// same name as the global table, have the same key schema, have DynamoDB Streams enabled,
/// and have the same provisioned and maximum write capacity units.</p>
/// <note>
/// <p>Although you can use <code>UpdateGlobalTable</code> to add replicas and remove
/// replicas in a single request, for simplicity we recommend that you issue separate
/// requests for adding or removing replicas.</p>
/// </note>
/// <p> If global secondary indexes are specified, then the following conditions must also be
/// met: </p>
/// <ul>
/// <li>
/// <p> The global secondary indexes must have the same name. </p>
/// </li>
/// <li>
/// <p> The global secondary indexes must have the same hash key and sort key (if
/// present). </p>
/// </li>
/// <li>
/// <p> The global secondary indexes must have the same provisioned and maximum write
/// capacity units. </p>
/// </li>
/// </ul>
#[derive(std::fmt::Debug)]
pub struct UpdateGlobalTable<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connection, middleware stack, retry policy, config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters until `send` builds the operation.
    inner: crate::input::update_global_table_input::Builder,
}
impl<C, M, R> UpdateGlobalTable<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a fresh `UpdateGlobalTable` fluent builder from a shared
    /// client handle; all request fields start unset.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = Default::default();
        Self { handle, inner }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Returns an `SdkError` on failure; errors raised while building the input
    /// or the operation surface as `SdkError::ConstructionFailure`.
    ///
    /// Retryable failures are retried twice by default; this is configurable via
    /// the [RetryConfig](aws_smithy_types::retry::RetryConfig) set on the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateGlobalTableOutput,
        aws_smithy_http::result::SdkError<crate::error::UpdateGlobalTableError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UpdateGlobalTableInputOperationOutputAlias,
            crate::output::UpdateGlobalTableOutput,
            crate::error::UpdateGlobalTableError,
            crate::input::UpdateGlobalTableInputOperationRetryAlias,
        >,
    {
        // Build the input, convert it into an HTTP operation, then hand it to
        // the client; both fallible steps map into `ConstructionFailure`.
        let op = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The global table name.</p>
    pub fn global_table_name(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.global_table_name(value);
        self
    }
    /// <p>The global table name.</p>
    pub fn set_global_table_name(
        mut self,
        value: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_global_table_name(value);
        self
    }
    /// Appends one item to `ReplicaUpdates`; call repeatedly to add several.
    ///
    /// To replace the whole collection instead, use
    /// [`set_replica_updates`](Self::set_replica_updates).
    ///
    /// <p>A list of Regions that should be added or removed from the global table.</p>
    pub fn replica_updates(mut self, value: impl Into<crate::model::ReplicaUpdate>) -> Self {
        self.inner = self.inner.replica_updates(value);
        self
    }
    /// <p>A list of Regions that should be added or removed from the global table.</p>
    pub fn set_replica_updates(
        mut self,
        value: std::option::Option<std::vec::Vec<crate::model::ReplicaUpdate>>,
    ) -> Self {
        self.inner = self.inner.set_replica_updates(value);
        self
    }
}
/// Fluent builder constructing a request to `UpdateGlobalTableSettings`.
///
/// <p>Updates settings for a global table.</p>
#[derive(std::fmt::Debug)]
pub struct UpdateGlobalTableSettings<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connection, middleware stack, retry policy, config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters until `send` builds the operation.
    inner: crate::input::update_global_table_settings_input::Builder,
}
impl<C, M, R> UpdateGlobalTableSettings<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a fresh `UpdateGlobalTableSettings` fluent builder from a
    /// shared client handle; all request fields start unset.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = Default::default();
        Self { handle, inner }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// Returns an `SdkError` on failure; errors raised while building the input
    /// or the operation surface as `SdkError::ConstructionFailure`.
    ///
    /// Retryable failures are retried twice by default; this is configurable via
    /// the [RetryConfig](aws_smithy_types::retry::RetryConfig) set on the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateGlobalTableSettingsOutput,
        aws_smithy_http::result::SdkError<crate::error::UpdateGlobalTableSettingsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UpdateGlobalTableSettingsInputOperationOutputAlias,
            crate::output::UpdateGlobalTableSettingsOutput,
            crate::error::UpdateGlobalTableSettingsError,
            crate::input::UpdateGlobalTableSettingsInputOperationRetryAlias,
        >,
    {
        // Build the input, convert it into an HTTP operation, then hand it to
        // the client; both fallible steps map into `ConstructionFailure`.
        let op = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the global table</p>
    pub fn global_table_name(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.global_table_name(value);
        self
    }
    /// <p>The name of the global table</p>
    pub fn set_global_table_name(
        mut self,
        value: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_global_table_name(value);
        self
    }
    /// <p>The billing mode of the global table; defaults to <code>PROVISIONED</code>
    /// when <code>GlobalTableBillingMode</code> is not specified.</p>
    /// <ul>
    /// <li>
    /// <p><code>PROVISIONED</code> - recommended for predictable workloads; sets the
    /// billing mode to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual">Provisioned Mode</a>.</p>
    /// </li>
    /// <li>
    /// <p><code>PAY_PER_REQUEST</code> - recommended for unpredictable workloads; sets
    /// the billing mode to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand">On-Demand Mode</a>.</p>
    /// </li>
    /// </ul>
    pub fn global_table_billing_mode(mut self, value: crate::model::BillingMode) -> Self {
        self.inner = self.inner.global_table_billing_mode(value);
        self
    }
    /// <p>The billing mode of the global table; defaults to <code>PROVISIONED</code>
    /// when <code>GlobalTableBillingMode</code> is not specified.</p>
    /// <ul>
    /// <li>
    /// <p><code>PROVISIONED</code> - recommended for predictable workloads; sets the
    /// billing mode to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual">Provisioned Mode</a>.</p>
    /// </li>
    /// <li>
    /// <p><code>PAY_PER_REQUEST</code> - recommended for unpredictable workloads; sets
    /// the billing mode to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand">On-Demand Mode</a>.</p>
    /// </li>
    /// </ul>
    pub fn set_global_table_billing_mode(
        mut self,
        value: std::option::Option<crate::model::BillingMode>,
    ) -> Self {
        self.inner = self.inner.set_global_table_billing_mode(value);
        self
    }
    /// <p>The maximum number of writes consumed per second before DynamoDB returns a
    /// <code>ThrottlingException.</code></p>
    pub fn global_table_provisioned_write_capacity_units(mut self, value: i64) -> Self {
        self.inner = self
            .inner
            .global_table_provisioned_write_capacity_units(value);
        self
    }
    /// <p>The maximum number of writes consumed per second before DynamoDB returns a
    /// <code>ThrottlingException.</code></p>
    pub fn set_global_table_provisioned_write_capacity_units(
        mut self,
        value: std::option::Option<i64>,
    ) -> Self {
        self.inner = self
            .inner
            .set_global_table_provisioned_write_capacity_units(value);
        self
    }
    /// <p>Auto scaling settings for managing provisioned write capacity for the global
    /// table.</p>
    pub fn global_table_provisioned_write_capacity_auto_scaling_settings_update(
        mut self,
        value: crate::model::AutoScalingSettingsUpdate,
    ) -> Self {
        self.inner = self
            .inner
            .global_table_provisioned_write_capacity_auto_scaling_settings_update(value);
        self
    }
    /// <p>Auto scaling settings for managing provisioned write capacity for the global
    /// table.</p>
    pub fn set_global_table_provisioned_write_capacity_auto_scaling_settings_update(
        mut self,
        value: std::option::Option<crate::model::AutoScalingSettingsUpdate>,
    ) -> Self {
        self.inner = self
            .inner
            .set_global_table_provisioned_write_capacity_auto_scaling_settings_update(value);
        self
    }
    /// Appends one item to `GlobalTableGlobalSecondaryIndexSettingsUpdate`; call
    /// repeatedly to add several.
    ///
    /// To replace the whole collection instead, use
    /// [`set_global_table_global_secondary_index_settings_update`](Self::set_global_table_global_secondary_index_settings_update).
    ///
    /// <p>Represents the settings of a global secondary index for a global table that
    /// will be modified.</p>
    pub fn global_table_global_secondary_index_settings_update(
        mut self,
        value: impl Into<crate::model::GlobalTableGlobalSecondaryIndexSettingsUpdate>,
    ) -> Self {
        self.inner = self
            .inner
            .global_table_global_secondary_index_settings_update(value);
        self
    }
    /// <p>Represents the settings of a global secondary index for a global table that
    /// will be modified.</p>
    pub fn set_global_table_global_secondary_index_settings_update(
        mut self,
        value: std::option::Option<
            std::vec::Vec<crate::model::GlobalTableGlobalSecondaryIndexSettingsUpdate>,
        >,
    ) -> Self {
        self.inner = self
            .inner
            .set_global_table_global_secondary_index_settings_update(value);
        self
    }
    /// Appends one item to `ReplicaSettingsUpdate`; call repeatedly to add several.
    ///
    /// To replace the whole collection instead, use
    /// [`set_replica_settings_update`](Self::set_replica_settings_update).
    ///
    /// <p>Represents the settings for a global table in a Region that will be modified.</p>
    pub fn replica_settings_update(
        mut self,
        value: impl Into<crate::model::ReplicaSettingsUpdate>,
    ) -> Self {
        self.inner = self.inner.replica_settings_update(value);
        self
    }
    /// <p>Represents the settings for a global table in a Region that will be modified.</p>
    pub fn set_replica_settings_update(
        mut self,
        value: std::option::Option<std::vec::Vec<crate::model::ReplicaSettingsUpdate>>,
    ) -> Self {
        self.inner = self.inner.set_replica_settings_update(value);
        self
    }
}
/// Fluent builder constructing a request to `UpdateItem`.
///
/// <p>Edits an existing item's attributes, or adds a new item to the table if it does not
/// already exist. You can put, delete, or add attribute values. You can also perform a
/// conditional update on an existing item (insert a new attribute name-value pair if it
/// doesn't exist, or replace an existing name-value pair if it has certain expected
/// attribute values).</p>
/// <p>You can also return the item's attribute values in the same <code>UpdateItem</code>
/// operation using the <code>ReturnValues</code> parameter.</p>
#[derive(std::fmt::Debug)]
pub struct UpdateItem<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connection, middleware stack, retry policy, config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters until `send` builds the operation.
    inner: crate::input::update_item_input::Builder,
}
impl<C, M, R> UpdateItem<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UpdateItem`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateItemOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateItemError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UpdateItemInputOperationOutputAlias,
crate::output::UpdateItemOutput,
crate::error::UpdateItemError,
crate::input::UpdateItemInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the table containing the item to update.</p>
pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_name(inp);
self
}
/// <p>The name of the table containing the item to update.</p>
pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_name(input);
self
}
/// Adds a key-value pair to `Key`.
///
/// To override the contents of this collection use [`set_key`](Self::set_key).
///
/// <p>The primary key of the item to be updated. Each element consists of an attribute name
/// and a value for that attribute.</p>
/// <p>For the primary key, you must provide all of the attributes. For example, with a
/// simple primary key, you only need to provide a value for the partition key. For a
/// composite primary key, you must provide values for both the partition key and the sort
/// key.</p>
pub fn key(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::AttributeValue>,
) -> Self {
self.inner = self.inner.key(k, v);
self
}
/// <p>The primary key of the item to be updated. Each element consists of an attribute name
/// and a value for that attribute.</p>
/// <p>For the primary key, you must provide all of the attributes. For example, with a
/// simple primary key, you only need to provide a value for the partition key. For a
/// composite primary key, you must provide values for both the partition key and the sort
/// key.</p>
pub fn set_key(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::AttributeValue>,
>,
) -> Self {
self.inner = self.inner.set_key(input);
self
}
/// Adds a key-value pair to `AttributeUpdates`.
///
/// To override the contents of this collection use [`set_attribute_updates`](Self::set_attribute_updates).
///
/// <p>This is a legacy parameter. Use <code>UpdateExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html">AttributeUpdates</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn attribute_updates(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::AttributeValueUpdate>,
) -> Self {
self.inner = self.inner.attribute_updates(k, v);
self
}
/// <p>This is a legacy parameter. Use <code>UpdateExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html">AttributeUpdates</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_attribute_updates(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::AttributeValueUpdate>,
>,
) -> Self {
self.inner = self.inner.set_attribute_updates(input);
self
}
/// Adds a key-value pair to `Expected`.
///
/// To override the contents of this collection use [`set_expected`](Self::set_expected).
///
/// <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html">Expected</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn expected(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::ExpectedAttributeValue>,
) -> Self {
self.inner = self.inner.expected(k, v);
self
}
/// <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html">Expected</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_expected(
mut self,
input: std::option::Option<
std::collections::HashMap<
std::string::String,
crate::model::ExpectedAttributeValue,
>,
>,
) -> Self {
self.inner = self.inner.set_expected(input);
self
}
/// <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html">ConditionalOperator</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn conditional_operator(mut self, inp: crate::model::ConditionalOperator) -> Self {
self.inner = self.inner.conditional_operator(inp);
self
}
/// <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
/// information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html">ConditionalOperator</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_conditional_operator(
mut self,
input: std::option::Option<crate::model::ConditionalOperator>,
) -> Self {
self.inner = self.inner.set_conditional_operator(input);
self
}
/// <p>Use <code>ReturnValues</code> if you want to get the item attributes as they appear
/// before or after they are updated. For <code>UpdateItem</code>, the valid values
/// are:</p>
/// <ul>
/// <li>
/// <p>
/// <code>NONE</code> - If <code>ReturnValues</code> is not specified, or if its
/// value is <code>NONE</code>, then nothing is returned. (This setting is the
/// default for <code>ReturnValues</code>.)</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL_OLD</code> - Returns all of the attributes of the item, as they
/// appeared before the UpdateItem operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>UPDATED_OLD</code> - Returns only the updated attributes, as they appeared
/// before the UpdateItem operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL_NEW</code> - Returns all of the attributes of the item, as they appear
/// after the UpdateItem operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>UPDATED_NEW</code> - Returns only the updated attributes, as they appear
/// after the UpdateItem operation.</p>
/// </li>
/// </ul>
/// <p>There is no additional cost associated with requesting a return value aside from the
/// small network and processing overhead of receiving a larger response. No read capacity
/// units are consumed.</p>
/// <p>The values returned are strongly consistent.</p>
pub fn return_values(mut self, inp: crate::model::ReturnValue) -> Self {
self.inner = self.inner.return_values(inp);
self
}
/// <p>Use <code>ReturnValues</code> if you want to get the item attributes as they appear
/// before or after they are updated. For <code>UpdateItem</code>, the valid values
/// are:</p>
/// <ul>
/// <li>
/// <p>
/// <code>NONE</code> - If <code>ReturnValues</code> is not specified, or if its
/// value is <code>NONE</code>, then nothing is returned. (This setting is the
/// default for <code>ReturnValues</code>.)</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL_OLD</code> - Returns all of the attributes of the item, as they
/// appeared before the UpdateItem operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>UPDATED_OLD</code> - Returns only the updated attributes, as they appeared
/// before the UpdateItem operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>ALL_NEW</code> - Returns all of the attributes of the item, as they appear
/// after the UpdateItem operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>UPDATED_NEW</code> - Returns only the updated attributes, as they appear
/// after the UpdateItem operation.</p>
/// </li>
/// </ul>
/// <p>There is no additional cost associated with requesting a return value aside from the
/// small network and processing overhead of receiving a larger response. No read capacity
/// units are consumed.</p>
/// <p>The values returned are strongly consistent.</p>
pub fn set_return_values(
mut self,
input: std::option::Option<crate::model::ReturnValue>,
) -> Self {
self.inner = self.inner.set_return_values(input);
self
}
/// <p>Determines the level of detail about provisioned throughput consumption that is
/// returned in the response:</p>
/// <ul>
/// <li>
/// <p>
/// <code>INDEXES</code> - The response includes the aggregate
/// <code>ConsumedCapacity</code> for the operation, together with
/// <code>ConsumedCapacity</code> for each table and secondary index that was
/// accessed.</p>
/// <p>Note that some operations, such as <code>GetItem</code> and
/// <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
/// specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
/// information for table(s).</p>
/// </li>
/// <li>
/// <p>
/// <code>TOTAL</code> - The response includes only the aggregate
/// <code>ConsumedCapacity</code> for the operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
/// response.</p>
/// </li>
/// </ul>
pub fn return_consumed_capacity(
mut self,
inp: crate::model::ReturnConsumedCapacity,
) -> Self {
self.inner = self.inner.return_consumed_capacity(inp);
self
}
/// <p>Determines the level of detail about provisioned throughput consumption that is
/// returned in the response:</p>
/// <ul>
/// <li>
/// <p>
/// <code>INDEXES</code> - The response includes the aggregate
/// <code>ConsumedCapacity</code> for the operation, together with
/// <code>ConsumedCapacity</code> for each table and secondary index that was
/// accessed.</p>
/// <p>Note that some operations, such as <code>GetItem</code> and
/// <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
/// specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
/// information for table(s).</p>
/// </li>
/// <li>
/// <p>
/// <code>TOTAL</code> - The response includes only the aggregate
/// <code>ConsumedCapacity</code> for the operation.</p>
/// </li>
/// <li>
/// <p>
/// <code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
/// response.</p>
/// </li>
/// </ul>
pub fn set_return_consumed_capacity(
mut self,
input: std::option::Option<crate::model::ReturnConsumedCapacity>,
) -> Self {
self.inner = self.inner.set_return_consumed_capacity(input);
self
}
/// <p>Determines whether item collection metrics are returned. If set to <code>SIZE</code>,
/// the response includes statistics about item collections, if any, that were modified
/// during the operation are returned in the response. If set to <code>NONE</code> (the
/// default), no statistics are returned.</p>
pub fn return_item_collection_metrics(
mut self,
inp: crate::model::ReturnItemCollectionMetrics,
) -> Self {
self.inner = self.inner.return_item_collection_metrics(inp);
self
}
/// <p>Determines whether item collection metrics are returned. If set to <code>SIZE</code>,
/// the response includes statistics about item collections, if any, that were modified
/// during the operation are returned in the response. If set to <code>NONE</code> (the
/// default), no statistics are returned.</p>
pub fn set_return_item_collection_metrics(
mut self,
input: std::option::Option<crate::model::ReturnItemCollectionMetrics>,
) -> Self {
self.inner = self.inner.set_return_item_collection_metrics(input);
self
}
/// <p>An expression that defines one or more attributes to be updated, the action to be
/// performed on them, and new values for them.</p>
/// <p>The following action values are available for <code>UpdateExpression</code>.</p>
/// <ul>
/// <li>
/// <p>
/// <code>SET</code> - Adds one or more attributes and values to an item. If any of
/// these attributes already exist, they are replaced by the new values. You can
/// also use <code>SET</code> to add or subtract from an attribute that is of type
/// Number. For example: <code>SET myNum = myNum + :val</code>
/// </p>
/// <p>
/// <code>SET</code> supports the following functions:</p>
/// <ul>
/// <li>
/// <p>
/// <code>if_not_exists (path, operand)</code> - if the item does not
/// contain an attribute at the specified path, then
/// <code>if_not_exists</code> evaluates to operand; otherwise, it
/// evaluates to path. You can use this function to avoid overwriting an
/// attribute that may already be present in the item.</p>
/// </li>
/// <li>
/// <p>
/// <code>list_append (operand, operand)</code> - evaluates to a list with a
/// new element added to it. You can append the new element to the start or
/// the end of the list by reversing the order of the operands.</p>
/// </li>
/// </ul>
/// <p>These function names are case-sensitive.</p>
/// </li>
/// <li>
/// <p>
/// <code>REMOVE</code> - Removes one or more attributes from an item.</p>
/// </li>
/// <li>
/// <p>
/// <code>ADD</code> - Adds the specified value to the item, if the attribute does
/// not already exist. If the attribute does exist, then the behavior of
/// <code>ADD</code> depends on the data type of the attribute:</p>
/// <ul>
/// <li>
/// <p>If the existing attribute is a number, and if <code>Value</code> is
/// also a number, then <code>Value</code> is mathematically added to the
/// existing attribute. If <code>Value</code> is a negative number, then it
/// is subtracted from the existing attribute.</p>
/// <note>
/// <p>If you use <code>ADD</code> to increment or decrement a number
/// value for an item that doesn't exist before the update, DynamoDB
/// uses <code>0</code> as the initial value.</p>
/// <p>Similarly, if you use <code>ADD</code> for an existing item to
/// increment or decrement an attribute value that doesn't exist before
/// the update, DynamoDB uses <code>0</code> as the initial value. For
/// example, suppose that the item you want to update doesn't have an
/// attribute named <code>itemcount</code>, but you decide to
/// <code>ADD</code> the number <code>3</code> to this attribute
/// anyway. DynamoDB will create the <code>itemcount</code> attribute,
/// set its initial value to <code>0</code>, and finally add
/// <code>3</code> to it. The result will be a new
/// <code>itemcount</code> attribute in the item, with a value of
/// <code>3</code>.</p>
/// </note>
/// </li>
/// <li>
/// <p>If the existing data type is a set and if <code>Value</code> is also a
/// set, then <code>Value</code> is added to the existing set. For example,
/// if the attribute value is the set <code>[1,2]</code>, and the
/// <code>ADD</code> action specified <code>[3]</code>, then the final
/// attribute value is <code>[1,2,3]</code>. An error occurs if an
/// <code>ADD</code> action is specified for a set attribute and the
/// attribute type specified does not match the existing set type. </p>
/// <p>Both sets must have the same primitive data type. For example, if the
/// existing data type is a set of strings, the <code>Value</code> must also
/// be a set of strings.</p>
/// </li>
/// </ul>
/// <important>
/// <p>The <code>ADD</code> action only supports Number and set data types. In
/// addition, <code>ADD</code> can only be used on top-level attributes, not
/// nested attributes.</p>
/// </important>
/// </li>
/// <li>
/// <p>
/// <code>DELETE</code> - Deletes an element from a set.</p>
/// <p>If a set of values is specified, then those values are subtracted from the old
/// set. For example, if the attribute value was the set <code>[a,b,c]</code> and
/// the <code>DELETE</code> action specifies <code>[a,c]</code>, then the final
/// attribute value is <code>[b]</code>. Specifying an empty set is an error.</p>
/// <important>
/// <p>The <code>DELETE</code> action only supports set data types. In addition,
/// <code>DELETE</code> can only be used on top-level attributes, not nested
/// attributes.</p>
/// </important>
///
/// </li>
/// </ul>
/// <p>You can have many actions in a single expression, such as the following: <code>SET
/// a=:value1, b=:value2 DELETE :value3, :value4, :value5</code>
/// </p>
/// <p>For more information on update expressions, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html">Modifying
/// Items and Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn update_expression(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.update_expression(inp);
self
}
/// <p>An expression that defines one or more attributes to be updated, the action to be
/// performed on them, and new values for them.</p>
/// <p>The following action values are available for <code>UpdateExpression</code>.</p>
/// <ul>
/// <li>
/// <p>
/// <code>SET</code> - Adds one or more attributes and values to an item. If any of
/// these attributes already exist, they are replaced by the new values. You can
/// also use <code>SET</code> to add or subtract from an attribute that is of type
/// Number. For example: <code>SET myNum = myNum + :val</code>
/// </p>
/// <p>
/// <code>SET</code> supports the following functions:</p>
/// <ul>
/// <li>
/// <p>
/// <code>if_not_exists (path, operand)</code> - if the item does not
/// contain an attribute at the specified path, then
/// <code>if_not_exists</code> evaluates to operand; otherwise, it
/// evaluates to path. You can use this function to avoid overwriting an
/// attribute that may already be present in the item.</p>
/// </li>
/// <li>
/// <p>
/// <code>list_append (operand, operand)</code> - evaluates to a list with a
/// new element added to it. You can append the new element to the start or
/// the end of the list by reversing the order of the operands.</p>
/// </li>
/// </ul>
/// <p>These function names are case-sensitive.</p>
/// </li>
/// <li>
/// <p>
/// <code>REMOVE</code> - Removes one or more attributes from an item.</p>
/// </li>
/// <li>
/// <p>
/// <code>ADD</code> - Adds the specified value to the item, if the attribute does
/// not already exist. If the attribute does exist, then the behavior of
/// <code>ADD</code> depends on the data type of the attribute:</p>
/// <ul>
/// <li>
/// <p>If the existing attribute is a number, and if <code>Value</code> is
/// also a number, then <code>Value</code> is mathematically added to the
/// existing attribute. If <code>Value</code> is a negative number, then it
/// is subtracted from the existing attribute.</p>
/// <note>
/// <p>If you use <code>ADD</code> to increment or decrement a number
/// value for an item that doesn't exist before the update, DynamoDB
/// uses <code>0</code> as the initial value.</p>
/// <p>Similarly, if you use <code>ADD</code> for an existing item to
/// increment or decrement an attribute value that doesn't exist before
/// the update, DynamoDB uses <code>0</code> as the initial value. For
/// example, suppose that the item you want to update doesn't have an
/// attribute named <code>itemcount</code>, but you decide to
/// <code>ADD</code> the number <code>3</code> to this attribute
/// anyway. DynamoDB will create the <code>itemcount</code> attribute,
/// set its initial value to <code>0</code>, and finally add
/// <code>3</code> to it. The result will be a new
/// <code>itemcount</code> attribute in the item, with a value of
/// <code>3</code>.</p>
/// </note>
/// </li>
/// <li>
/// <p>If the existing data type is a set and if <code>Value</code> is also a
/// set, then <code>Value</code> is added to the existing set. For example,
/// if the attribute value is the set <code>[1,2]</code>, and the
/// <code>ADD</code> action specified <code>[3]</code>, then the final
/// attribute value is <code>[1,2,3]</code>. An error occurs if an
/// <code>ADD</code> action is specified for a set attribute and the
/// attribute type specified does not match the existing set type. </p>
/// <p>Both sets must have the same primitive data type. For example, if the
/// existing data type is a set of strings, the <code>Value</code> must also
/// be a set of strings.</p>
/// </li>
/// </ul>
/// <important>
/// <p>The <code>ADD</code> action only supports Number and set data types. In
/// addition, <code>ADD</code> can only be used on top-level attributes, not
/// nested attributes.</p>
/// </important>
/// </li>
/// <li>
/// <p>
/// <code>DELETE</code> - Deletes an element from a set.</p>
/// <p>If a set of values is specified, then those values are subtracted from the old
/// set. For example, if the attribute value was the set <code>[a,b,c]</code> and
/// the <code>DELETE</code> action specifies <code>[a,c]</code>, then the final
/// attribute value is <code>[b]</code>. Specifying an empty set is an error.</p>
/// <important>
/// <p>The <code>DELETE</code> action only supports set data types. In addition,
/// <code>DELETE</code> can only be used on top-level attributes, not nested
/// attributes.</p>
/// </important>
///
/// </li>
/// </ul>
/// <p>You can have many actions in a single expression, such as the following: <code>SET
/// a=:value1, b=:value2 DELETE :value3, :value4, :value5</code>
/// </p>
/// <p>For more information on update expressions, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html">Modifying
/// Items and Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_update_expression(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_update_expression(input);
self
}
/// <p>A condition that must be satisfied in order for a conditional update to
/// succeed.</p>
/// <p>An expression can contain any of the following:</p>
/// <ul>
/// <li>
/// <p>Functions: <code>attribute_exists | attribute_not_exists | attribute_type |
/// contains | begins_with | size</code>
/// </p>
/// <p>These function names are case-sensitive.</p>
/// </li>
/// <li>
/// <p>Comparison operators: <code>= | <> |
/// < | > | <= | >= |
/// BETWEEN | IN </code>
/// </p>
/// </li>
/// <li>
/// <p> Logical operators: <code>AND | OR | NOT</code>
/// </p>
/// </li>
/// </ul>
/// <p>For more information about condition expressions, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Specifying Conditions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn condition_expression(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.condition_expression(inp);
self
}
/// <p>A condition that must be satisfied in order for a conditional update to
/// succeed.</p>
/// <p>An expression can contain any of the following:</p>
/// <ul>
/// <li>
/// <p>Functions: <code>attribute_exists | attribute_not_exists | attribute_type |
/// contains | begins_with | size</code>
/// </p>
/// <p>These function names are case-sensitive.</p>
/// </li>
/// <li>
/// <p>Comparison operators: <code>= | <> |
/// < | > | <= | >= |
/// BETWEEN | IN </code>
/// </p>
/// </li>
/// <li>
/// <p> Logical operators: <code>AND | OR | NOT</code>
/// </p>
/// </li>
/// </ul>
/// <p>For more information about condition expressions, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Specifying Conditions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_condition_expression(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_condition_expression(input);
self
}
/// Adds a key-value pair to `ExpressionAttributeNames`.
///
/// To override the contents of this collection use [`set_expression_attribute_names`](Self::set_expression_attribute_names).
///
/// <p>One or more substitution tokens for attribute names in an expression. The following
/// are some use cases for using <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>To access an attribute whose name conflicts with a DynamoDB reserved
/// word.</p>
/// </li>
/// <li>
/// <p>To create a placeholder for repeating occurrences of an attribute name in an
/// expression.</p>
/// </li>
/// <li>
/// <p>To prevent special characters in an attribute name from being misinterpreted
/// in an expression.</p>
/// </li>
/// </ul>
/// <p>Use the <b>#</b> character in an expression to dereference
/// an attribute name. For example, consider the following attribute name:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Percentile</code>
/// </p>
/// </li>
/// </ul>
/// <p>The name of this attribute conflicts with a reserved word, so it cannot be used
/// directly in an expression. (For the complete list of reserved words, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved Words</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.) To work around this, you could specify the following for
/// <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>{"#P":"Percentile"}</code>
/// </p>
/// </li>
/// </ul>
/// <p>You could then use this substitution in an expression, as in this example:</p>
/// <ul>
/// <li>
/// <p>
/// <code>#P = :val</code>
/// </p>
/// </li>
/// </ul>
/// <note>
/// <p>Tokens that begin with the <b>:</b> character are
/// <i>expression attribute values</i>, which are placeholders for the
/// actual value at runtime.</p>
/// </note>
/// <p>For more information about expression attribute names, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn expression_attribute_names(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.expression_attribute_names(k, v);
self
}
/// <p>One or more substitution tokens for attribute names in an expression. The following
/// are some use cases for using <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>To access an attribute whose name conflicts with a DynamoDB reserved
/// word.</p>
/// </li>
/// <li>
/// <p>To create a placeholder for repeating occurrences of an attribute name in an
/// expression.</p>
/// </li>
/// <li>
/// <p>To prevent special characters in an attribute name from being misinterpreted
/// in an expression.</p>
/// </li>
/// </ul>
/// <p>Use the <b>#</b> character in an expression to dereference
/// an attribute name. For example, consider the following attribute name:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Percentile</code>
/// </p>
/// </li>
/// </ul>
/// <p>The name of this attribute conflicts with a reserved word, so it cannot be used
/// directly in an expression. (For the complete list of reserved words, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html">Reserved Words</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.) To work around this, you could specify the following for
/// <code>ExpressionAttributeNames</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>{"#P":"Percentile"}</code>
/// </p>
/// </li>
/// </ul>
/// <p>You could then use this substitution in an expression, as in this example:</p>
/// <ul>
/// <li>
/// <p>
/// <code>#P = :val</code>
/// </p>
/// </li>
/// </ul>
/// <note>
/// <p>Tokens that begin with the <b>:</b> character are
/// <i>expression attribute values</i>, which are placeholders for the
/// actual value at runtime.</p>
/// </note>
/// <p>For more information about expression attribute names, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html">Specifying Item Attributes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_expression_attribute_names(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_expression_attribute_names(input);
self
}
/// Adds a key-value pair to `ExpressionAttributeValues`.
///
/// To override the contents of this collection use [`set_expression_attribute_values`](Self::set_expression_attribute_values).
///
/// <p>One or more values that can be substituted in an expression.</p>
/// <p>Use the <b>:</b> (colon) character in an expression to
/// dereference an attribute value. For example, suppose that you wanted to check whether
/// the value of the <code>ProductStatus</code> attribute was one of the following: </p>
/// <p>
/// <code>Available | Backordered | Discontinued</code>
/// </p>
/// <p>You would first need to specify <code>ExpressionAttributeValues</code> as
/// follows:</p>
/// <p>
/// <code>{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
/// ":disc":{"S":"Discontinued"} }</code>
/// </p>
/// <p>You could then use these values in an expression, such as this:</p>
/// <p>
/// <code>ProductStatus IN (:avail, :back, :disc)</code>
/// </p>
/// <p>For more information on expression attribute values, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Condition Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn expression_attribute_values(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::AttributeValue>,
) -> Self {
self.inner = self.inner.expression_attribute_values(k, v);
self
}
/// <p>One or more values that can be substituted in an expression.</p>
/// <p>Use the <b>:</b> (colon) character in an expression to
/// dereference an attribute value. For example, suppose that you wanted to check whether
/// the value of the <code>ProductStatus</code> attribute was one of the following: </p>
/// <p>
/// <code>Available | Backordered | Discontinued</code>
/// </p>
/// <p>You would first need to specify <code>ExpressionAttributeValues</code> as
/// follows:</p>
/// <p>
/// <code>{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
/// ":disc":{"S":"Discontinued"} }</code>
/// </p>
/// <p>You could then use these values in an expression, such as this:</p>
/// <p>
/// <code>ProductStatus IN (:avail, :back, :disc)</code>
/// </p>
/// <p>For more information on expression attribute values, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html">Condition Expressions</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>.</p>
pub fn set_expression_attribute_values(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::AttributeValue>,
>,
) -> Self {
self.inner = self.inner.set_expression_attribute_values(input);
self
}
}
    /// Fluent builder constructing a request to `UpdateTable`.
    ///
    /// <p>Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB
    /// Streams settings for a given table.</p>
    /// <p>You can only perform one of the following operations at once:</p>
    /// <ul>
    /// <li>
    /// <p>Modify the provisioned throughput settings of the table.</p>
    /// </li>
    /// <li>
    /// <p>Enable or disable DynamoDB Streams on the table.</p>
    /// </li>
    /// <li>
    /// <p>Remove a global secondary index from the table.</p>
    /// </li>
    /// <li>
    /// <p>Create a new global secondary index on the table. After the index begins
    /// backfilling, you can use <code>UpdateTable</code> to perform other
    /// operations.</p>
    /// </li>
    /// </ul>
    /// <p>
    /// <code>UpdateTable</code> is an asynchronous operation; while it is executing, the table
    /// status changes from <code>ACTIVE</code> to <code>UPDATING</code>. While it is
    /// <code>UPDATING</code>, you cannot issue another <code>UpdateTable</code> request.
    /// When the table returns to the <code>ACTIVE</code> state, the <code>UpdateTable</code>
    /// operation is complete.</p>
    ///
    /// The type parameters select the HTTP connector (`C`), the middleware stack (`M`),
    /// and the retry policy (`R`); the defaults are the standard AWS client stack.
    #[derive(std::fmt::Debug)]
    pub struct UpdateTable<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        /// Shared handle to the underlying client and service configuration.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        /// Accumulates the request parameters set via the fluent methods.
        inner: crate::input::update_table_input::Builder,
    }
impl<C, M, R> UpdateTable<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UpdateTable`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateTableOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateTableError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UpdateTableInputOperationOutputAlias,
crate::output::UpdateTableOutput,
crate::error::UpdateTableError,
crate::input::UpdateTableInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Appends an item to `AttributeDefinitions`.
///
/// To override the contents of this collection use [`set_attribute_definitions`](Self::set_attribute_definitions).
///
/// <p>An array of attributes that describe the key schema for the table and indexes. If you
/// are adding a new global secondary index to the table, <code>AttributeDefinitions</code>
/// must include the key element(s) of the new index.</p>
pub fn attribute_definitions(
mut self,
inp: impl Into<crate::model::AttributeDefinition>,
) -> Self {
self.inner = self.inner.attribute_definitions(inp);
self
}
/// <p>An array of attributes that describe the key schema for the table and indexes. If you
/// are adding a new global secondary index to the table, <code>AttributeDefinitions</code>
/// must include the key element(s) of the new index.</p>
pub fn set_attribute_definitions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::AttributeDefinition>>,
) -> Self {
self.inner = self.inner.set_attribute_definitions(input);
self
}
/// <p>The name of the table to be updated.</p>
pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_name(inp);
self
}
/// <p>The name of the table to be updated.</p>
pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_name(input);
self
}
/// <p>Controls how you are charged for read and write throughput and how you manage
/// capacity. When switching from pay-per-request to provisioned capacity, initial
/// provisioned capacity values must be set. The initial provisioned capacity values are
/// estimated based on the consumed read and write capacity of your table and global
/// secondary indexes over the past 30 minutes.</p>
/// <ul>
/// <li>
/// <p>
/// <code>PROVISIONED</code> - We recommend using <code>PROVISIONED</code> for
/// predictable workloads. <code>PROVISIONED</code> sets the billing mode to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual">Provisioned Mode</a>.</p>
/// </li>
/// <li>
/// <p>
/// <code>PAY_PER_REQUEST</code> - We recommend using <code>PAY_PER_REQUEST</code>
/// for unpredictable workloads. <code>PAY_PER_REQUEST</code> sets the billing mode
/// to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand">On-Demand Mode</a>. </p>
/// </li>
/// </ul>
pub fn billing_mode(mut self, inp: crate::model::BillingMode) -> Self {
self.inner = self.inner.billing_mode(inp);
self
}
/// <p>Controls how you are charged for read and write throughput and how you manage
/// capacity. When switching from pay-per-request to provisioned capacity, initial
/// provisioned capacity values must be set. The initial provisioned capacity values are
/// estimated based on the consumed read and write capacity of your table and global
/// secondary indexes over the past 30 minutes.</p>
/// <ul>
/// <li>
/// <p>
/// <code>PROVISIONED</code> - We recommend using <code>PROVISIONED</code> for
/// predictable workloads. <code>PROVISIONED</code> sets the billing mode to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual">Provisioned Mode</a>.</p>
/// </li>
/// <li>
/// <p>
/// <code>PAY_PER_REQUEST</code> - We recommend using <code>PAY_PER_REQUEST</code>
/// for unpredictable workloads. <code>PAY_PER_REQUEST</code> sets the billing mode
/// to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand">On-Demand Mode</a>. </p>
/// </li>
/// </ul>
pub fn set_billing_mode(
mut self,
input: std::option::Option<crate::model::BillingMode>,
) -> Self {
self.inner = self.inner.set_billing_mode(input);
self
}
/// <p>The new provisioned throughput settings for the specified table or index.</p>
pub fn provisioned_throughput(mut self, inp: crate::model::ProvisionedThroughput) -> Self {
self.inner = self.inner.provisioned_throughput(inp);
self
}
/// <p>The new provisioned throughput settings for the specified table or index.</p>
pub fn set_provisioned_throughput(
mut self,
input: std::option::Option<crate::model::ProvisionedThroughput>,
) -> Self {
self.inner = self.inner.set_provisioned_throughput(input);
self
}
/// Appends an item to `GlobalSecondaryIndexUpdates`.
///
/// To override the contents of this collection use [`set_global_secondary_index_updates`](Self::set_global_secondary_index_updates).
///
/// <p>An array of one or more global secondary indexes for the table. For each index in the
/// array, you can request one action:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Create</code> - add a new global secondary index to the table.</p>
/// </li>
/// <li>
/// <p>
/// <code>Update</code> - modify the provisioned throughput settings of an existing
/// global secondary index.</p>
/// </li>
/// <li>
/// <p>
/// <code>Delete</code> - remove a global secondary index from the table.</p>
/// </li>
/// </ul>
/// <p>You can create or delete only one global secondary index per <code>UpdateTable</code>
/// operation.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html">Managing Global
/// Secondary Indexes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>. </p>
pub fn global_secondary_index_updates(
mut self,
inp: impl Into<crate::model::GlobalSecondaryIndexUpdate>,
) -> Self {
self.inner = self.inner.global_secondary_index_updates(inp);
self
}
/// <p>An array of one or more global secondary indexes for the table. For each index in the
/// array, you can request one action:</p>
/// <ul>
/// <li>
/// <p>
/// <code>Create</code> - add a new global secondary index to the table.</p>
/// </li>
/// <li>
/// <p>
/// <code>Update</code> - modify the provisioned throughput settings of an existing
/// global secondary index.</p>
/// </li>
/// <li>
/// <p>
/// <code>Delete</code> - remove a global secondary index from the table.</p>
/// </li>
/// </ul>
/// <p>You can create or delete only one global secondary index per <code>UpdateTable</code>
/// operation.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html">Managing Global
/// Secondary Indexes</a> in the <i>Amazon DynamoDB Developer
/// Guide</i>. </p>
pub fn set_global_secondary_index_updates(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::GlobalSecondaryIndexUpdate>>,
) -> Self {
self.inner = self.inner.set_global_secondary_index_updates(input);
self
}
/// <p>Represents the DynamoDB Streams configuration for the table.</p>
/// <note>
/// <p>You receive a <code>ResourceInUseException</code> if you try to enable a stream on
/// a table that already has a stream, or if you try to disable a stream on a table that
/// doesn't have a stream.</p>
/// </note>
pub fn stream_specification(mut self, inp: crate::model::StreamSpecification) -> Self {
self.inner = self.inner.stream_specification(inp);
self
}
/// <p>Represents the DynamoDB Streams configuration for the table.</p>
/// <note>
/// <p>You receive a <code>ResourceInUseException</code> if you try to enable a stream on
/// a table that already has a stream, or if you try to disable a stream on a table that
/// doesn't have a stream.</p>
/// </note>
pub fn set_stream_specification(
mut self,
input: std::option::Option<crate::model::StreamSpecification>,
) -> Self {
self.inner = self.inner.set_stream_specification(input);
self
}
/// <p>The new server-side encryption settings for the specified table.</p>
pub fn sse_specification(mut self, inp: crate::model::SseSpecification) -> Self {
self.inner = self.inner.sse_specification(inp);
self
}
/// <p>The new server-side encryption settings for the specified table.</p>
pub fn set_sse_specification(
mut self,
input: std::option::Option<crate::model::SseSpecification>,
) -> Self {
self.inner = self.inner.set_sse_specification(input);
self
}
/// Appends an item to `ReplicaUpdates`.
///
/// To override the contents of this collection use [`set_replica_updates`](Self::set_replica_updates).
///
/// <p>A list of replica update actions (create, delete, or update) for the table.</p>
/// <note>
/// <p>This property only applies to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html">Version
/// 2019.11.21</a> of global tables.</p>
/// </note>
pub fn replica_updates(
mut self,
inp: impl Into<crate::model::ReplicationGroupUpdate>,
) -> Self {
self.inner = self.inner.replica_updates(inp);
self
}
/// <p>A list of replica update actions (create, delete, or update) for the table.</p>
/// <note>
/// <p>This property only applies to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html">Version
/// 2019.11.21</a> of global tables.</p>
/// </note>
pub fn set_replica_updates(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ReplicationGroupUpdate>>,
) -> Self {
self.inner = self.inner.set_replica_updates(input);
self
}
}
    /// Fluent builder constructing a request to `UpdateTableReplicaAutoScaling`.
    ///
    /// <p>Updates auto scaling settings on your global tables at once.</p>
    /// <note>
    /// <p>This operation only applies to <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html">Version
    /// 2019.11.21</a> of global tables.</p>
    /// </note>
    ///
    /// The type parameters select the HTTP connector (`C`), the middleware stack (`M`),
    /// and the retry policy (`R`); the defaults are the standard AWS client stack.
    #[derive(std::fmt::Debug)]
    pub struct UpdateTableReplicaAutoScaling<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        /// Shared handle to the underlying client and service configuration.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        /// Accumulates the request parameters set via the fluent methods.
        inner: crate::input::update_table_replica_auto_scaling_input::Builder,
    }
impl<C, M, R> UpdateTableReplicaAutoScaling<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UpdateTableReplicaAutoScaling`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateTableReplicaAutoScalingOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateTableReplicaAutoScalingError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UpdateTableReplicaAutoScalingInputOperationOutputAlias,
crate::output::UpdateTableReplicaAutoScalingOutput,
crate::error::UpdateTableReplicaAutoScalingError,
crate::input::UpdateTableReplicaAutoScalingInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Appends an item to `GlobalSecondaryIndexUpdates`.
///
/// To override the contents of this collection use [`set_global_secondary_index_updates`](Self::set_global_secondary_index_updates).
///
/// <p>Represents the auto scaling settings of the global secondary indexes of the replica to
/// be updated.</p>
pub fn global_secondary_index_updates(
mut self,
inp: impl Into<crate::model::GlobalSecondaryIndexAutoScalingUpdate>,
) -> Self {
self.inner = self.inner.global_secondary_index_updates(inp);
self
}
/// <p>Represents the auto scaling settings of the global secondary indexes of the replica to
/// be updated.</p>
pub fn set_global_secondary_index_updates(
mut self,
input: std::option::Option<
std::vec::Vec<crate::model::GlobalSecondaryIndexAutoScalingUpdate>,
>,
) -> Self {
self.inner = self.inner.set_global_secondary_index_updates(input);
self
}
/// <p>The name of the global table to be updated.</p>
pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_name(inp);
self
}
/// <p>The name of the global table to be updated.</p>
pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_name(input);
self
}
/// <p>Represents the auto scaling settings to be modified for a global table or global
/// secondary index.</p>
pub fn provisioned_write_capacity_auto_scaling_update(
mut self,
inp: crate::model::AutoScalingSettingsUpdate,
) -> Self {
self.inner = self
.inner
.provisioned_write_capacity_auto_scaling_update(inp);
self
}
/// <p>Represents the auto scaling settings to be modified for a global table or global
/// secondary index.</p>
pub fn set_provisioned_write_capacity_auto_scaling_update(
mut self,
input: std::option::Option<crate::model::AutoScalingSettingsUpdate>,
) -> Self {
self.inner = self
.inner
.set_provisioned_write_capacity_auto_scaling_update(input);
self
}
/// Appends an item to `ReplicaUpdates`.
///
/// To override the contents of this collection use [`set_replica_updates`](Self::set_replica_updates).
///
/// <p>Represents the auto scaling settings of replicas of the table that will be
/// modified.</p>
pub fn replica_updates(
mut self,
inp: impl Into<crate::model::ReplicaAutoScalingUpdate>,
) -> Self {
self.inner = self.inner.replica_updates(inp);
self
}
/// <p>Represents the auto scaling settings of replicas of the table that will be
/// modified.</p>
pub fn set_replica_updates(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ReplicaAutoScalingUpdate>>,
) -> Self {
self.inner = self.inner.set_replica_updates(input);
self
}
}
    /// Fluent builder constructing a request to `UpdateTimeToLive`.
    ///
    /// <p>The <code>UpdateTimeToLive</code> method enables or disables Time to Live (TTL) for
    /// the specified table. A successful <code>UpdateTimeToLive</code> call returns the current
    /// <code>TimeToLiveSpecification</code>. It can take up to one hour for the change to
    /// fully process. Any additional <code>UpdateTimeToLive</code> calls for the same table
    /// during this one hour duration result in a <code>ValidationException</code>. </p>
    /// <p>TTL compares the current time in epoch time format to the time stored in the TTL
    /// attribute of an item. If the epoch time value stored in the attribute is less than the
    /// current time, the item is marked as expired and subsequently deleted.</p>
    /// <note>
    /// <p> The epoch time format is the number of seconds elapsed since 12:00:00 AM January
    /// 1, 1970 UTC. </p>
    /// </note>
    /// <p>DynamoDB deletes expired items on a best-effort basis to ensure availability of
    /// throughput for other data operations. </p>
    /// <important>
    /// <p>DynamoDB typically deletes expired items within two days of expiration. The exact
    /// duration within which an item gets deleted after expiration is specific to the
    /// nature of the workload. Items that have expired and not been deleted will still show
    /// up in reads, queries, and scans.</p>
    /// </important>
    /// <p>As items are deleted, they are removed from any local secondary index and global
    /// secondary index immediately in the same eventually consistent way as a standard delete
    /// operation.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html">Time To Live</a> in the
    /// Amazon DynamoDB Developer Guide. </p>
    #[derive(std::fmt::Debug)]
    pub struct UpdateTimeToLive<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client/config state used to sign and dispatch the request.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Accumulates the operation input as fluent setters are called.
        inner: crate::input::update_time_to_live_input::Builder,
    }
impl<C, M, R> UpdateTimeToLive<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UpdateTimeToLive`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateTimeToLiveOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateTimeToLiveError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UpdateTimeToLiveInputOperationOutputAlias,
crate::output::UpdateTimeToLiveOutput,
crate::error::UpdateTimeToLiveError,
crate::input::UpdateTimeToLiveInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the table to be configured.</p>
pub fn table_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.table_name(inp);
self
}
/// <p>The name of the table to be configured.</p>
pub fn set_table_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_table_name(input);
self
}
/// <p>Represents the settings used to enable or disable Time to Live for the specified
/// table.</p>
pub fn time_to_live_specification(
mut self,
inp: crate::model::TimeToLiveSpecification,
) -> Self {
self.inner = self.inner.time_to_live_specification(inp);
self
}
/// <p>Represents the settings used to enable or disable Time to Live for the specified
/// table.</p>
pub fn set_time_to_live_specification(
mut self,
input: std::option::Option<crate::model::TimeToLiveSpecification>,
) -> Self {
self.inner = self.inner.set_time_to_live_specification(input);
self
}
}
}
impl<C> Client<C, aws_hyper::AwsMiddleware, aws_smithy_client::retry::Standard> {
    /// Creates a client with the given service config and connector override.
    pub fn from_conf_conn(conf: crate::Config, conn: C) -> Self {
        // Resolve retry/timeout settings, falling back to defaults when unset.
        let retry_config = conf.retry_config.clone().unwrap_or_default();
        let timeout_config = conf.timeout_config.clone().unwrap_or_default();
        let sleep_impl = conf.sleep_impl.clone();
        // Build the underlying client over the supplied connector and attach
        // the resolved settings.
        let client = {
            let mut c = aws_hyper::Client::new(conn)
                .with_retry_config(retry_config.into())
                .with_timeout_config(timeout_config);
            c.set_sleep_impl(sleep_impl);
            c
        };
        Self {
            handle: std::sync::Arc::new(Handle { client, conf }),
        }
    }
}
impl
    Client<
        aws_smithy_client::erase::DynConnector,
        aws_hyper::AwsMiddleware,
        aws_smithy_client::retry::Standard,
    >
{
    /// Creates a new client from a shared config.
    #[cfg(any(feature = "rustls", feature = "native-tls"))]
    pub fn new(config: &aws_types::config::Config) -> Self {
        Self::from_conf(config.into())
    }
    /// Creates a new client from the service [`Config`](crate::Config).
    #[cfg(any(feature = "rustls", feature = "native-tls"))]
    pub fn from_conf(conf: crate::Config) -> Self {
        // Resolve retry/timeout settings, falling back to defaults when unset.
        let retry_config = conf.retry_config.clone().unwrap_or_default();
        let timeout_config = conf.timeout_config.clone().unwrap_or_default();
        let sleep_impl = conf.sleep_impl.clone();
        // Build an HTTPS client and attach the resolved settings.
        let client = {
            let mut c = aws_hyper::Client::https()
                .with_retry_config(retry_config.into())
                .with_timeout_config(timeout_config);
            c.set_sleep_impl(sleep_impl);
            c
        };
        Self {
            handle: std::sync::Arc::new(Handle { client, conf }),
        }
    }
}
| 47.892633 | 263 | 0.592741 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.