hexsha
stringlengths 40
40
| size
int64 4
1.05M
| content
stringlengths 4
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
48f7b5cbc86e32cd64e7754d3e3936fa4d86100e
| 181 |
/// Demonstrates `f32::exp2`: 2^2 == 4, so the result should match 4.0
/// to within machine epsilon.
fn main() {
    let exponent = 2.0f32;
    // Use the inherent associated const `f32::EPSILON` directly; the
    // module-level `std::f32::EPSILON` (pulled in by `use std::f32;`)
    // is deprecated since Rust 1.72.
    let abs_difference = (exponent.exp2() - 4.0).abs();
    assert!(abs_difference <= f32::EPSILON);
}
| 16.454545 | 48 | 0.469613 |
fcd249330b12c2e418f2d0a50a55592d8a991efb
| 2,414 |
/*******************************************************************************
* (c) 2021 Zondax GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
********************************************************************************/
use core::ops::DerefMut;
use crate::ui::{manual_vtable::RefMutDynViewable, Viewable};
use super::ZUI;
/// Abstraction over a device-specific UI backend (Nano S, Nano X, or a
/// console stub) used by `ZUI`. `KEY_SIZE` is the fixed capacity of the
/// key (title) buffer exposed by `key_buf`.
pub trait UIBackend<const KEY_SIZE: usize>: Sized {
/// Mutable string buffer where the value/message text is staged before display.
type MessageBuf: DerefMut<Target = str>;
//How many "action" items are we in charge of displaying also
const INCLUDE_ACTIONS_COUNT: usize;
/// Access the backend's global singleton instance.
/// NOTE(review): presumably backed by a `static mut`; callers must avoid aliasing — confirm in impls.
fn static_mut() -> &'static mut Self;
/// Mutable access to the fixed-size key (title) buffer.
fn key_buf(&mut self) -> &mut [u8; KEY_SIZE];
/// Obtain the message buffer to write the value text into.
fn message_buf(&self) -> Self::MessageBuf;
/// Hand a filled message buffer back to the backend for pagination/splitting.
fn split_value_field(&mut self, message_buf: Self::MessageBuf);
//view_idle_show_impl
fn show_idle(&mut self, item_idx: usize, status: Option<&[u8]>);
//view_error_show_impl
fn show_error(&mut self);
//view_message_show_impl
fn show_message(&mut self, title: &str, message: &str);
//view_review_show_impl
fn show_review(ui: &mut ZUI<Self, KEY_SIZE>);
//h_review_update
fn update_review(ui: &mut ZUI<Self, KEY_SIZE>);
//UX_WAIT macro equivalent
fn wait_ui(&mut self);
/// Whether expert mode is currently enabled.
fn expert(&self) -> bool;
/// Flip the expert-mode flag.
fn toggle_expert(&mut self);
/// Refresh any UI state derived from the expert-mode flag.
fn update_expert(&mut self);
/// Output buffer for the accept/reject response APDU.
fn accept_reject_out(&mut self) -> &mut [u8];
/// Finalize the accept/reject response; `len` is the number of bytes written.
fn accept_reject_end(&mut self, len: usize);
/// Store a `Viewable` in backend-owned storage, returning a type-erased
/// mutable handle to it, or `None` if it could not be stored.
fn store_viewable<V: Viewable + Sized + 'static>(
&mut self,
viewable: V,
) -> Option<RefMutDynViewable>;
}
// Select the concrete backend implementation at compile time:
// Nano S, Nano X, or a console stub for host-side builds/tests.
// The `cbindgen_*` features allow header generation off-device.
cfg_if::cfg_if! {
if #[cfg(any(nanos, feature = "cbindgen_s"))] {
mod nanos;
pub use nanos::{NanoSBackend, RUST_ZUI};
} else if #[cfg(any(nanox, feature = "cbindgen_x"))] {
mod nanox;
pub use nanox::{NanoXBackend, RUST_ZUI};
} else {
// Fallback used when no device target is configured.
mod console;
pub use console::{ConsoleBackend, RUST_ZUI};
}
}
| 29.439024 | 81 | 0.622204 |
b9117413f00e1b925f811556b3f0fb266d77e9d6
| 200,745 |
/*
Copyright (C) 2018-2019 [email protected]
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// ⚠️This file was generated by GENERATOR!🦹♂️
use super::iced_constants::IcedConstants;
use super::Mnemonic;
#[rustfmt::skip]
pub(super) static TO_MNEMONIC: [Mnemonic; IcedConstants::CODE_ENUM_COUNT] = [
Mnemonic::INVALID,// INVALID
Mnemonic::Db,// DeclareByte
Mnemonic::Dw,// DeclareWord
Mnemonic::Dd,// DeclareDword
Mnemonic::Dq,// DeclareQword
Mnemonic::Add,// Add_rm8_r8
Mnemonic::Add,// Add_rm16_r16
Mnemonic::Add,// Add_rm32_r32
Mnemonic::Add,// Add_rm64_r64
Mnemonic::Add,// Add_r8_rm8
Mnemonic::Add,// Add_r16_rm16
Mnemonic::Add,// Add_r32_rm32
Mnemonic::Add,// Add_r64_rm64
Mnemonic::Add,// Add_AL_imm8
Mnemonic::Add,// Add_AX_imm16
Mnemonic::Add,// Add_EAX_imm32
Mnemonic::Add,// Add_RAX_imm32
Mnemonic::Push,// Pushw_ES
Mnemonic::Push,// Pushd_ES
Mnemonic::Pop,// Popw_ES
Mnemonic::Pop,// Popd_ES
Mnemonic::Or,// Or_rm8_r8
Mnemonic::Or,// Or_rm16_r16
Mnemonic::Or,// Or_rm32_r32
Mnemonic::Or,// Or_rm64_r64
Mnemonic::Or,// Or_r8_rm8
Mnemonic::Or,// Or_r16_rm16
Mnemonic::Or,// Or_r32_rm32
Mnemonic::Or,// Or_r64_rm64
Mnemonic::Or,// Or_AL_imm8
Mnemonic::Or,// Or_AX_imm16
Mnemonic::Or,// Or_EAX_imm32
Mnemonic::Or,// Or_RAX_imm32
Mnemonic::Push,// Pushw_CS
Mnemonic::Push,// Pushd_CS
Mnemonic::Pop,// Popw_CS
Mnemonic::Adc,// Adc_rm8_r8
Mnemonic::Adc,// Adc_rm16_r16
Mnemonic::Adc,// Adc_rm32_r32
Mnemonic::Adc,// Adc_rm64_r64
Mnemonic::Adc,// Adc_r8_rm8
Mnemonic::Adc,// Adc_r16_rm16
Mnemonic::Adc,// Adc_r32_rm32
Mnemonic::Adc,// Adc_r64_rm64
Mnemonic::Adc,// Adc_AL_imm8
Mnemonic::Adc,// Adc_AX_imm16
Mnemonic::Adc,// Adc_EAX_imm32
Mnemonic::Adc,// Adc_RAX_imm32
Mnemonic::Push,// Pushw_SS
Mnemonic::Push,// Pushd_SS
Mnemonic::Pop,// Popw_SS
Mnemonic::Pop,// Popd_SS
Mnemonic::Sbb,// Sbb_rm8_r8
Mnemonic::Sbb,// Sbb_rm16_r16
Mnemonic::Sbb,// Sbb_rm32_r32
Mnemonic::Sbb,// Sbb_rm64_r64
Mnemonic::Sbb,// Sbb_r8_rm8
Mnemonic::Sbb,// Sbb_r16_rm16
Mnemonic::Sbb,// Sbb_r32_rm32
Mnemonic::Sbb,// Sbb_r64_rm64
Mnemonic::Sbb,// Sbb_AL_imm8
Mnemonic::Sbb,// Sbb_AX_imm16
Mnemonic::Sbb,// Sbb_EAX_imm32
Mnemonic::Sbb,// Sbb_RAX_imm32
Mnemonic::Push,// Pushw_DS
Mnemonic::Push,// Pushd_DS
Mnemonic::Pop,// Popw_DS
Mnemonic::Pop,// Popd_DS
Mnemonic::And,// And_rm8_r8
Mnemonic::And,// And_rm16_r16
Mnemonic::And,// And_rm32_r32
Mnemonic::And,// And_rm64_r64
Mnemonic::And,// And_r8_rm8
Mnemonic::And,// And_r16_rm16
Mnemonic::And,// And_r32_rm32
Mnemonic::And,// And_r64_rm64
Mnemonic::And,// And_AL_imm8
Mnemonic::And,// And_AX_imm16
Mnemonic::And,// And_EAX_imm32
Mnemonic::And,// And_RAX_imm32
Mnemonic::Daa,// Daa
Mnemonic::Sub,// Sub_rm8_r8
Mnemonic::Sub,// Sub_rm16_r16
Mnemonic::Sub,// Sub_rm32_r32
Mnemonic::Sub,// Sub_rm64_r64
Mnemonic::Sub,// Sub_r8_rm8
Mnemonic::Sub,// Sub_r16_rm16
Mnemonic::Sub,// Sub_r32_rm32
Mnemonic::Sub,// Sub_r64_rm64
Mnemonic::Sub,// Sub_AL_imm8
Mnemonic::Sub,// Sub_AX_imm16
Mnemonic::Sub,// Sub_EAX_imm32
Mnemonic::Sub,// Sub_RAX_imm32
Mnemonic::Das,// Das
Mnemonic::Xor,// Xor_rm8_r8
Mnemonic::Xor,// Xor_rm16_r16
Mnemonic::Xor,// Xor_rm32_r32
Mnemonic::Xor,// Xor_rm64_r64
Mnemonic::Xor,// Xor_r8_rm8
Mnemonic::Xor,// Xor_r16_rm16
Mnemonic::Xor,// Xor_r32_rm32
Mnemonic::Xor,// Xor_r64_rm64
Mnemonic::Xor,// Xor_AL_imm8
Mnemonic::Xor,// Xor_AX_imm16
Mnemonic::Xor,// Xor_EAX_imm32
Mnemonic::Xor,// Xor_RAX_imm32
Mnemonic::Aaa,// Aaa
Mnemonic::Cmp,// Cmp_rm8_r8
Mnemonic::Cmp,// Cmp_rm16_r16
Mnemonic::Cmp,// Cmp_rm32_r32
Mnemonic::Cmp,// Cmp_rm64_r64
Mnemonic::Cmp,// Cmp_r8_rm8
Mnemonic::Cmp,// Cmp_r16_rm16
Mnemonic::Cmp,// Cmp_r32_rm32
Mnemonic::Cmp,// Cmp_r64_rm64
Mnemonic::Cmp,// Cmp_AL_imm8
Mnemonic::Cmp,// Cmp_AX_imm16
Mnemonic::Cmp,// Cmp_EAX_imm32
Mnemonic::Cmp,// Cmp_RAX_imm32
Mnemonic::Aas,// Aas
Mnemonic::Inc,// Inc_r16
Mnemonic::Inc,// Inc_r32
Mnemonic::Dec,// Dec_r16
Mnemonic::Dec,// Dec_r32
Mnemonic::Push,// Push_r16
Mnemonic::Push,// Push_r32
Mnemonic::Push,// Push_r64
Mnemonic::Pop,// Pop_r16
Mnemonic::Pop,// Pop_r32
Mnemonic::Pop,// Pop_r64
Mnemonic::Pusha,// Pushaw
Mnemonic::Pushad,// Pushad
Mnemonic::Popa,// Popaw
Mnemonic::Popad,// Popad
Mnemonic::Bound,// Bound_r16_m1616
Mnemonic::Bound,// Bound_r32_m3232
Mnemonic::Arpl,// Arpl_rm16_r16
Mnemonic::Arpl,// Arpl_r32m16_r32
Mnemonic::Movsxd,// Movsxd_r16_rm16
Mnemonic::Movsxd,// Movsxd_r32_rm32
Mnemonic::Movsxd,// Movsxd_r64_rm32
Mnemonic::Push,// Push_imm16
Mnemonic::Push,// Pushd_imm32
Mnemonic::Push,// Pushq_imm32
Mnemonic::Imul,// Imul_r16_rm16_imm16
Mnemonic::Imul,// Imul_r32_rm32_imm32
Mnemonic::Imul,// Imul_r64_rm64_imm32
Mnemonic::Push,// Pushw_imm8
Mnemonic::Push,// Pushd_imm8
Mnemonic::Push,// Pushq_imm8
Mnemonic::Imul,// Imul_r16_rm16_imm8
Mnemonic::Imul,// Imul_r32_rm32_imm8
Mnemonic::Imul,// Imul_r64_rm64_imm8
Mnemonic::Insb,// Insb_m8_DX
Mnemonic::Insw,// Insw_m16_DX
Mnemonic::Insd,// Insd_m32_DX
Mnemonic::Outsb,// Outsb_DX_m8
Mnemonic::Outsw,// Outsw_DX_m16
Mnemonic::Outsd,// Outsd_DX_m32
Mnemonic::Jo,// Jo_rel8_16
Mnemonic::Jo,// Jo_rel8_32
Mnemonic::Jo,// Jo_rel8_64
Mnemonic::Jno,// Jno_rel8_16
Mnemonic::Jno,// Jno_rel8_32
Mnemonic::Jno,// Jno_rel8_64
Mnemonic::Jb,// Jb_rel8_16
Mnemonic::Jb,// Jb_rel8_32
Mnemonic::Jb,// Jb_rel8_64
Mnemonic::Jae,// Jae_rel8_16
Mnemonic::Jae,// Jae_rel8_32
Mnemonic::Jae,// Jae_rel8_64
Mnemonic::Je,// Je_rel8_16
Mnemonic::Je,// Je_rel8_32
Mnemonic::Je,// Je_rel8_64
Mnemonic::Jne,// Jne_rel8_16
Mnemonic::Jne,// Jne_rel8_32
Mnemonic::Jne,// Jne_rel8_64
Mnemonic::Jbe,// Jbe_rel8_16
Mnemonic::Jbe,// Jbe_rel8_32
Mnemonic::Jbe,// Jbe_rel8_64
Mnemonic::Ja,// Ja_rel8_16
Mnemonic::Ja,// Ja_rel8_32
Mnemonic::Ja,// Ja_rel8_64
Mnemonic::Js,// Js_rel8_16
Mnemonic::Js,// Js_rel8_32
Mnemonic::Js,// Js_rel8_64
Mnemonic::Jns,// Jns_rel8_16
Mnemonic::Jns,// Jns_rel8_32
Mnemonic::Jns,// Jns_rel8_64
Mnemonic::Jp,// Jp_rel8_16
Mnemonic::Jp,// Jp_rel8_32
Mnemonic::Jp,// Jp_rel8_64
Mnemonic::Jnp,// Jnp_rel8_16
Mnemonic::Jnp,// Jnp_rel8_32
Mnemonic::Jnp,// Jnp_rel8_64
Mnemonic::Jl,// Jl_rel8_16
Mnemonic::Jl,// Jl_rel8_32
Mnemonic::Jl,// Jl_rel8_64
Mnemonic::Jge,// Jge_rel8_16
Mnemonic::Jge,// Jge_rel8_32
Mnemonic::Jge,// Jge_rel8_64
Mnemonic::Jle,// Jle_rel8_16
Mnemonic::Jle,// Jle_rel8_32
Mnemonic::Jle,// Jle_rel8_64
Mnemonic::Jg,// Jg_rel8_16
Mnemonic::Jg,// Jg_rel8_32
Mnemonic::Jg,// Jg_rel8_64
Mnemonic::Add,// Add_rm8_imm8
Mnemonic::Or,// Or_rm8_imm8
Mnemonic::Adc,// Adc_rm8_imm8
Mnemonic::Sbb,// Sbb_rm8_imm8
Mnemonic::And,// And_rm8_imm8
Mnemonic::Sub,// Sub_rm8_imm8
Mnemonic::Xor,// Xor_rm8_imm8
Mnemonic::Cmp,// Cmp_rm8_imm8
Mnemonic::Add,// Add_rm16_imm16
Mnemonic::Add,// Add_rm32_imm32
Mnemonic::Add,// Add_rm64_imm32
Mnemonic::Or,// Or_rm16_imm16
Mnemonic::Or,// Or_rm32_imm32
Mnemonic::Or,// Or_rm64_imm32
Mnemonic::Adc,// Adc_rm16_imm16
Mnemonic::Adc,// Adc_rm32_imm32
Mnemonic::Adc,// Adc_rm64_imm32
Mnemonic::Sbb,// Sbb_rm16_imm16
Mnemonic::Sbb,// Sbb_rm32_imm32
Mnemonic::Sbb,// Sbb_rm64_imm32
Mnemonic::And,// And_rm16_imm16
Mnemonic::And,// And_rm32_imm32
Mnemonic::And,// And_rm64_imm32
Mnemonic::Sub,// Sub_rm16_imm16
Mnemonic::Sub,// Sub_rm32_imm32
Mnemonic::Sub,// Sub_rm64_imm32
Mnemonic::Xor,// Xor_rm16_imm16
Mnemonic::Xor,// Xor_rm32_imm32
Mnemonic::Xor,// Xor_rm64_imm32
Mnemonic::Cmp,// Cmp_rm16_imm16
Mnemonic::Cmp,// Cmp_rm32_imm32
Mnemonic::Cmp,// Cmp_rm64_imm32
Mnemonic::Add,// Add_rm8_imm8_82
Mnemonic::Or,// Or_rm8_imm8_82
Mnemonic::Adc,// Adc_rm8_imm8_82
Mnemonic::Sbb,// Sbb_rm8_imm8_82
Mnemonic::And,// And_rm8_imm8_82
Mnemonic::Sub,// Sub_rm8_imm8_82
Mnemonic::Xor,// Xor_rm8_imm8_82
Mnemonic::Cmp,// Cmp_rm8_imm8_82
Mnemonic::Add,// Add_rm16_imm8
Mnemonic::Add,// Add_rm32_imm8
Mnemonic::Add,// Add_rm64_imm8
Mnemonic::Or,// Or_rm16_imm8
Mnemonic::Or,// Or_rm32_imm8
Mnemonic::Or,// Or_rm64_imm8
Mnemonic::Adc,// Adc_rm16_imm8
Mnemonic::Adc,// Adc_rm32_imm8
Mnemonic::Adc,// Adc_rm64_imm8
Mnemonic::Sbb,// Sbb_rm16_imm8
Mnemonic::Sbb,// Sbb_rm32_imm8
Mnemonic::Sbb,// Sbb_rm64_imm8
Mnemonic::And,// And_rm16_imm8
Mnemonic::And,// And_rm32_imm8
Mnemonic::And,// And_rm64_imm8
Mnemonic::Sub,// Sub_rm16_imm8
Mnemonic::Sub,// Sub_rm32_imm8
Mnemonic::Sub,// Sub_rm64_imm8
Mnemonic::Xor,// Xor_rm16_imm8
Mnemonic::Xor,// Xor_rm32_imm8
Mnemonic::Xor,// Xor_rm64_imm8
Mnemonic::Cmp,// Cmp_rm16_imm8
Mnemonic::Cmp,// Cmp_rm32_imm8
Mnemonic::Cmp,// Cmp_rm64_imm8
Mnemonic::Test,// Test_rm8_r8
Mnemonic::Test,// Test_rm16_r16
Mnemonic::Test,// Test_rm32_r32
Mnemonic::Test,// Test_rm64_r64
Mnemonic::Xchg,// Xchg_rm8_r8
Mnemonic::Xchg,// Xchg_rm16_r16
Mnemonic::Xchg,// Xchg_rm32_r32
Mnemonic::Xchg,// Xchg_rm64_r64
Mnemonic::Mov,// Mov_rm8_r8
Mnemonic::Mov,// Mov_rm16_r16
Mnemonic::Mov,// Mov_rm32_r32
Mnemonic::Mov,// Mov_rm64_r64
Mnemonic::Mov,// Mov_r8_rm8
Mnemonic::Mov,// Mov_r16_rm16
Mnemonic::Mov,// Mov_r32_rm32
Mnemonic::Mov,// Mov_r64_rm64
Mnemonic::Mov,// Mov_rm16_Sreg
Mnemonic::Mov,// Mov_r32m16_Sreg
Mnemonic::Mov,// Mov_r64m16_Sreg
Mnemonic::Lea,// Lea_r16_m
Mnemonic::Lea,// Lea_r32_m
Mnemonic::Lea,// Lea_r64_m
Mnemonic::Mov,// Mov_Sreg_rm16
Mnemonic::Mov,// Mov_Sreg_r32m16
Mnemonic::Mov,// Mov_Sreg_r64m16
Mnemonic::Pop,// Pop_rm16
Mnemonic::Pop,// Pop_rm32
Mnemonic::Pop,// Pop_rm64
Mnemonic::Nop,// Nopw
Mnemonic::Nop,// Nopd
Mnemonic::Nop,// Nopq
Mnemonic::Xchg,// Xchg_r16_AX
Mnemonic::Xchg,// Xchg_r32_EAX
Mnemonic::Xchg,// Xchg_r64_RAX
Mnemonic::Pause,// Pause
Mnemonic::Cbw,// Cbw
Mnemonic::Cwde,// Cwde
Mnemonic::Cdqe,// Cdqe
Mnemonic::Cwd,// Cwd
Mnemonic::Cdq,// Cdq
Mnemonic::Cqo,// Cqo
Mnemonic::Call,// Call_ptr1616
Mnemonic::Call,// Call_ptr1632
Mnemonic::Wait,// Wait
Mnemonic::Pushf,// Pushfw
Mnemonic::Pushfd,// Pushfd
Mnemonic::Pushfq,// Pushfq
Mnemonic::Popf,// Popfw
Mnemonic::Popfd,// Popfd
Mnemonic::Popfq,// Popfq
Mnemonic::Sahf,// Sahf
Mnemonic::Lahf,// Lahf
Mnemonic::Mov,// Mov_AL_moffs8
Mnemonic::Mov,// Mov_AX_moffs16
Mnemonic::Mov,// Mov_EAX_moffs32
Mnemonic::Mov,// Mov_RAX_moffs64
Mnemonic::Mov,// Mov_moffs8_AL
Mnemonic::Mov,// Mov_moffs16_AX
Mnemonic::Mov,// Mov_moffs32_EAX
Mnemonic::Mov,// Mov_moffs64_RAX
Mnemonic::Movsb,// Movsb_m8_m8
Mnemonic::Movsw,// Movsw_m16_m16
Mnemonic::Movsd,// Movsd_m32_m32
Mnemonic::Movsq,// Movsq_m64_m64
Mnemonic::Cmpsb,// Cmpsb_m8_m8
Mnemonic::Cmpsw,// Cmpsw_m16_m16
Mnemonic::Cmpsd,// Cmpsd_m32_m32
Mnemonic::Cmpsq,// Cmpsq_m64_m64
Mnemonic::Test,// Test_AL_imm8
Mnemonic::Test,// Test_AX_imm16
Mnemonic::Test,// Test_EAX_imm32
Mnemonic::Test,// Test_RAX_imm32
Mnemonic::Stosb,// Stosb_m8_AL
Mnemonic::Stosw,// Stosw_m16_AX
Mnemonic::Stosd,// Stosd_m32_EAX
Mnemonic::Stosq,// Stosq_m64_RAX
Mnemonic::Lodsb,// Lodsb_AL_m8
Mnemonic::Lodsw,// Lodsw_AX_m16
Mnemonic::Lodsd,// Lodsd_EAX_m32
Mnemonic::Lodsq,// Lodsq_RAX_m64
Mnemonic::Scasb,// Scasb_AL_m8
Mnemonic::Scasw,// Scasw_AX_m16
Mnemonic::Scasd,// Scasd_EAX_m32
Mnemonic::Scasq,// Scasq_RAX_m64
Mnemonic::Mov,// Mov_r8_imm8
Mnemonic::Mov,// Mov_r16_imm16
Mnemonic::Mov,// Mov_r32_imm32
Mnemonic::Mov,// Mov_r64_imm64
Mnemonic::Rol,// Rol_rm8_imm8
Mnemonic::Ror,// Ror_rm8_imm8
Mnemonic::Rcl,// Rcl_rm8_imm8
Mnemonic::Rcr,// Rcr_rm8_imm8
Mnemonic::Shl,// Shl_rm8_imm8
Mnemonic::Shr,// Shr_rm8_imm8
Mnemonic::Sal,// Sal_rm8_imm8
Mnemonic::Sar,// Sar_rm8_imm8
Mnemonic::Rol,// Rol_rm16_imm8
Mnemonic::Rol,// Rol_rm32_imm8
Mnemonic::Rol,// Rol_rm64_imm8
Mnemonic::Ror,// Ror_rm16_imm8
Mnemonic::Ror,// Ror_rm32_imm8
Mnemonic::Ror,// Ror_rm64_imm8
Mnemonic::Rcl,// Rcl_rm16_imm8
Mnemonic::Rcl,// Rcl_rm32_imm8
Mnemonic::Rcl,// Rcl_rm64_imm8
Mnemonic::Rcr,// Rcr_rm16_imm8
Mnemonic::Rcr,// Rcr_rm32_imm8
Mnemonic::Rcr,// Rcr_rm64_imm8
Mnemonic::Shl,// Shl_rm16_imm8
Mnemonic::Shl,// Shl_rm32_imm8
Mnemonic::Shl,// Shl_rm64_imm8
Mnemonic::Shr,// Shr_rm16_imm8
Mnemonic::Shr,// Shr_rm32_imm8
Mnemonic::Shr,// Shr_rm64_imm8
Mnemonic::Sal,// Sal_rm16_imm8
Mnemonic::Sal,// Sal_rm32_imm8
Mnemonic::Sal,// Sal_rm64_imm8
Mnemonic::Sar,// Sar_rm16_imm8
Mnemonic::Sar,// Sar_rm32_imm8
Mnemonic::Sar,// Sar_rm64_imm8
Mnemonic::Ret,// Retnw_imm16
Mnemonic::Ret,// Retnd_imm16
Mnemonic::Ret,// Retnq_imm16
Mnemonic::Ret,// Retnw
Mnemonic::Ret,// Retnd
Mnemonic::Ret,// Retnq
Mnemonic::Les,// Les_r16_m1616
Mnemonic::Les,// Les_r32_m1632
Mnemonic::Lds,// Lds_r16_m1616
Mnemonic::Lds,// Lds_r32_m1632
Mnemonic::Mov,// Mov_rm8_imm8
Mnemonic::Xabort,// Xabort_imm8
Mnemonic::Mov,// Mov_rm16_imm16
Mnemonic::Mov,// Mov_rm32_imm32
Mnemonic::Mov,// Mov_rm64_imm32
Mnemonic::Xbegin,// Xbegin_rel16
Mnemonic::Xbegin,// Xbegin_rel32
Mnemonic::Enter,// Enterw_imm16_imm8
Mnemonic::Enter,// Enterd_imm16_imm8
Mnemonic::Enter,// Enterq_imm16_imm8
Mnemonic::Leave,// Leavew
Mnemonic::Leave,// Leaved
Mnemonic::Leave,// Leaveq
Mnemonic::Retf,// Retfw_imm16
Mnemonic::Retf,// Retfd_imm16
Mnemonic::Retf,// Retfq_imm16
Mnemonic::Retf,// Retfw
Mnemonic::Retf,// Retfd
Mnemonic::Retf,// Retfq
Mnemonic::Int3,// Int3
Mnemonic::Int,// Int_imm8
Mnemonic::Into,// Into
Mnemonic::Iret,// Iretw
Mnemonic::Iretd,// Iretd
Mnemonic::Iretq,// Iretq
Mnemonic::Rol,// Rol_rm8_1
Mnemonic::Ror,// Ror_rm8_1
Mnemonic::Rcl,// Rcl_rm8_1
Mnemonic::Rcr,// Rcr_rm8_1
Mnemonic::Shl,// Shl_rm8_1
Mnemonic::Shr,// Shr_rm8_1
Mnemonic::Sal,// Sal_rm8_1
Mnemonic::Sar,// Sar_rm8_1
Mnemonic::Rol,// Rol_rm16_1
Mnemonic::Rol,// Rol_rm32_1
Mnemonic::Rol,// Rol_rm64_1
Mnemonic::Ror,// Ror_rm16_1
Mnemonic::Ror,// Ror_rm32_1
Mnemonic::Ror,// Ror_rm64_1
Mnemonic::Rcl,// Rcl_rm16_1
Mnemonic::Rcl,// Rcl_rm32_1
Mnemonic::Rcl,// Rcl_rm64_1
Mnemonic::Rcr,// Rcr_rm16_1
Mnemonic::Rcr,// Rcr_rm32_1
Mnemonic::Rcr,// Rcr_rm64_1
Mnemonic::Shl,// Shl_rm16_1
Mnemonic::Shl,// Shl_rm32_1
Mnemonic::Shl,// Shl_rm64_1
Mnemonic::Shr,// Shr_rm16_1
Mnemonic::Shr,// Shr_rm32_1
Mnemonic::Shr,// Shr_rm64_1
Mnemonic::Sal,// Sal_rm16_1
Mnemonic::Sal,// Sal_rm32_1
Mnemonic::Sal,// Sal_rm64_1
Mnemonic::Sar,// Sar_rm16_1
Mnemonic::Sar,// Sar_rm32_1
Mnemonic::Sar,// Sar_rm64_1
Mnemonic::Rol,// Rol_rm8_CL
Mnemonic::Ror,// Ror_rm8_CL
Mnemonic::Rcl,// Rcl_rm8_CL
Mnemonic::Rcr,// Rcr_rm8_CL
Mnemonic::Shl,// Shl_rm8_CL
Mnemonic::Shr,// Shr_rm8_CL
Mnemonic::Sal,// Sal_rm8_CL
Mnemonic::Sar,// Sar_rm8_CL
Mnemonic::Rol,// Rol_rm16_CL
Mnemonic::Rol,// Rol_rm32_CL
Mnemonic::Rol,// Rol_rm64_CL
Mnemonic::Ror,// Ror_rm16_CL
Mnemonic::Ror,// Ror_rm32_CL
Mnemonic::Ror,// Ror_rm64_CL
Mnemonic::Rcl,// Rcl_rm16_CL
Mnemonic::Rcl,// Rcl_rm32_CL
Mnemonic::Rcl,// Rcl_rm64_CL
Mnemonic::Rcr,// Rcr_rm16_CL
Mnemonic::Rcr,// Rcr_rm32_CL
Mnemonic::Rcr,// Rcr_rm64_CL
Mnemonic::Shl,// Shl_rm16_CL
Mnemonic::Shl,// Shl_rm32_CL
Mnemonic::Shl,// Shl_rm64_CL
Mnemonic::Shr,// Shr_rm16_CL
Mnemonic::Shr,// Shr_rm32_CL
Mnemonic::Shr,// Shr_rm64_CL
Mnemonic::Sal,// Sal_rm16_CL
Mnemonic::Sal,// Sal_rm32_CL
Mnemonic::Sal,// Sal_rm64_CL
Mnemonic::Sar,// Sar_rm16_CL
Mnemonic::Sar,// Sar_rm32_CL
Mnemonic::Sar,// Sar_rm64_CL
Mnemonic::Aam,// Aam_imm8
Mnemonic::Aad,// Aad_imm8
Mnemonic::Salc,// Salc
Mnemonic::Xlatb,// Xlat_m8
Mnemonic::Fadd,// Fadd_m32fp
Mnemonic::Fmul,// Fmul_m32fp
Mnemonic::Fcom,// Fcom_m32fp
Mnemonic::Fcomp,// Fcomp_m32fp
Mnemonic::Fsub,// Fsub_m32fp
Mnemonic::Fsubr,// Fsubr_m32fp
Mnemonic::Fdiv,// Fdiv_m32fp
Mnemonic::Fdivr,// Fdivr_m32fp
Mnemonic::Fadd,// Fadd_st0_sti
Mnemonic::Fmul,// Fmul_st0_sti
Mnemonic::Fcom,// Fcom_st0_sti
Mnemonic::Fcomp,// Fcomp_st0_sti
Mnemonic::Fsub,// Fsub_st0_sti
Mnemonic::Fsubr,// Fsubr_st0_sti
Mnemonic::Fdiv,// Fdiv_st0_sti
Mnemonic::Fdivr,// Fdivr_st0_sti
Mnemonic::Fld,// Fld_m32fp
Mnemonic::Fst,// Fst_m32fp
Mnemonic::Fstp,// Fstp_m32fp
Mnemonic::Fldenv,// Fldenv_m14byte
Mnemonic::Fldenv,// Fldenv_m28byte
Mnemonic::Fldcw,// Fldcw_m2byte
Mnemonic::Fnstenv,// Fnstenv_m14byte
Mnemonic::Fstenv,// Fstenv_m14byte
Mnemonic::Fnstenv,// Fnstenv_m28byte
Mnemonic::Fstenv,// Fstenv_m28byte
Mnemonic::Fnstcw,// Fnstcw_m2byte
Mnemonic::Fstcw,// Fstcw_m2byte
Mnemonic::Fld,// Fld_sti
Mnemonic::Fxch,// Fxch_st0_sti
Mnemonic::Fnop,// Fnop
Mnemonic::Fstpnce,// Fstpnce_sti
Mnemonic::Fchs,// Fchs
Mnemonic::Fabs,// Fabs
Mnemonic::Ftst,// Ftst
Mnemonic::Fxam,// Fxam
Mnemonic::Fld1,// Fld1
Mnemonic::Fldl2t,// Fldl2t
Mnemonic::Fldl2e,// Fldl2e
Mnemonic::Fldpi,// Fldpi
Mnemonic::Fldlg2,// Fldlg2
Mnemonic::Fldln2,// Fldln2
Mnemonic::Fldz,// Fldz
Mnemonic::F2xm1,// F2xm1
Mnemonic::Fyl2x,// Fyl2x
Mnemonic::Fptan,// Fptan
Mnemonic::Fpatan,// Fpatan
Mnemonic::Fxtract,// Fxtract
Mnemonic::Fprem1,// Fprem1
Mnemonic::Fdecstp,// Fdecstp
Mnemonic::Fincstp,// Fincstp
Mnemonic::Fprem,// Fprem
Mnemonic::Fyl2xp1,// Fyl2xp1
Mnemonic::Fsqrt,// Fsqrt
Mnemonic::Fsincos,// Fsincos
Mnemonic::Frndint,// Frndint
Mnemonic::Fscale,// Fscale
Mnemonic::Fsin,// Fsin
Mnemonic::Fcos,// Fcos
Mnemonic::Fiadd,// Fiadd_m32int
Mnemonic::Fimul,// Fimul_m32int
Mnemonic::Ficom,// Ficom_m32int
Mnemonic::Ficomp,// Ficomp_m32int
Mnemonic::Fisub,// Fisub_m32int
Mnemonic::Fisubr,// Fisubr_m32int
Mnemonic::Fidiv,// Fidiv_m32int
Mnemonic::Fidivr,// Fidivr_m32int
Mnemonic::Fcmovb,// Fcmovb_st0_sti
Mnemonic::Fcmove,// Fcmove_st0_sti
Mnemonic::Fcmovbe,// Fcmovbe_st0_sti
Mnemonic::Fcmovu,// Fcmovu_st0_sti
Mnemonic::Fucompp,// Fucompp
Mnemonic::Fild,// Fild_m32int
Mnemonic::Fisttp,// Fisttp_m32int
Mnemonic::Fist,// Fist_m32int
Mnemonic::Fistp,// Fistp_m32int
Mnemonic::Fld,// Fld_m80fp
Mnemonic::Fstp,// Fstp_m80fp
Mnemonic::Fcmovnb,// Fcmovnb_st0_sti
Mnemonic::Fcmovne,// Fcmovne_st0_sti
Mnemonic::Fcmovnbe,// Fcmovnbe_st0_sti
Mnemonic::Fcmovnu,// Fcmovnu_st0_sti
Mnemonic::Fneni,// Fneni
Mnemonic::Feni,// Feni
Mnemonic::Fndisi,// Fndisi
Mnemonic::Fdisi,// Fdisi
Mnemonic::Fnclex,// Fnclex
Mnemonic::Fclex,// Fclex
Mnemonic::Fninit,// Fninit
Mnemonic::Finit,// Finit
Mnemonic::Fnsetpm,// Fnsetpm
Mnemonic::Fsetpm,// Fsetpm
Mnemonic::Frstpm,// Frstpm
Mnemonic::Fucomi,// Fucomi_st0_sti
Mnemonic::Fcomi,// Fcomi_st0_sti
Mnemonic::Fadd,// Fadd_m64fp
Mnemonic::Fmul,// Fmul_m64fp
Mnemonic::Fcom,// Fcom_m64fp
Mnemonic::Fcomp,// Fcomp_m64fp
Mnemonic::Fsub,// Fsub_m64fp
Mnemonic::Fsubr,// Fsubr_m64fp
Mnemonic::Fdiv,// Fdiv_m64fp
Mnemonic::Fdivr,// Fdivr_m64fp
Mnemonic::Fadd,// Fadd_sti_st0
Mnemonic::Fmul,// Fmul_sti_st0
Mnemonic::Fcom,// Fcom_st0_sti_DCD0
Mnemonic::Fcomp,// Fcomp_st0_sti_DCD8
Mnemonic::Fsubr,// Fsubr_sti_st0
Mnemonic::Fsub,// Fsub_sti_st0
Mnemonic::Fdivr,// Fdivr_sti_st0
Mnemonic::Fdiv,// Fdiv_sti_st0
Mnemonic::Fld,// Fld_m64fp
Mnemonic::Fisttp,// Fisttp_m64int
Mnemonic::Fst,// Fst_m64fp
Mnemonic::Fstp,// Fstp_m64fp
Mnemonic::Frstor,// Frstor_m94byte
Mnemonic::Frstor,// Frstor_m108byte
Mnemonic::Fnsave,// Fnsave_m94byte
Mnemonic::Fsave,// Fsave_m94byte
Mnemonic::Fnsave,// Fnsave_m108byte
Mnemonic::Fsave,// Fsave_m108byte
Mnemonic::Fnstsw,// Fnstsw_m2byte
Mnemonic::Fstsw,// Fstsw_m2byte
Mnemonic::Ffree,// Ffree_sti
Mnemonic::Fxch,// Fxch_st0_sti_DDC8
Mnemonic::Fst,// Fst_sti
Mnemonic::Fstp,// Fstp_sti
Mnemonic::Fucom,// Fucom_st0_sti
Mnemonic::Fucomp,// Fucomp_st0_sti
Mnemonic::Fiadd,// Fiadd_m16int
Mnemonic::Fimul,// Fimul_m16int
Mnemonic::Ficom,// Ficom_m16int
Mnemonic::Ficomp,// Ficomp_m16int
Mnemonic::Fisub,// Fisub_m16int
Mnemonic::Fisubr,// Fisubr_m16int
Mnemonic::Fidiv,// Fidiv_m16int
Mnemonic::Fidivr,// Fidivr_m16int
Mnemonic::Faddp,// Faddp_sti_st0
Mnemonic::Fmulp,// Fmulp_sti_st0
Mnemonic::Fcomp,// Fcomp_st0_sti_DED0
Mnemonic::Fcompp,// Fcompp
Mnemonic::Fsubrp,// Fsubrp_sti_st0
Mnemonic::Fsubp,// Fsubp_sti_st0
Mnemonic::Fdivrp,// Fdivrp_sti_st0
Mnemonic::Fdivp,// Fdivp_sti_st0
Mnemonic::Fild,// Fild_m16int
Mnemonic::Fisttp,// Fisttp_m16int
Mnemonic::Fist,// Fist_m16int
Mnemonic::Fistp,// Fistp_m16int
Mnemonic::Fbld,// Fbld_m80bcd
Mnemonic::Fild,// Fild_m64int
Mnemonic::Fbstp,// Fbstp_m80bcd
Mnemonic::Fistp,// Fistp_m64int
Mnemonic::Ffreep,// Ffreep_sti
Mnemonic::Fxch,// Fxch_st0_sti_DFC8
Mnemonic::Fstp,// Fstp_sti_DFD0
Mnemonic::Fstp,// Fstp_sti_DFD8
Mnemonic::Fnstsw,// Fnstsw_AX
Mnemonic::Fstsw,// Fstsw_AX
Mnemonic::Fstdw,// Fstdw_AX
Mnemonic::Fstsg,// Fstsg_AX
Mnemonic::Fucomip,// Fucomip_st0_sti
Mnemonic::Fcomip,// Fcomip_st0_sti
Mnemonic::Loopne,// Loopne_rel8_16_CX
Mnemonic::Loopne,// Loopne_rel8_32_CX
Mnemonic::Loopne,// Loopne_rel8_16_ECX
Mnemonic::Loopne,// Loopne_rel8_32_ECX
Mnemonic::Loopne,// Loopne_rel8_64_ECX
Mnemonic::Loopne,// Loopne_rel8_16_RCX
Mnemonic::Loopne,// Loopne_rel8_64_RCX
Mnemonic::Loope,// Loope_rel8_16_CX
Mnemonic::Loope,// Loope_rel8_32_CX
Mnemonic::Loope,// Loope_rel8_16_ECX
Mnemonic::Loope,// Loope_rel8_32_ECX
Mnemonic::Loope,// Loope_rel8_64_ECX
Mnemonic::Loope,// Loope_rel8_16_RCX
Mnemonic::Loope,// Loope_rel8_64_RCX
Mnemonic::Loop,// Loop_rel8_16_CX
Mnemonic::Loop,// Loop_rel8_32_CX
Mnemonic::Loop,// Loop_rel8_16_ECX
Mnemonic::Loop,// Loop_rel8_32_ECX
Mnemonic::Loop,// Loop_rel8_64_ECX
Mnemonic::Loop,// Loop_rel8_16_RCX
Mnemonic::Loop,// Loop_rel8_64_RCX
Mnemonic::Jcxz,// Jcxz_rel8_16
Mnemonic::Jcxz,// Jcxz_rel8_32
Mnemonic::Jecxz,// Jecxz_rel8_16
Mnemonic::Jecxz,// Jecxz_rel8_32
Mnemonic::Jecxz,// Jecxz_rel8_64
Mnemonic::Jrcxz,// Jrcxz_rel8_16
Mnemonic::Jrcxz,// Jrcxz_rel8_64
Mnemonic::In,// In_AL_imm8
Mnemonic::In,// In_AX_imm8
Mnemonic::In,// In_EAX_imm8
Mnemonic::Out,// Out_imm8_AL
Mnemonic::Out,// Out_imm8_AX
Mnemonic::Out,// Out_imm8_EAX
Mnemonic::Call,// Call_rel16
Mnemonic::Call,// Call_rel32_32
Mnemonic::Call,// Call_rel32_64
Mnemonic::Jmp,// Jmp_rel16
Mnemonic::Jmp,// Jmp_rel32_32
Mnemonic::Jmp,// Jmp_rel32_64
Mnemonic::Jmp,// Jmp_ptr1616
Mnemonic::Jmp,// Jmp_ptr1632
Mnemonic::Jmp,// Jmp_rel8_16
Mnemonic::Jmp,// Jmp_rel8_32
Mnemonic::Jmp,// Jmp_rel8_64
Mnemonic::In,// In_AL_DX
Mnemonic::In,// In_AX_DX
Mnemonic::In,// In_EAX_DX
Mnemonic::Out,// Out_DX_AL
Mnemonic::Out,// Out_DX_AX
Mnemonic::Out,// Out_DX_EAX
Mnemonic::Int1,// Int1
Mnemonic::Hlt,// Hlt
Mnemonic::Cmc,// Cmc
Mnemonic::Test,// Test_rm8_imm8
Mnemonic::Test,// Test_rm8_imm8_F6r1
Mnemonic::Not,// Not_rm8
Mnemonic::Neg,// Neg_rm8
Mnemonic::Mul,// Mul_rm8
Mnemonic::Imul,// Imul_rm8
Mnemonic::Div,// Div_rm8
Mnemonic::Idiv,// Idiv_rm8
Mnemonic::Test,// Test_rm16_imm16
Mnemonic::Test,// Test_rm32_imm32
Mnemonic::Test,// Test_rm64_imm32
Mnemonic::Test,// Test_rm16_imm16_F7r1
Mnemonic::Test,// Test_rm32_imm32_F7r1
Mnemonic::Test,// Test_rm64_imm32_F7r1
Mnemonic::Not,// Not_rm16
Mnemonic::Not,// Not_rm32
Mnemonic::Not,// Not_rm64
Mnemonic::Neg,// Neg_rm16
Mnemonic::Neg,// Neg_rm32
Mnemonic::Neg,// Neg_rm64
Mnemonic::Mul,// Mul_rm16
Mnemonic::Mul,// Mul_rm32
Mnemonic::Mul,// Mul_rm64
Mnemonic::Imul,// Imul_rm16
Mnemonic::Imul,// Imul_rm32
Mnemonic::Imul,// Imul_rm64
Mnemonic::Div,// Div_rm16
Mnemonic::Div,// Div_rm32
Mnemonic::Div,// Div_rm64
Mnemonic::Idiv,// Idiv_rm16
Mnemonic::Idiv,// Idiv_rm32
Mnemonic::Idiv,// Idiv_rm64
Mnemonic::Clc,// Clc
Mnemonic::Stc,// Stc
Mnemonic::Cli,// Cli
Mnemonic::Sti,// Sti
Mnemonic::Cld,// Cld
Mnemonic::Std,// Std
Mnemonic::Inc,// Inc_rm8
Mnemonic::Dec,// Dec_rm8
Mnemonic::Inc,// Inc_rm16
Mnemonic::Inc,// Inc_rm32
Mnemonic::Inc,// Inc_rm64
Mnemonic::Dec,// Dec_rm16
Mnemonic::Dec,// Dec_rm32
Mnemonic::Dec,// Dec_rm64
Mnemonic::Call,// Call_rm16
Mnemonic::Call,// Call_rm32
Mnemonic::Call,// Call_rm64
Mnemonic::Call,// Call_m1616
Mnemonic::Call,// Call_m1632
Mnemonic::Call,// Call_m1664
Mnemonic::Jmp,// Jmp_rm16
Mnemonic::Jmp,// Jmp_rm32
Mnemonic::Jmp,// Jmp_rm64
Mnemonic::Jmp,// Jmp_m1616
Mnemonic::Jmp,// Jmp_m1632
Mnemonic::Jmp,// Jmp_m1664
Mnemonic::Push,// Push_rm16
Mnemonic::Push,// Push_rm32
Mnemonic::Push,// Push_rm64
Mnemonic::Sldt,// Sldt_rm16
Mnemonic::Sldt,// Sldt_r32m16
Mnemonic::Sldt,// Sldt_r64m16
Mnemonic::Str,// Str_rm16
Mnemonic::Str,// Str_r32m16
Mnemonic::Str,// Str_r64m16
Mnemonic::Lldt,// Lldt_rm16
Mnemonic::Lldt,// Lldt_r32m16
Mnemonic::Lldt,// Lldt_r64m16
Mnemonic::Ltr,// Ltr_rm16
Mnemonic::Ltr,// Ltr_r32m16
Mnemonic::Ltr,// Ltr_r64m16
Mnemonic::Verr,// Verr_rm16
Mnemonic::Verr,// Verr_r32m16
Mnemonic::Verr,// Verr_r64m16
Mnemonic::Verw,// Verw_rm16
Mnemonic::Verw,// Verw_r32m16
Mnemonic::Verw,// Verw_r64m16
Mnemonic::Jmpe,// Jmpe_rm16
Mnemonic::Jmpe,// Jmpe_rm32
Mnemonic::Sgdt,// Sgdt_m1632_16
Mnemonic::Sgdt,// Sgdt_m1632
Mnemonic::Sgdt,// Sgdt_m1664
Mnemonic::Sidt,// Sidt_m1632_16
Mnemonic::Sidt,// Sidt_m1632
Mnemonic::Sidt,// Sidt_m1664
Mnemonic::Lgdt,// Lgdt_m1632_16
Mnemonic::Lgdt,// Lgdt_m1632
Mnemonic::Lgdt,// Lgdt_m1664
Mnemonic::Lidt,// Lidt_m1632_16
Mnemonic::Lidt,// Lidt_m1632
Mnemonic::Lidt,// Lidt_m1664
Mnemonic::Smsw,// Smsw_rm16
Mnemonic::Smsw,// Smsw_r32m16
Mnemonic::Smsw,// Smsw_r64m16
Mnemonic::Rstorssp,// Rstorssp_m64
Mnemonic::Lmsw,// Lmsw_rm16
Mnemonic::Lmsw,// Lmsw_r32m16
Mnemonic::Lmsw,// Lmsw_r64m16
Mnemonic::Invlpg,// Invlpg_m
Mnemonic::Enclv,// Enclv
Mnemonic::Vmcall,// Vmcall
Mnemonic::Vmlaunch,// Vmlaunch
Mnemonic::Vmresume,// Vmresume
Mnemonic::Vmxoff,// Vmxoff
Mnemonic::Pconfig,// Pconfig
Mnemonic::Monitor,// Monitorw
Mnemonic::Monitor,// Monitord
Mnemonic::Monitor,// Monitorq
Mnemonic::Mwait,// Mwait
Mnemonic::Clac,// Clac
Mnemonic::Stac,// Stac
Mnemonic::Encls,// Encls
Mnemonic::Xgetbv,// Xgetbv
Mnemonic::Xsetbv,// Xsetbv
Mnemonic::Vmfunc,// Vmfunc
Mnemonic::Xend,// Xend
Mnemonic::Xtest,// Xtest
Mnemonic::Enclu,// Enclu
Mnemonic::Vmrun,// Vmrunw
Mnemonic::Vmrun,// Vmrund
Mnemonic::Vmrun,// Vmrunq
Mnemonic::Vmmcall,// Vmmcall
Mnemonic::Vmload,// Vmloadw
Mnemonic::Vmload,// Vmloadd
Mnemonic::Vmload,// Vmloadq
Mnemonic::Vmsave,// Vmsavew
Mnemonic::Vmsave,// Vmsaved
Mnemonic::Vmsave,// Vmsaveq
Mnemonic::Stgi,// Stgi
Mnemonic::Clgi,// Clgi
Mnemonic::Skinit,// Skinit
Mnemonic::Invlpga,// Invlpgaw
Mnemonic::Invlpga,// Invlpgad
Mnemonic::Invlpga,// Invlpgaq
Mnemonic::Setssbsy,// Setssbsy
Mnemonic::Saveprevssp,// Saveprevssp
Mnemonic::Rdpkru,// Rdpkru
Mnemonic::Wrpkru,// Wrpkru
Mnemonic::Swapgs,// Swapgs
Mnemonic::Rdtscp,// Rdtscp
Mnemonic::Monitorx,// Monitorxw
Mnemonic::Monitorx,// Monitorxd
Mnemonic::Monitorx,// Monitorxq
Mnemonic::Mcommit,// Mcommit
Mnemonic::Mwaitx,// Mwaitx
Mnemonic::Clzero,// Clzerow
Mnemonic::Clzero,// Clzerod
Mnemonic::Clzero,// Clzeroq
Mnemonic::Rdpru,// Rdpru
Mnemonic::Lar,// Lar_r16_rm16
Mnemonic::Lar,// Lar_r32_r32m16
Mnemonic::Lar,// Lar_r64_r64m16
Mnemonic::Lsl,// Lsl_r16_rm16
Mnemonic::Lsl,// Lsl_r32_r32m16
Mnemonic::Lsl,// Lsl_r64_r64m16
Mnemonic::Loadall,// Loadallreset286
Mnemonic::Loadall,// Loadall286
Mnemonic::Syscall,// Syscall
Mnemonic::Clts,// Clts
Mnemonic::Loadall,// Loadall386
Mnemonic::Sysret,// Sysretd
Mnemonic::Sysretq,// Sysretq
Mnemonic::Invd,// Invd
Mnemonic::Wbinvd,// Wbinvd
Mnemonic::Wbnoinvd,// Wbnoinvd
Mnemonic::Cl1invmb,// Cl1invmb
Mnemonic::Ud2,// Ud2
Mnemonic::Reservednop,// Reservednop_rm16_r16_0F0D
Mnemonic::Reservednop,// Reservednop_rm32_r32_0F0D
Mnemonic::Reservednop,// Reservednop_rm64_r64_0F0D
Mnemonic::Prefetch,// Prefetch_m8
Mnemonic::Prefetchw,// Prefetchw_m8
Mnemonic::Prefetchwt1,// Prefetchwt1_m8
Mnemonic::Femms,// Femms
Mnemonic::Umov,// Umov_rm8_r8
Mnemonic::Umov,// Umov_rm16_r16
Mnemonic::Umov,// Umov_rm32_r32
Mnemonic::Umov,// Umov_r8_rm8
Mnemonic::Umov,// Umov_r16_rm16
Mnemonic::Umov,// Umov_r32_rm32
Mnemonic::Movups,// Movups_xmm_xmmm128
Mnemonic::Vmovups,// VEX_Vmovups_xmm_xmmm128
Mnemonic::Vmovups,// VEX_Vmovups_ymm_ymmm256
Mnemonic::Vmovups,// EVEX_Vmovups_xmm_k1z_xmmm128
Mnemonic::Vmovups,// EVEX_Vmovups_ymm_k1z_ymmm256
Mnemonic::Vmovups,// EVEX_Vmovups_zmm_k1z_zmmm512
Mnemonic::Movupd,// Movupd_xmm_xmmm128
Mnemonic::Vmovupd,// VEX_Vmovupd_xmm_xmmm128
Mnemonic::Vmovupd,// VEX_Vmovupd_ymm_ymmm256
Mnemonic::Vmovupd,// EVEX_Vmovupd_xmm_k1z_xmmm128
Mnemonic::Vmovupd,// EVEX_Vmovupd_ymm_k1z_ymmm256
Mnemonic::Vmovupd,// EVEX_Vmovupd_zmm_k1z_zmmm512
Mnemonic::Movss,// Movss_xmm_xmmm32
Mnemonic::Vmovss,// VEX_Vmovss_xmm_xmm_xmm
Mnemonic::Vmovss,// VEX_Vmovss_xmm_m32
Mnemonic::Vmovss,// EVEX_Vmovss_xmm_k1z_xmm_xmm
Mnemonic::Vmovss,// EVEX_Vmovss_xmm_k1z_m32
Mnemonic::Movsd,// Movsd_xmm_xmmm64
Mnemonic::Vmovsd,// VEX_Vmovsd_xmm_xmm_xmm
Mnemonic::Vmovsd,// VEX_Vmovsd_xmm_m64
Mnemonic::Vmovsd,// EVEX_Vmovsd_xmm_k1z_xmm_xmm
Mnemonic::Vmovsd,// EVEX_Vmovsd_xmm_k1z_m64
Mnemonic::Movups,// Movups_xmmm128_xmm
Mnemonic::Vmovups,// VEX_Vmovups_xmmm128_xmm
Mnemonic::Vmovups,// VEX_Vmovups_ymmm256_ymm
Mnemonic::Vmovups,// EVEX_Vmovups_xmmm128_k1z_xmm
Mnemonic::Vmovups,// EVEX_Vmovups_ymmm256_k1z_ymm
Mnemonic::Vmovups,// EVEX_Vmovups_zmmm512_k1z_zmm
Mnemonic::Movupd,// Movupd_xmmm128_xmm
Mnemonic::Vmovupd,// VEX_Vmovupd_xmmm128_xmm
Mnemonic::Vmovupd,// VEX_Vmovupd_ymmm256_ymm
Mnemonic::Vmovupd,// EVEX_Vmovupd_xmmm128_k1z_xmm
Mnemonic::Vmovupd,// EVEX_Vmovupd_ymmm256_k1z_ymm
Mnemonic::Vmovupd,// EVEX_Vmovupd_zmmm512_k1z_zmm
Mnemonic::Movss,// Movss_xmmm32_xmm
Mnemonic::Vmovss,// VEX_Vmovss_xmm_xmm_xmm_0F11
Mnemonic::Vmovss,// VEX_Vmovss_m32_xmm
Mnemonic::Vmovss,// EVEX_Vmovss_xmm_k1z_xmm_xmm_0F11
Mnemonic::Vmovss,// EVEX_Vmovss_m32_k1_xmm
Mnemonic::Movsd,// Movsd_xmmm64_xmm
Mnemonic::Vmovsd,// VEX_Vmovsd_xmm_xmm_xmm_0F11
Mnemonic::Vmovsd,// VEX_Vmovsd_m64_xmm
Mnemonic::Vmovsd,// EVEX_Vmovsd_xmm_k1z_xmm_xmm_0F11
Mnemonic::Vmovsd,// EVEX_Vmovsd_m64_k1_xmm
Mnemonic::Movhlps,// Movhlps_xmm_xmm
Mnemonic::Movlps,// Movlps_xmm_m64
Mnemonic::Vmovhlps,// VEX_Vmovhlps_xmm_xmm_xmm
Mnemonic::Vmovlps,// VEX_Vmovlps_xmm_xmm_m64
Mnemonic::Vmovhlps,// EVEX_Vmovhlps_xmm_xmm_xmm
Mnemonic::Vmovlps,// EVEX_Vmovlps_xmm_xmm_m64
Mnemonic::Movlpd,// Movlpd_xmm_m64
Mnemonic::Vmovlpd,// VEX_Vmovlpd_xmm_xmm_m64
Mnemonic::Vmovlpd,// EVEX_Vmovlpd_xmm_xmm_m64
Mnemonic::Movsldup,// Movsldup_xmm_xmmm128
Mnemonic::Vmovsldup,// VEX_Vmovsldup_xmm_xmmm128
Mnemonic::Vmovsldup,// VEX_Vmovsldup_ymm_ymmm256
Mnemonic::Vmovsldup,// EVEX_Vmovsldup_xmm_k1z_xmmm128
Mnemonic::Vmovsldup,// EVEX_Vmovsldup_ymm_k1z_ymmm256
Mnemonic::Vmovsldup,// EVEX_Vmovsldup_zmm_k1z_zmmm512
Mnemonic::Movddup,// Movddup_xmm_xmmm64
Mnemonic::Vmovddup,// VEX_Vmovddup_xmm_xmmm64
Mnemonic::Vmovddup,// VEX_Vmovddup_ymm_ymmm256
Mnemonic::Vmovddup,// EVEX_Vmovddup_xmm_k1z_xmmm64
Mnemonic::Vmovddup,// EVEX_Vmovddup_ymm_k1z_ymmm256
Mnemonic::Vmovddup,// EVEX_Vmovddup_zmm_k1z_zmmm512
Mnemonic::Movlps,// Movlps_m64_xmm
Mnemonic::Vmovlps,// VEX_Vmovlps_m64_xmm
Mnemonic::Vmovlps,// EVEX_Vmovlps_m64_xmm
Mnemonic::Movlpd,// Movlpd_m64_xmm
Mnemonic::Vmovlpd,// VEX_Vmovlpd_m64_xmm
Mnemonic::Vmovlpd,// EVEX_Vmovlpd_m64_xmm
Mnemonic::Unpcklps,// Unpcklps_xmm_xmmm128
Mnemonic::Vunpcklps,// VEX_Vunpcklps_xmm_xmm_xmmm128
Mnemonic::Vunpcklps,// VEX_Vunpcklps_ymm_ymm_ymmm256
Mnemonic::Vunpcklps,// EVEX_Vunpcklps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vunpcklps,// EVEX_Vunpcklps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vunpcklps,// EVEX_Vunpcklps_zmm_k1z_zmm_zmmm512b32
Mnemonic::Unpcklpd,// Unpcklpd_xmm_xmmm128
Mnemonic::Vunpcklpd,// VEX_Vunpcklpd_xmm_xmm_xmmm128
Mnemonic::Vunpcklpd,// VEX_Vunpcklpd_ymm_ymm_ymmm256
Mnemonic::Vunpcklpd,// EVEX_Vunpcklpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vunpcklpd,// EVEX_Vunpcklpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vunpcklpd,// EVEX_Vunpcklpd_zmm_k1z_zmm_zmmm512b64
Mnemonic::Unpckhps,// Unpckhps_xmm_xmmm128
Mnemonic::Vunpckhps,// VEX_Vunpckhps_xmm_xmm_xmmm128
Mnemonic::Vunpckhps,// VEX_Vunpckhps_ymm_ymm_ymmm256
Mnemonic::Vunpckhps,// EVEX_Vunpckhps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vunpckhps,// EVEX_Vunpckhps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vunpckhps,// EVEX_Vunpckhps_zmm_k1z_zmm_zmmm512b32
Mnemonic::Unpckhpd,// Unpckhpd_xmm_xmmm128
Mnemonic::Vunpckhpd,// VEX_Vunpckhpd_xmm_xmm_xmmm128
Mnemonic::Vunpckhpd,// VEX_Vunpckhpd_ymm_ymm_ymmm256
Mnemonic::Vunpckhpd,// EVEX_Vunpckhpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vunpckhpd,// EVEX_Vunpckhpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vunpckhpd,// EVEX_Vunpckhpd_zmm_k1z_zmm_zmmm512b64
Mnemonic::Movlhps,// Movlhps_xmm_xmm
Mnemonic::Vmovlhps,// VEX_Vmovlhps_xmm_xmm_xmm
Mnemonic::Vmovlhps,// EVEX_Vmovlhps_xmm_xmm_xmm
Mnemonic::Movhps,// Movhps_xmm_m64
Mnemonic::Vmovhps,// VEX_Vmovhps_xmm_xmm_m64
Mnemonic::Vmovhps,// EVEX_Vmovhps_xmm_xmm_m64
Mnemonic::Movhpd,// Movhpd_xmm_m64
Mnemonic::Vmovhpd,// VEX_Vmovhpd_xmm_xmm_m64
Mnemonic::Vmovhpd,// EVEX_Vmovhpd_xmm_xmm_m64
Mnemonic::Movshdup,// Movshdup_xmm_xmmm128
Mnemonic::Vmovshdup,// VEX_Vmovshdup_xmm_xmmm128
Mnemonic::Vmovshdup,// VEX_Vmovshdup_ymm_ymmm256
Mnemonic::Vmovshdup,// EVEX_Vmovshdup_xmm_k1z_xmmm128
Mnemonic::Vmovshdup,// EVEX_Vmovshdup_ymm_k1z_ymmm256
Mnemonic::Vmovshdup,// EVEX_Vmovshdup_zmm_k1z_zmmm512
Mnemonic::Movhps,// Movhps_m64_xmm
Mnemonic::Vmovhps,// VEX_Vmovhps_m64_xmm
Mnemonic::Vmovhps,// EVEX_Vmovhps_m64_xmm
Mnemonic::Movhpd,// Movhpd_m64_xmm
Mnemonic::Vmovhpd,// VEX_Vmovhpd_m64_xmm
Mnemonic::Vmovhpd,// EVEX_Vmovhpd_m64_xmm
Mnemonic::Reservednop,// Reservednop_rm16_r16_0F18
Mnemonic::Reservednop,// Reservednop_rm32_r32_0F18
Mnemonic::Reservednop,// Reservednop_rm64_r64_0F18
Mnemonic::Reservednop,// Reservednop_rm16_r16_0F19
Mnemonic::Reservednop,// Reservednop_rm32_r32_0F19
Mnemonic::Reservednop,// Reservednop_rm64_r64_0F19
Mnemonic::Reservednop,// Reservednop_rm16_r16_0F1A
Mnemonic::Reservednop,// Reservednop_rm32_r32_0F1A
Mnemonic::Reservednop,// Reservednop_rm64_r64_0F1A
Mnemonic::Reservednop,// Reservednop_rm16_r16_0F1B
Mnemonic::Reservednop,// Reservednop_rm32_r32_0F1B
Mnemonic::Reservednop,// Reservednop_rm64_r64_0F1B
Mnemonic::Reservednop,// Reservednop_rm16_r16_0F1C
Mnemonic::Reservednop,// Reservednop_rm32_r32_0F1C
Mnemonic::Reservednop,// Reservednop_rm64_r64_0F1C
Mnemonic::Reservednop,// Reservednop_rm16_r16_0F1D
Mnemonic::Reservednop,// Reservednop_rm32_r32_0F1D
Mnemonic::Reservednop,// Reservednop_rm64_r64_0F1D
Mnemonic::Reservednop,// Reservednop_rm16_r16_0F1E
Mnemonic::Reservednop,// Reservednop_rm32_r32_0F1E
Mnemonic::Reservednop,// Reservednop_rm64_r64_0F1E
Mnemonic::Reservednop,// Reservednop_rm16_r16_0F1F
Mnemonic::Reservednop,// Reservednop_rm32_r32_0F1F
Mnemonic::Reservednop,// Reservednop_rm64_r64_0F1F
Mnemonic::Prefetchnta,// Prefetchnta_m8
Mnemonic::Prefetcht0,// Prefetcht0_m8
Mnemonic::Prefetcht1,// Prefetcht1_m8
Mnemonic::Prefetcht2,// Prefetcht2_m8
Mnemonic::Bndldx,// Bndldx_bnd_mib
Mnemonic::Bndmov,// Bndmov_bnd_bndm64
Mnemonic::Bndmov,// Bndmov_bnd_bndm128
Mnemonic::Bndcl,// Bndcl_bnd_rm32
Mnemonic::Bndcl,// Bndcl_bnd_rm64
Mnemonic::Bndcu,// Bndcu_bnd_rm32
Mnemonic::Bndcu,// Bndcu_bnd_rm64
Mnemonic::Bndstx,// Bndstx_mib_bnd
Mnemonic::Bndmov,// Bndmov_bndm64_bnd
Mnemonic::Bndmov,// Bndmov_bndm128_bnd
Mnemonic::Bndmk,// Bndmk_bnd_m32
Mnemonic::Bndmk,// Bndmk_bnd_m64
Mnemonic::Bndcn,// Bndcn_bnd_rm32
Mnemonic::Bndcn,// Bndcn_bnd_rm64
Mnemonic::Cldemote,// Cldemote_m8
Mnemonic::Rdsspd,// Rdsspd_r32
Mnemonic::Rdsspq,// Rdsspq_r64
Mnemonic::Endbr64,// Endbr64
Mnemonic::Endbr32,// Endbr32
Mnemonic::Nop,// Nop_rm16
Mnemonic::Nop,// Nop_rm32
Mnemonic::Nop,// Nop_rm64
Mnemonic::Mov,// Mov_r32_cr
Mnemonic::Mov,// Mov_r64_cr
Mnemonic::Mov,// Mov_r32_dr
Mnemonic::Mov,// Mov_r64_dr
Mnemonic::Mov,// Mov_cr_r32
Mnemonic::Mov,// Mov_cr_r64
Mnemonic::Mov,// Mov_dr_r32
Mnemonic::Mov,// Mov_dr_r64
Mnemonic::Mov,// Mov_r32_tr
Mnemonic::Mov,// Mov_tr_r32
Mnemonic::Movaps,// Movaps_xmm_xmmm128
Mnemonic::Vmovaps,// VEX_Vmovaps_xmm_xmmm128
Mnemonic::Vmovaps,// VEX_Vmovaps_ymm_ymmm256
Mnemonic::Vmovaps,// EVEX_Vmovaps_xmm_k1z_xmmm128
Mnemonic::Vmovaps,// EVEX_Vmovaps_ymm_k1z_ymmm256
Mnemonic::Vmovaps,// EVEX_Vmovaps_zmm_k1z_zmmm512
Mnemonic::Movapd,// Movapd_xmm_xmmm128
Mnemonic::Vmovapd,// VEX_Vmovapd_xmm_xmmm128
Mnemonic::Vmovapd,// VEX_Vmovapd_ymm_ymmm256
Mnemonic::Vmovapd,// EVEX_Vmovapd_xmm_k1z_xmmm128
Mnemonic::Vmovapd,// EVEX_Vmovapd_ymm_k1z_ymmm256
Mnemonic::Vmovapd,// EVEX_Vmovapd_zmm_k1z_zmmm512
Mnemonic::Movaps,// Movaps_xmmm128_xmm
Mnemonic::Vmovaps,// VEX_Vmovaps_xmmm128_xmm
Mnemonic::Vmovaps,// VEX_Vmovaps_ymmm256_ymm
Mnemonic::Vmovaps,// EVEX_Vmovaps_xmmm128_k1z_xmm
Mnemonic::Vmovaps,// EVEX_Vmovaps_ymmm256_k1z_ymm
Mnemonic::Vmovaps,// EVEX_Vmovaps_zmmm512_k1z_zmm
Mnemonic::Movapd,// Movapd_xmmm128_xmm
Mnemonic::Vmovapd,// VEX_Vmovapd_xmmm128_xmm
Mnemonic::Vmovapd,// VEX_Vmovapd_ymmm256_ymm
Mnemonic::Vmovapd,// EVEX_Vmovapd_xmmm128_k1z_xmm
Mnemonic::Vmovapd,// EVEX_Vmovapd_ymmm256_k1z_ymm
Mnemonic::Vmovapd,// EVEX_Vmovapd_zmmm512_k1z_zmm
Mnemonic::Cvtpi2ps,// Cvtpi2ps_xmm_mmm64
Mnemonic::Cvtpi2pd,// Cvtpi2pd_xmm_mmm64
Mnemonic::Cvtsi2ss,// Cvtsi2ss_xmm_rm32
Mnemonic::Cvtsi2ss,// Cvtsi2ss_xmm_rm64
Mnemonic::Vcvtsi2ss,// VEX_Vcvtsi2ss_xmm_xmm_rm32
Mnemonic::Vcvtsi2ss,// VEX_Vcvtsi2ss_xmm_xmm_rm64
Mnemonic::Vcvtsi2ss,// EVEX_Vcvtsi2ss_xmm_xmm_rm32_er
Mnemonic::Vcvtsi2ss,// EVEX_Vcvtsi2ss_xmm_xmm_rm64_er
Mnemonic::Cvtsi2sd,// Cvtsi2sd_xmm_rm32
Mnemonic::Cvtsi2sd,// Cvtsi2sd_xmm_rm64
Mnemonic::Vcvtsi2sd,// VEX_Vcvtsi2sd_xmm_xmm_rm32
Mnemonic::Vcvtsi2sd,// VEX_Vcvtsi2sd_xmm_xmm_rm64
Mnemonic::Vcvtsi2sd,// EVEX_Vcvtsi2sd_xmm_xmm_rm32_er
Mnemonic::Vcvtsi2sd,// EVEX_Vcvtsi2sd_xmm_xmm_rm64_er
Mnemonic::Movntps,// Movntps_m128_xmm
Mnemonic::Vmovntps,// VEX_Vmovntps_m128_xmm
Mnemonic::Vmovntps,// VEX_Vmovntps_m256_ymm
Mnemonic::Vmovntps,// EVEX_Vmovntps_m128_xmm
Mnemonic::Vmovntps,// EVEX_Vmovntps_m256_ymm
Mnemonic::Vmovntps,// EVEX_Vmovntps_m512_zmm
Mnemonic::Movntpd,// Movntpd_m128_xmm
Mnemonic::Vmovntpd,// VEX_Vmovntpd_m128_xmm
Mnemonic::Vmovntpd,// VEX_Vmovntpd_m256_ymm
Mnemonic::Vmovntpd,// EVEX_Vmovntpd_m128_xmm
Mnemonic::Vmovntpd,// EVEX_Vmovntpd_m256_ymm
Mnemonic::Vmovntpd,// EVEX_Vmovntpd_m512_zmm
Mnemonic::Movntss,// Movntss_m32_xmm
Mnemonic::Movntsd,// Movntsd_m64_xmm
Mnemonic::Cvttps2pi,// Cvttps2pi_mm_xmmm64
Mnemonic::Cvttpd2pi,// Cvttpd2pi_mm_xmmm128
Mnemonic::Cvttss2si,// Cvttss2si_r32_xmmm32
Mnemonic::Cvttss2si,// Cvttss2si_r64_xmmm32
Mnemonic::Vcvttss2si,// VEX_Vcvttss2si_r32_xmmm32
Mnemonic::Vcvttss2si,// VEX_Vcvttss2si_r64_xmmm32
Mnemonic::Vcvttss2si,// EVEX_Vcvttss2si_r32_xmmm32_sae
Mnemonic::Vcvttss2si,// EVEX_Vcvttss2si_r64_xmmm32_sae
Mnemonic::Cvttsd2si,// Cvttsd2si_r32_xmmm64
Mnemonic::Cvttsd2si,// Cvttsd2si_r64_xmmm64
Mnemonic::Vcvttsd2si,// VEX_Vcvttsd2si_r32_xmmm64
Mnemonic::Vcvttsd2si,// VEX_Vcvttsd2si_r64_xmmm64
Mnemonic::Vcvttsd2si,// EVEX_Vcvttsd2si_r32_xmmm64_sae
Mnemonic::Vcvttsd2si,// EVEX_Vcvttsd2si_r64_xmmm64_sae
Mnemonic::Cvtps2pi,// Cvtps2pi_mm_xmmm64
Mnemonic::Cvtpd2pi,// Cvtpd2pi_mm_xmmm128
Mnemonic::Cvtss2si,// Cvtss2si_r32_xmmm32
Mnemonic::Cvtss2si,// Cvtss2si_r64_xmmm32
Mnemonic::Vcvtss2si,// VEX_Vcvtss2si_r32_xmmm32
Mnemonic::Vcvtss2si,// VEX_Vcvtss2si_r64_xmmm32
Mnemonic::Vcvtss2si,// EVEX_Vcvtss2si_r32_xmmm32_er
Mnemonic::Vcvtss2si,// EVEX_Vcvtss2si_r64_xmmm32_er
Mnemonic::Cvtsd2si,// Cvtsd2si_r32_xmmm64
Mnemonic::Cvtsd2si,// Cvtsd2si_r64_xmmm64
Mnemonic::Vcvtsd2si,// VEX_Vcvtsd2si_r32_xmmm64
Mnemonic::Vcvtsd2si,// VEX_Vcvtsd2si_r64_xmmm64
Mnemonic::Vcvtsd2si,// EVEX_Vcvtsd2si_r32_xmmm64_er
Mnemonic::Vcvtsd2si,// EVEX_Vcvtsd2si_r64_xmmm64_er
Mnemonic::Ucomiss,// Ucomiss_xmm_xmmm32
Mnemonic::Vucomiss,// VEX_Vucomiss_xmm_xmmm32
Mnemonic::Vucomiss,// EVEX_Vucomiss_xmm_xmmm32_sae
Mnemonic::Ucomisd,// Ucomisd_xmm_xmmm64
Mnemonic::Vucomisd,// VEX_Vucomisd_xmm_xmmm64
Mnemonic::Vucomisd,// EVEX_Vucomisd_xmm_xmmm64_sae
Mnemonic::Comiss,// Comiss_xmm_xmmm32
Mnemonic::Comisd,// Comisd_xmm_xmmm64
Mnemonic::Vcomiss,// VEX_Vcomiss_xmm_xmmm32
Mnemonic::Vcomisd,// VEX_Vcomisd_xmm_xmmm64
Mnemonic::Vcomiss,// EVEX_Vcomiss_xmm_xmmm32_sae
Mnemonic::Vcomisd,// EVEX_Vcomisd_xmm_xmmm64_sae
Mnemonic::Wrmsr,// Wrmsr
Mnemonic::Rdtsc,// Rdtsc
Mnemonic::Rdmsr,// Rdmsr
Mnemonic::Rdpmc,// Rdpmc
Mnemonic::Sysenter,// Sysenter
Mnemonic::Sysexit,// Sysexitd
Mnemonic::Sysexitq,// Sysexitq
Mnemonic::Getsec,// Getsecd
Mnemonic::Cmovo,// Cmovo_r16_rm16
Mnemonic::Cmovo,// Cmovo_r32_rm32
Mnemonic::Cmovo,// Cmovo_r64_rm64
Mnemonic::Cmovno,// Cmovno_r16_rm16
Mnemonic::Cmovno,// Cmovno_r32_rm32
Mnemonic::Cmovno,// Cmovno_r64_rm64
Mnemonic::Cmovb,// Cmovb_r16_rm16
Mnemonic::Cmovb,// Cmovb_r32_rm32
Mnemonic::Cmovb,// Cmovb_r64_rm64
Mnemonic::Cmovae,// Cmovae_r16_rm16
Mnemonic::Cmovae,// Cmovae_r32_rm32
Mnemonic::Cmovae,// Cmovae_r64_rm64
Mnemonic::Cmove,// Cmove_r16_rm16
Mnemonic::Cmove,// Cmove_r32_rm32
Mnemonic::Cmove,// Cmove_r64_rm64
Mnemonic::Cmovne,// Cmovne_r16_rm16
Mnemonic::Cmovne,// Cmovne_r32_rm32
Mnemonic::Cmovne,// Cmovne_r64_rm64
Mnemonic::Cmovbe,// Cmovbe_r16_rm16
Mnemonic::Cmovbe,// Cmovbe_r32_rm32
Mnemonic::Cmovbe,// Cmovbe_r64_rm64
Mnemonic::Cmova,// Cmova_r16_rm16
Mnemonic::Cmova,// Cmova_r32_rm32
Mnemonic::Cmova,// Cmova_r64_rm64
Mnemonic::Cmovs,// Cmovs_r16_rm16
Mnemonic::Cmovs,// Cmovs_r32_rm32
Mnemonic::Cmovs,// Cmovs_r64_rm64
Mnemonic::Cmovns,// Cmovns_r16_rm16
Mnemonic::Cmovns,// Cmovns_r32_rm32
Mnemonic::Cmovns,// Cmovns_r64_rm64
Mnemonic::Cmovp,// Cmovp_r16_rm16
Mnemonic::Cmovp,// Cmovp_r32_rm32
Mnemonic::Cmovp,// Cmovp_r64_rm64
Mnemonic::Cmovnp,// Cmovnp_r16_rm16
Mnemonic::Cmovnp,// Cmovnp_r32_rm32
Mnemonic::Cmovnp,// Cmovnp_r64_rm64
Mnemonic::Cmovl,// Cmovl_r16_rm16
Mnemonic::Cmovl,// Cmovl_r32_rm32
Mnemonic::Cmovl,// Cmovl_r64_rm64
Mnemonic::Cmovge,// Cmovge_r16_rm16
Mnemonic::Cmovge,// Cmovge_r32_rm32
Mnemonic::Cmovge,// Cmovge_r64_rm64
Mnemonic::Cmovle,// Cmovle_r16_rm16
Mnemonic::Cmovle,// Cmovle_r32_rm32
Mnemonic::Cmovle,// Cmovle_r64_rm64
Mnemonic::Cmovg,// Cmovg_r16_rm16
Mnemonic::Cmovg,// Cmovg_r32_rm32
Mnemonic::Cmovg,// Cmovg_r64_rm64
Mnemonic::Kandw,// VEX_Kandw_kr_kr_kr
Mnemonic::Kandq,// VEX_Kandq_kr_kr_kr
Mnemonic::Kandb,// VEX_Kandb_kr_kr_kr
Mnemonic::Kandd,// VEX_Kandd_kr_kr_kr
Mnemonic::Kandnw,// VEX_Kandnw_kr_kr_kr
Mnemonic::Kandnq,// VEX_Kandnq_kr_kr_kr
Mnemonic::Kandnb,// VEX_Kandnb_kr_kr_kr
Mnemonic::Kandnd,// VEX_Kandnd_kr_kr_kr
Mnemonic::Knotw,// VEX_Knotw_kr_kr
Mnemonic::Knotq,// VEX_Knotq_kr_kr
Mnemonic::Knotb,// VEX_Knotb_kr_kr
Mnemonic::Knotd,// VEX_Knotd_kr_kr
Mnemonic::Korw,// VEX_Korw_kr_kr_kr
Mnemonic::Korq,// VEX_Korq_kr_kr_kr
Mnemonic::Korb,// VEX_Korb_kr_kr_kr
Mnemonic::Kord,// VEX_Kord_kr_kr_kr
Mnemonic::Kxnorw,// VEX_Kxnorw_kr_kr_kr
Mnemonic::Kxnorq,// VEX_Kxnorq_kr_kr_kr
Mnemonic::Kxnorb,// VEX_Kxnorb_kr_kr_kr
Mnemonic::Kxnord,// VEX_Kxnord_kr_kr_kr
Mnemonic::Kxorw,// VEX_Kxorw_kr_kr_kr
Mnemonic::Kxorq,// VEX_Kxorq_kr_kr_kr
Mnemonic::Kxorb,// VEX_Kxorb_kr_kr_kr
Mnemonic::Kxord,// VEX_Kxord_kr_kr_kr
Mnemonic::Kaddw,// VEX_Kaddw_kr_kr_kr
Mnemonic::Kaddq,// VEX_Kaddq_kr_kr_kr
Mnemonic::Kaddb,// VEX_Kaddb_kr_kr_kr
Mnemonic::Kaddd,// VEX_Kaddd_kr_kr_kr
Mnemonic::Kunpckwd,// VEX_Kunpckwd_kr_kr_kr
Mnemonic::Kunpckdq,// VEX_Kunpckdq_kr_kr_kr
Mnemonic::Kunpckbw,// VEX_Kunpckbw_kr_kr_kr
Mnemonic::Movmskps,// Movmskps_r32_xmm
Mnemonic::Movmskps,// Movmskps_r64_xmm
Mnemonic::Vmovmskps,// VEX_Vmovmskps_r32_xmm
Mnemonic::Vmovmskps,// VEX_Vmovmskps_r64_xmm
Mnemonic::Vmovmskps,// VEX_Vmovmskps_r32_ymm
Mnemonic::Vmovmskps,// VEX_Vmovmskps_r64_ymm
Mnemonic::Movmskpd,// Movmskpd_r32_xmm
Mnemonic::Movmskpd,// Movmskpd_r64_xmm
Mnemonic::Vmovmskpd,// VEX_Vmovmskpd_r32_xmm
Mnemonic::Vmovmskpd,// VEX_Vmovmskpd_r64_xmm
Mnemonic::Vmovmskpd,// VEX_Vmovmskpd_r32_ymm
Mnemonic::Vmovmskpd,// VEX_Vmovmskpd_r64_ymm
Mnemonic::Sqrtps,// Sqrtps_xmm_xmmm128
Mnemonic::Vsqrtps,// VEX_Vsqrtps_xmm_xmmm128
Mnemonic::Vsqrtps,// VEX_Vsqrtps_ymm_ymmm256
Mnemonic::Vsqrtps,// EVEX_Vsqrtps_xmm_k1z_xmmm128b32
Mnemonic::Vsqrtps,// EVEX_Vsqrtps_ymm_k1z_ymmm256b32
Mnemonic::Vsqrtps,// EVEX_Vsqrtps_zmm_k1z_zmmm512b32_er
Mnemonic::Sqrtpd,// Sqrtpd_xmm_xmmm128
Mnemonic::Vsqrtpd,// VEX_Vsqrtpd_xmm_xmmm128
Mnemonic::Vsqrtpd,// VEX_Vsqrtpd_ymm_ymmm256
Mnemonic::Vsqrtpd,// EVEX_Vsqrtpd_xmm_k1z_xmmm128b64
Mnemonic::Vsqrtpd,// EVEX_Vsqrtpd_ymm_k1z_ymmm256b64
Mnemonic::Vsqrtpd,// EVEX_Vsqrtpd_zmm_k1z_zmmm512b64_er
Mnemonic::Sqrtss,// Sqrtss_xmm_xmmm32
Mnemonic::Vsqrtss,// VEX_Vsqrtss_xmm_xmm_xmmm32
Mnemonic::Vsqrtss,// EVEX_Vsqrtss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Sqrtsd,// Sqrtsd_xmm_xmmm64
Mnemonic::Vsqrtsd,// VEX_Vsqrtsd_xmm_xmm_xmmm64
Mnemonic::Vsqrtsd,// EVEX_Vsqrtsd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Rsqrtps,// Rsqrtps_xmm_xmmm128
Mnemonic::Vrsqrtps,// VEX_Vrsqrtps_xmm_xmmm128
Mnemonic::Vrsqrtps,// VEX_Vrsqrtps_ymm_ymmm256
Mnemonic::Rsqrtss,// Rsqrtss_xmm_xmmm32
Mnemonic::Vrsqrtss,// VEX_Vrsqrtss_xmm_xmm_xmmm32
Mnemonic::Rcpps,// Rcpps_xmm_xmmm128
Mnemonic::Vrcpps,// VEX_Vrcpps_xmm_xmmm128
Mnemonic::Vrcpps,// VEX_Vrcpps_ymm_ymmm256
Mnemonic::Rcpss,// Rcpss_xmm_xmmm32
Mnemonic::Vrcpss,// VEX_Vrcpss_xmm_xmm_xmmm32
Mnemonic::Andps,// Andps_xmm_xmmm128
Mnemonic::Vandps,// VEX_Vandps_xmm_xmm_xmmm128
Mnemonic::Vandps,// VEX_Vandps_ymm_ymm_ymmm256
Mnemonic::Vandps,// EVEX_Vandps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vandps,// EVEX_Vandps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vandps,// EVEX_Vandps_zmm_k1z_zmm_zmmm512b32
Mnemonic::Andpd,// Andpd_xmm_xmmm128
Mnemonic::Vandpd,// VEX_Vandpd_xmm_xmm_xmmm128
Mnemonic::Vandpd,// VEX_Vandpd_ymm_ymm_ymmm256
Mnemonic::Vandpd,// EVEX_Vandpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vandpd,// EVEX_Vandpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vandpd,// EVEX_Vandpd_zmm_k1z_zmm_zmmm512b64
Mnemonic::Andnps,// Andnps_xmm_xmmm128
Mnemonic::Vandnps,// VEX_Vandnps_xmm_xmm_xmmm128
Mnemonic::Vandnps,// VEX_Vandnps_ymm_ymm_ymmm256
Mnemonic::Vandnps,// EVEX_Vandnps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vandnps,// EVEX_Vandnps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vandnps,// EVEX_Vandnps_zmm_k1z_zmm_zmmm512b32
Mnemonic::Andnpd,// Andnpd_xmm_xmmm128
Mnemonic::Vandnpd,// VEX_Vandnpd_xmm_xmm_xmmm128
Mnemonic::Vandnpd,// VEX_Vandnpd_ymm_ymm_ymmm256
Mnemonic::Vandnpd,// EVEX_Vandnpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vandnpd,// EVEX_Vandnpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vandnpd,// EVEX_Vandnpd_zmm_k1z_zmm_zmmm512b64
Mnemonic::Orps,// Orps_xmm_xmmm128
Mnemonic::Vorps,// VEX_Vorps_xmm_xmm_xmmm128
Mnemonic::Vorps,// VEX_Vorps_ymm_ymm_ymmm256
Mnemonic::Vorps,// EVEX_Vorps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vorps,// EVEX_Vorps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vorps,// EVEX_Vorps_zmm_k1z_zmm_zmmm512b32
Mnemonic::Orpd,// Orpd_xmm_xmmm128
Mnemonic::Vorpd,// VEX_Vorpd_xmm_xmm_xmmm128
Mnemonic::Vorpd,// VEX_Vorpd_ymm_ymm_ymmm256
Mnemonic::Vorpd,// EVEX_Vorpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vorpd,// EVEX_Vorpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vorpd,// EVEX_Vorpd_zmm_k1z_zmm_zmmm512b64
Mnemonic::Xorps,// Xorps_xmm_xmmm128
Mnemonic::Vxorps,// VEX_Vxorps_xmm_xmm_xmmm128
Mnemonic::Vxorps,// VEX_Vxorps_ymm_ymm_ymmm256
Mnemonic::Vxorps,// EVEX_Vxorps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vxorps,// EVEX_Vxorps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vxorps,// EVEX_Vxorps_zmm_k1z_zmm_zmmm512b32
Mnemonic::Xorpd,// Xorpd_xmm_xmmm128
Mnemonic::Vxorpd,// VEX_Vxorpd_xmm_xmm_xmmm128
Mnemonic::Vxorpd,// VEX_Vxorpd_ymm_ymm_ymmm256
Mnemonic::Vxorpd,// EVEX_Vxorpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vxorpd,// EVEX_Vxorpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vxorpd,// EVEX_Vxorpd_zmm_k1z_zmm_zmmm512b64
Mnemonic::Addps,// Addps_xmm_xmmm128
Mnemonic::Vaddps,// VEX_Vaddps_xmm_xmm_xmmm128
Mnemonic::Vaddps,// VEX_Vaddps_ymm_ymm_ymmm256
Mnemonic::Vaddps,// EVEX_Vaddps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vaddps,// EVEX_Vaddps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vaddps,// EVEX_Vaddps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Addpd,// Addpd_xmm_xmmm128
Mnemonic::Vaddpd,// VEX_Vaddpd_xmm_xmm_xmmm128
Mnemonic::Vaddpd,// VEX_Vaddpd_ymm_ymm_ymmm256
Mnemonic::Vaddpd,// EVEX_Vaddpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vaddpd,// EVEX_Vaddpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vaddpd,// EVEX_Vaddpd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Addss,// Addss_xmm_xmmm32
Mnemonic::Vaddss,// VEX_Vaddss_xmm_xmm_xmmm32
Mnemonic::Vaddss,// EVEX_Vaddss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Addsd,// Addsd_xmm_xmmm64
Mnemonic::Vaddsd,// VEX_Vaddsd_xmm_xmm_xmmm64
Mnemonic::Vaddsd,// EVEX_Vaddsd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Mulps,// Mulps_xmm_xmmm128
Mnemonic::Vmulps,// VEX_Vmulps_xmm_xmm_xmmm128
Mnemonic::Vmulps,// VEX_Vmulps_ymm_ymm_ymmm256
Mnemonic::Vmulps,// EVEX_Vmulps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vmulps,// EVEX_Vmulps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vmulps,// EVEX_Vmulps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Mulpd,// Mulpd_xmm_xmmm128
Mnemonic::Vmulpd,// VEX_Vmulpd_xmm_xmm_xmmm128
Mnemonic::Vmulpd,// VEX_Vmulpd_ymm_ymm_ymmm256
Mnemonic::Vmulpd,// EVEX_Vmulpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vmulpd,// EVEX_Vmulpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vmulpd,// EVEX_Vmulpd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Mulss,// Mulss_xmm_xmmm32
Mnemonic::Vmulss,// VEX_Vmulss_xmm_xmm_xmmm32
Mnemonic::Vmulss,// EVEX_Vmulss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Mulsd,// Mulsd_xmm_xmmm64
Mnemonic::Vmulsd,// VEX_Vmulsd_xmm_xmm_xmmm64
Mnemonic::Vmulsd,// EVEX_Vmulsd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Cvtps2pd,// Cvtps2pd_xmm_xmmm64
Mnemonic::Vcvtps2pd,// VEX_Vcvtps2pd_xmm_xmmm64
Mnemonic::Vcvtps2pd,// VEX_Vcvtps2pd_ymm_xmmm128
Mnemonic::Vcvtps2pd,// EVEX_Vcvtps2pd_xmm_k1z_xmmm64b32
Mnemonic::Vcvtps2pd,// EVEX_Vcvtps2pd_ymm_k1z_xmmm128b32
Mnemonic::Vcvtps2pd,// EVEX_Vcvtps2pd_zmm_k1z_ymmm256b32_sae
Mnemonic::Cvtpd2ps,// Cvtpd2ps_xmm_xmmm128
Mnemonic::Vcvtpd2ps,// VEX_Vcvtpd2ps_xmm_xmmm128
Mnemonic::Vcvtpd2ps,// VEX_Vcvtpd2ps_xmm_ymmm256
Mnemonic::Vcvtpd2ps,// EVEX_Vcvtpd2ps_xmm_k1z_xmmm128b64
Mnemonic::Vcvtpd2ps,// EVEX_Vcvtpd2ps_xmm_k1z_ymmm256b64
Mnemonic::Vcvtpd2ps,// EVEX_Vcvtpd2ps_ymm_k1z_zmmm512b64_er
Mnemonic::Cvtss2sd,// Cvtss2sd_xmm_xmmm32
Mnemonic::Vcvtss2sd,// VEX_Vcvtss2sd_xmm_xmm_xmmm32
Mnemonic::Vcvtss2sd,// EVEX_Vcvtss2sd_xmm_k1z_xmm_xmmm32_sae
Mnemonic::Cvtsd2ss,// Cvtsd2ss_xmm_xmmm64
Mnemonic::Vcvtsd2ss,// VEX_Vcvtsd2ss_xmm_xmm_xmmm64
Mnemonic::Vcvtsd2ss,// EVEX_Vcvtsd2ss_xmm_k1z_xmm_xmmm64_er
Mnemonic::Cvtdq2ps,// Cvtdq2ps_xmm_xmmm128
Mnemonic::Vcvtdq2ps,// VEX_Vcvtdq2ps_xmm_xmmm128
Mnemonic::Vcvtdq2ps,// VEX_Vcvtdq2ps_ymm_ymmm256
Mnemonic::Vcvtdq2ps,// EVEX_Vcvtdq2ps_xmm_k1z_xmmm128b32
Mnemonic::Vcvtdq2ps,// EVEX_Vcvtdq2ps_ymm_k1z_ymmm256b32
Mnemonic::Vcvtdq2ps,// EVEX_Vcvtdq2ps_zmm_k1z_zmmm512b32_er
Mnemonic::Vcvtqq2ps,// EVEX_Vcvtqq2ps_xmm_k1z_xmmm128b64
Mnemonic::Vcvtqq2ps,// EVEX_Vcvtqq2ps_xmm_k1z_ymmm256b64
Mnemonic::Vcvtqq2ps,// EVEX_Vcvtqq2ps_ymm_k1z_zmmm512b64_er
Mnemonic::Cvtps2dq,// Cvtps2dq_xmm_xmmm128
Mnemonic::Vcvtps2dq,// VEX_Vcvtps2dq_xmm_xmmm128
Mnemonic::Vcvtps2dq,// VEX_Vcvtps2dq_ymm_ymmm256
Mnemonic::Vcvtps2dq,// EVEX_Vcvtps2dq_xmm_k1z_xmmm128b32
Mnemonic::Vcvtps2dq,// EVEX_Vcvtps2dq_ymm_k1z_ymmm256b32
Mnemonic::Vcvtps2dq,// EVEX_Vcvtps2dq_zmm_k1z_zmmm512b32_er
Mnemonic::Cvttps2dq,// Cvttps2dq_xmm_xmmm128
Mnemonic::Vcvttps2dq,// VEX_Vcvttps2dq_xmm_xmmm128
Mnemonic::Vcvttps2dq,// VEX_Vcvttps2dq_ymm_ymmm256
Mnemonic::Vcvttps2dq,// EVEX_Vcvttps2dq_xmm_k1z_xmmm128b32
Mnemonic::Vcvttps2dq,// EVEX_Vcvttps2dq_ymm_k1z_ymmm256b32
Mnemonic::Vcvttps2dq,// EVEX_Vcvttps2dq_zmm_k1z_zmmm512b32_sae
Mnemonic::Subps,// Subps_xmm_xmmm128
Mnemonic::Vsubps,// VEX_Vsubps_xmm_xmm_xmmm128
Mnemonic::Vsubps,// VEX_Vsubps_ymm_ymm_ymmm256
Mnemonic::Vsubps,// EVEX_Vsubps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vsubps,// EVEX_Vsubps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vsubps,// EVEX_Vsubps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Subpd,// Subpd_xmm_xmmm128
Mnemonic::Vsubpd,// VEX_Vsubpd_xmm_xmm_xmmm128
Mnemonic::Vsubpd,// VEX_Vsubpd_ymm_ymm_ymmm256
Mnemonic::Vsubpd,// EVEX_Vsubpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vsubpd,// EVEX_Vsubpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vsubpd,// EVEX_Vsubpd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Subss,// Subss_xmm_xmmm32
Mnemonic::Vsubss,// VEX_Vsubss_xmm_xmm_xmmm32
Mnemonic::Vsubss,// EVEX_Vsubss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Subsd,// Subsd_xmm_xmmm64
Mnemonic::Vsubsd,// VEX_Vsubsd_xmm_xmm_xmmm64
Mnemonic::Vsubsd,// EVEX_Vsubsd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Minps,// Minps_xmm_xmmm128
Mnemonic::Vminps,// VEX_Vminps_xmm_xmm_xmmm128
Mnemonic::Vminps,// VEX_Vminps_ymm_ymm_ymmm256
Mnemonic::Vminps,// EVEX_Vminps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vminps,// EVEX_Vminps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vminps,// EVEX_Vminps_zmm_k1z_zmm_zmmm512b32_sae
Mnemonic::Minpd,// Minpd_xmm_xmmm128
Mnemonic::Vminpd,// VEX_Vminpd_xmm_xmm_xmmm128
Mnemonic::Vminpd,// VEX_Vminpd_ymm_ymm_ymmm256
Mnemonic::Vminpd,// EVEX_Vminpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vminpd,// EVEX_Vminpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vminpd,// EVEX_Vminpd_zmm_k1z_zmm_zmmm512b64_sae
Mnemonic::Minss,// Minss_xmm_xmmm32
Mnemonic::Vminss,// VEX_Vminss_xmm_xmm_xmmm32
Mnemonic::Vminss,// EVEX_Vminss_xmm_k1z_xmm_xmmm32_sae
Mnemonic::Minsd,// Minsd_xmm_xmmm64
Mnemonic::Vminsd,// VEX_Vminsd_xmm_xmm_xmmm64
Mnemonic::Vminsd,// EVEX_Vminsd_xmm_k1z_xmm_xmmm64_sae
Mnemonic::Divps,// Divps_xmm_xmmm128
Mnemonic::Vdivps,// VEX_Vdivps_xmm_xmm_xmmm128
Mnemonic::Vdivps,// VEX_Vdivps_ymm_ymm_ymmm256
Mnemonic::Vdivps,// EVEX_Vdivps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vdivps,// EVEX_Vdivps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vdivps,// EVEX_Vdivps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Divpd,// Divpd_xmm_xmmm128
Mnemonic::Vdivpd,// VEX_Vdivpd_xmm_xmm_xmmm128
Mnemonic::Vdivpd,// VEX_Vdivpd_ymm_ymm_ymmm256
Mnemonic::Vdivpd,// EVEX_Vdivpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vdivpd,// EVEX_Vdivpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vdivpd,// EVEX_Vdivpd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Divss,// Divss_xmm_xmmm32
Mnemonic::Vdivss,// VEX_Vdivss_xmm_xmm_xmmm32
Mnemonic::Vdivss,// EVEX_Vdivss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Divsd,// Divsd_xmm_xmmm64
Mnemonic::Vdivsd,// VEX_Vdivsd_xmm_xmm_xmmm64
Mnemonic::Vdivsd,// EVEX_Vdivsd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Maxps,// Maxps_xmm_xmmm128
Mnemonic::Vmaxps,// VEX_Vmaxps_xmm_xmm_xmmm128
Mnemonic::Vmaxps,// VEX_Vmaxps_ymm_ymm_ymmm256
Mnemonic::Vmaxps,// EVEX_Vmaxps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vmaxps,// EVEX_Vmaxps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vmaxps,// EVEX_Vmaxps_zmm_k1z_zmm_zmmm512b32_sae
Mnemonic::Maxpd,// Maxpd_xmm_xmmm128
Mnemonic::Vmaxpd,// VEX_Vmaxpd_xmm_xmm_xmmm128
Mnemonic::Vmaxpd,// VEX_Vmaxpd_ymm_ymm_ymmm256
Mnemonic::Vmaxpd,// EVEX_Vmaxpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vmaxpd,// EVEX_Vmaxpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vmaxpd,// EVEX_Vmaxpd_zmm_k1z_zmm_zmmm512b64_sae
Mnemonic::Maxss,// Maxss_xmm_xmmm32
Mnemonic::Vmaxss,// VEX_Vmaxss_xmm_xmm_xmmm32
Mnemonic::Vmaxss,// EVEX_Vmaxss_xmm_k1z_xmm_xmmm32_sae
Mnemonic::Maxsd,// Maxsd_xmm_xmmm64
Mnemonic::Vmaxsd,// VEX_Vmaxsd_xmm_xmm_xmmm64
Mnemonic::Vmaxsd,// EVEX_Vmaxsd_xmm_k1z_xmm_xmmm64_sae
Mnemonic::Punpcklbw,// Punpcklbw_mm_mmm32
Mnemonic::Punpcklbw,// Punpcklbw_xmm_xmmm128
Mnemonic::Vpunpcklbw,// VEX_Vpunpcklbw_xmm_xmm_xmmm128
Mnemonic::Vpunpcklbw,// VEX_Vpunpcklbw_ymm_ymm_ymmm256
Mnemonic::Vpunpcklbw,// EVEX_Vpunpcklbw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpunpcklbw,// EVEX_Vpunpcklbw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpunpcklbw,// EVEX_Vpunpcklbw_zmm_k1z_zmm_zmmm512
Mnemonic::Punpcklwd,// Punpcklwd_mm_mmm32
Mnemonic::Punpcklwd,// Punpcklwd_xmm_xmmm128
Mnemonic::Vpunpcklwd,// VEX_Vpunpcklwd_xmm_xmm_xmmm128
Mnemonic::Vpunpcklwd,// VEX_Vpunpcklwd_ymm_ymm_ymmm256
Mnemonic::Vpunpcklwd,// EVEX_Vpunpcklwd_xmm_k1z_xmm_xmmm128
Mnemonic::Vpunpcklwd,// EVEX_Vpunpcklwd_ymm_k1z_ymm_ymmm256
Mnemonic::Vpunpcklwd,// EVEX_Vpunpcklwd_zmm_k1z_zmm_zmmm512
Mnemonic::Punpckldq,// Punpckldq_mm_mmm32
Mnemonic::Punpckldq,// Punpckldq_xmm_xmmm128
Mnemonic::Vpunpckldq,// VEX_Vpunpckldq_xmm_xmm_xmmm128
Mnemonic::Vpunpckldq,// VEX_Vpunpckldq_ymm_ymm_ymmm256
Mnemonic::Vpunpckldq,// EVEX_Vpunpckldq_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpunpckldq,// EVEX_Vpunpckldq_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpunpckldq,// EVEX_Vpunpckldq_zmm_k1z_zmm_zmmm512b32
Mnemonic::Packsswb,// Packsswb_mm_mmm64
Mnemonic::Packsswb,// Packsswb_xmm_xmmm128
Mnemonic::Vpacksswb,// VEX_Vpacksswb_xmm_xmm_xmmm128
Mnemonic::Vpacksswb,// VEX_Vpacksswb_ymm_ymm_ymmm256
Mnemonic::Vpacksswb,// EVEX_Vpacksswb_xmm_k1z_xmm_xmmm128
Mnemonic::Vpacksswb,// EVEX_Vpacksswb_ymm_k1z_ymm_ymmm256
Mnemonic::Vpacksswb,// EVEX_Vpacksswb_zmm_k1z_zmm_zmmm512
Mnemonic::Pcmpgtb,// Pcmpgtb_mm_mmm64
Mnemonic::Pcmpgtb,// Pcmpgtb_xmm_xmmm128
Mnemonic::Vpcmpgtb,// VEX_Vpcmpgtb_xmm_xmm_xmmm128
Mnemonic::Vpcmpgtb,// VEX_Vpcmpgtb_ymm_ymm_ymmm256
Mnemonic::Vpcmpgtb,// EVEX_Vpcmpgtb_kr_k1_xmm_xmmm128
Mnemonic::Vpcmpgtb,// EVEX_Vpcmpgtb_kr_k1_ymm_ymmm256
Mnemonic::Vpcmpgtb,// EVEX_Vpcmpgtb_kr_k1_zmm_zmmm512
Mnemonic::Pcmpgtw,// Pcmpgtw_mm_mmm64
Mnemonic::Pcmpgtw,// Pcmpgtw_xmm_xmmm128
Mnemonic::Vpcmpgtw,// VEX_Vpcmpgtw_xmm_xmm_xmmm128
Mnemonic::Vpcmpgtw,// VEX_Vpcmpgtw_ymm_ymm_ymmm256
Mnemonic::Vpcmpgtw,// EVEX_Vpcmpgtw_kr_k1_xmm_xmmm128
Mnemonic::Vpcmpgtw,// EVEX_Vpcmpgtw_kr_k1_ymm_ymmm256
Mnemonic::Vpcmpgtw,// EVEX_Vpcmpgtw_kr_k1_zmm_zmmm512
Mnemonic::Pcmpgtd,// Pcmpgtd_mm_mmm64
Mnemonic::Pcmpgtd,// Pcmpgtd_xmm_xmmm128
Mnemonic::Vpcmpgtd,// VEX_Vpcmpgtd_xmm_xmm_xmmm128
Mnemonic::Vpcmpgtd,// VEX_Vpcmpgtd_ymm_ymm_ymmm256
Mnemonic::Vpcmpgtd,// EVEX_Vpcmpgtd_kr_k1_xmm_xmmm128b32
Mnemonic::Vpcmpgtd,// EVEX_Vpcmpgtd_kr_k1_ymm_ymmm256b32
Mnemonic::Vpcmpgtd,// EVEX_Vpcmpgtd_kr_k1_zmm_zmmm512b32
Mnemonic::Packuswb,// Packuswb_mm_mmm64
Mnemonic::Packuswb,// Packuswb_xmm_xmmm128
Mnemonic::Vpackuswb,// VEX_Vpackuswb_xmm_xmm_xmmm128
Mnemonic::Vpackuswb,// VEX_Vpackuswb_ymm_ymm_ymmm256
Mnemonic::Vpackuswb,// EVEX_Vpackuswb_xmm_k1z_xmm_xmmm128
Mnemonic::Vpackuswb,// EVEX_Vpackuswb_ymm_k1z_ymm_ymmm256
Mnemonic::Vpackuswb,// EVEX_Vpackuswb_zmm_k1z_zmm_zmmm512
Mnemonic::Punpckhbw,// Punpckhbw_mm_mmm64
Mnemonic::Punpckhbw,// Punpckhbw_xmm_xmmm128
Mnemonic::Vpunpckhbw,// VEX_Vpunpckhbw_xmm_xmm_xmmm128
Mnemonic::Vpunpckhbw,// VEX_Vpunpckhbw_ymm_ymm_ymmm256
Mnemonic::Vpunpckhbw,// EVEX_Vpunpckhbw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpunpckhbw,// EVEX_Vpunpckhbw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpunpckhbw,// EVEX_Vpunpckhbw_zmm_k1z_zmm_zmmm512
Mnemonic::Punpckhwd,// Punpckhwd_mm_mmm64
Mnemonic::Punpckhwd,// Punpckhwd_xmm_xmmm128
Mnemonic::Vpunpckhwd,// VEX_Vpunpckhwd_xmm_xmm_xmmm128
Mnemonic::Vpunpckhwd,// VEX_Vpunpckhwd_ymm_ymm_ymmm256
Mnemonic::Vpunpckhwd,// EVEX_Vpunpckhwd_xmm_k1z_xmm_xmmm128
Mnemonic::Vpunpckhwd,// EVEX_Vpunpckhwd_ymm_k1z_ymm_ymmm256
Mnemonic::Vpunpckhwd,// EVEX_Vpunpckhwd_zmm_k1z_zmm_zmmm512
Mnemonic::Punpckhdq,// Punpckhdq_mm_mmm64
Mnemonic::Punpckhdq,// Punpckhdq_xmm_xmmm128
Mnemonic::Vpunpckhdq,// VEX_Vpunpckhdq_xmm_xmm_xmmm128
Mnemonic::Vpunpckhdq,// VEX_Vpunpckhdq_ymm_ymm_ymmm256
Mnemonic::Vpunpckhdq,// EVEX_Vpunpckhdq_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpunpckhdq,// EVEX_Vpunpckhdq_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpunpckhdq,// EVEX_Vpunpckhdq_zmm_k1z_zmm_zmmm512b32
Mnemonic::Packssdw,// Packssdw_mm_mmm64
Mnemonic::Packssdw,// Packssdw_xmm_xmmm128
Mnemonic::Vpackssdw,// VEX_Vpackssdw_xmm_xmm_xmmm128
Mnemonic::Vpackssdw,// VEX_Vpackssdw_ymm_ymm_ymmm256
Mnemonic::Vpackssdw,// EVEX_Vpackssdw_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpackssdw,// EVEX_Vpackssdw_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpackssdw,// EVEX_Vpackssdw_zmm_k1z_zmm_zmmm512b32
Mnemonic::Punpcklqdq,// Punpcklqdq_xmm_xmmm128
Mnemonic::Vpunpcklqdq,// VEX_Vpunpcklqdq_xmm_xmm_xmmm128
Mnemonic::Vpunpcklqdq,// VEX_Vpunpcklqdq_ymm_ymm_ymmm256
Mnemonic::Vpunpcklqdq,// EVEX_Vpunpcklqdq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpunpcklqdq,// EVEX_Vpunpcklqdq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpunpcklqdq,// EVEX_Vpunpcklqdq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Punpckhqdq,// Punpckhqdq_xmm_xmmm128
Mnemonic::Vpunpckhqdq,// VEX_Vpunpckhqdq_xmm_xmm_xmmm128
Mnemonic::Vpunpckhqdq,// VEX_Vpunpckhqdq_ymm_ymm_ymmm256
Mnemonic::Vpunpckhqdq,// EVEX_Vpunpckhqdq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpunpckhqdq,// EVEX_Vpunpckhqdq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpunpckhqdq,// EVEX_Vpunpckhqdq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Movd,// Movd_mm_rm32
Mnemonic::Movq,// Movq_mm_rm64
Mnemonic::Movd,// Movd_xmm_rm32
Mnemonic::Movq,// Movq_xmm_rm64
Mnemonic::Vmovd,// VEX_Vmovd_xmm_rm32
Mnemonic::Vmovq,// VEX_Vmovq_xmm_rm64
Mnemonic::Vmovd,// EVEX_Vmovd_xmm_rm32
Mnemonic::Vmovq,// EVEX_Vmovq_xmm_rm64
Mnemonic::Movq,// Movq_mm_mmm64
Mnemonic::Movdqa,// Movdqa_xmm_xmmm128
Mnemonic::Vmovdqa,// VEX_Vmovdqa_xmm_xmmm128
Mnemonic::Vmovdqa,// VEX_Vmovdqa_ymm_ymmm256
Mnemonic::Vmovdqa32,// EVEX_Vmovdqa32_xmm_k1z_xmmm128
Mnemonic::Vmovdqa32,// EVEX_Vmovdqa32_ymm_k1z_ymmm256
Mnemonic::Vmovdqa32,// EVEX_Vmovdqa32_zmm_k1z_zmmm512
Mnemonic::Vmovdqa64,// EVEX_Vmovdqa64_xmm_k1z_xmmm128
Mnemonic::Vmovdqa64,// EVEX_Vmovdqa64_ymm_k1z_ymmm256
Mnemonic::Vmovdqa64,// EVEX_Vmovdqa64_zmm_k1z_zmmm512
Mnemonic::Movdqu,// Movdqu_xmm_xmmm128
Mnemonic::Vmovdqu,// VEX_Vmovdqu_xmm_xmmm128
Mnemonic::Vmovdqu,// VEX_Vmovdqu_ymm_ymmm256
Mnemonic::Vmovdqu32,// EVEX_Vmovdqu32_xmm_k1z_xmmm128
Mnemonic::Vmovdqu32,// EVEX_Vmovdqu32_ymm_k1z_ymmm256
Mnemonic::Vmovdqu32,// EVEX_Vmovdqu32_zmm_k1z_zmmm512
Mnemonic::Vmovdqu64,// EVEX_Vmovdqu64_xmm_k1z_xmmm128
Mnemonic::Vmovdqu64,// EVEX_Vmovdqu64_ymm_k1z_ymmm256
Mnemonic::Vmovdqu64,// EVEX_Vmovdqu64_zmm_k1z_zmmm512
Mnemonic::Vmovdqu8,// EVEX_Vmovdqu8_xmm_k1z_xmmm128
Mnemonic::Vmovdqu8,// EVEX_Vmovdqu8_ymm_k1z_ymmm256
Mnemonic::Vmovdqu8,// EVEX_Vmovdqu8_zmm_k1z_zmmm512
Mnemonic::Vmovdqu16,// EVEX_Vmovdqu16_xmm_k1z_xmmm128
Mnemonic::Vmovdqu16,// EVEX_Vmovdqu16_ymm_k1z_ymmm256
Mnemonic::Vmovdqu16,// EVEX_Vmovdqu16_zmm_k1z_zmmm512
Mnemonic::Pshufw,// Pshufw_mm_mmm64_imm8
Mnemonic::Pshufd,// Pshufd_xmm_xmmm128_imm8
Mnemonic::Vpshufd,// VEX_Vpshufd_xmm_xmmm128_imm8
Mnemonic::Vpshufd,// VEX_Vpshufd_ymm_ymmm256_imm8
Mnemonic::Vpshufd,// EVEX_Vpshufd_xmm_k1z_xmmm128b32_imm8
Mnemonic::Vpshufd,// EVEX_Vpshufd_ymm_k1z_ymmm256b32_imm8
Mnemonic::Vpshufd,// EVEX_Vpshufd_zmm_k1z_zmmm512b32_imm8
Mnemonic::Pshufhw,// Pshufhw_xmm_xmmm128_imm8
Mnemonic::Vpshufhw,// VEX_Vpshufhw_xmm_xmmm128_imm8
Mnemonic::Vpshufhw,// VEX_Vpshufhw_ymm_ymmm256_imm8
Mnemonic::Vpshufhw,// EVEX_Vpshufhw_xmm_k1z_xmmm128_imm8
Mnemonic::Vpshufhw,// EVEX_Vpshufhw_ymm_k1z_ymmm256_imm8
Mnemonic::Vpshufhw,// EVEX_Vpshufhw_zmm_k1z_zmmm512_imm8
Mnemonic::Pshuflw,// Pshuflw_xmm_xmmm128_imm8
Mnemonic::Vpshuflw,// VEX_Vpshuflw_xmm_xmmm128_imm8
Mnemonic::Vpshuflw,// VEX_Vpshuflw_ymm_ymmm256_imm8
Mnemonic::Vpshuflw,// EVEX_Vpshuflw_xmm_k1z_xmmm128_imm8
Mnemonic::Vpshuflw,// EVEX_Vpshuflw_ymm_k1z_ymmm256_imm8
Mnemonic::Vpshuflw,// EVEX_Vpshuflw_zmm_k1z_zmmm512_imm8
Mnemonic::Psrlw,// Psrlw_mm_imm8
Mnemonic::Psrlw,// Psrlw_xmm_imm8
Mnemonic::Vpsrlw,// VEX_Vpsrlw_xmm_xmm_imm8
Mnemonic::Vpsrlw,// VEX_Vpsrlw_ymm_ymm_imm8
Mnemonic::Vpsrlw,// EVEX_Vpsrlw_xmm_k1z_xmmm128_imm8
Mnemonic::Vpsrlw,// EVEX_Vpsrlw_ymm_k1z_ymmm256_imm8
Mnemonic::Vpsrlw,// EVEX_Vpsrlw_zmm_k1z_zmmm512_imm8
Mnemonic::Psraw,// Psraw_mm_imm8
Mnemonic::Psraw,// Psraw_xmm_imm8
Mnemonic::Vpsraw,// VEX_Vpsraw_xmm_xmm_imm8
Mnemonic::Vpsraw,// VEX_Vpsraw_ymm_ymm_imm8
Mnemonic::Vpsraw,// EVEX_Vpsraw_xmm_k1z_xmmm128_imm8
Mnemonic::Vpsraw,// EVEX_Vpsraw_ymm_k1z_ymmm256_imm8
Mnemonic::Vpsraw,// EVEX_Vpsraw_zmm_k1z_zmmm512_imm8
Mnemonic::Psllw,// Psllw_mm_imm8
Mnemonic::Psllw,// Psllw_xmm_imm8
Mnemonic::Vpsllw,// VEX_Vpsllw_xmm_xmm_imm8
Mnemonic::Vpsllw,// VEX_Vpsllw_ymm_ymm_imm8
Mnemonic::Vpsllw,// EVEX_Vpsllw_xmm_k1z_xmmm128_imm8
Mnemonic::Vpsllw,// EVEX_Vpsllw_ymm_k1z_ymmm256_imm8
Mnemonic::Vpsllw,// EVEX_Vpsllw_zmm_k1z_zmmm512_imm8
Mnemonic::Vprord,// EVEX_Vprord_xmm_k1z_xmmm128b32_imm8
Mnemonic::Vprord,// EVEX_Vprord_ymm_k1z_ymmm256b32_imm8
Mnemonic::Vprord,// EVEX_Vprord_zmm_k1z_zmmm512b32_imm8
Mnemonic::Vprorq,// EVEX_Vprorq_xmm_k1z_xmmm128b64_imm8
Mnemonic::Vprorq,// EVEX_Vprorq_ymm_k1z_ymmm256b64_imm8
Mnemonic::Vprorq,// EVEX_Vprorq_zmm_k1z_zmmm512b64_imm8
Mnemonic::Vprold,// EVEX_Vprold_xmm_k1z_xmmm128b32_imm8
Mnemonic::Vprold,// EVEX_Vprold_ymm_k1z_ymmm256b32_imm8
Mnemonic::Vprold,// EVEX_Vprold_zmm_k1z_zmmm512b32_imm8
Mnemonic::Vprolq,// EVEX_Vprolq_xmm_k1z_xmmm128b64_imm8
Mnemonic::Vprolq,// EVEX_Vprolq_ymm_k1z_ymmm256b64_imm8
Mnemonic::Vprolq,// EVEX_Vprolq_zmm_k1z_zmmm512b64_imm8
Mnemonic::Psrld,// Psrld_mm_imm8
Mnemonic::Psrld,// Psrld_xmm_imm8
Mnemonic::Vpsrld,// VEX_Vpsrld_xmm_xmm_imm8
Mnemonic::Vpsrld,// VEX_Vpsrld_ymm_ymm_imm8
Mnemonic::Vpsrld,// EVEX_Vpsrld_xmm_k1z_xmmm128b32_imm8
Mnemonic::Vpsrld,// EVEX_Vpsrld_ymm_k1z_ymmm256b32_imm8
Mnemonic::Vpsrld,// EVEX_Vpsrld_zmm_k1z_zmmm512b32_imm8
Mnemonic::Psrad,// Psrad_mm_imm8
Mnemonic::Psrad,// Psrad_xmm_imm8
Mnemonic::Vpsrad,// VEX_Vpsrad_xmm_xmm_imm8
Mnemonic::Vpsrad,// VEX_Vpsrad_ymm_ymm_imm8
Mnemonic::Vpsrad,// EVEX_Vpsrad_xmm_k1z_xmmm128b32_imm8
Mnemonic::Vpsrad,// EVEX_Vpsrad_ymm_k1z_ymmm256b32_imm8
Mnemonic::Vpsrad,// EVEX_Vpsrad_zmm_k1z_zmmm512b32_imm8
Mnemonic::Vpsraq,// EVEX_Vpsraq_xmm_k1z_xmmm128b64_imm8
Mnemonic::Vpsraq,// EVEX_Vpsraq_ymm_k1z_ymmm256b64_imm8
Mnemonic::Vpsraq,// EVEX_Vpsraq_zmm_k1z_zmmm512b64_imm8
Mnemonic::Pslld,// Pslld_mm_imm8
Mnemonic::Pslld,// Pslld_xmm_imm8
Mnemonic::Vpslld,// VEX_Vpslld_xmm_xmm_imm8
Mnemonic::Vpslld,// VEX_Vpslld_ymm_ymm_imm8
Mnemonic::Vpslld,// EVEX_Vpslld_xmm_k1z_xmmm128b32_imm8
Mnemonic::Vpslld,// EVEX_Vpslld_ymm_k1z_ymmm256b32_imm8
Mnemonic::Vpslld,// EVEX_Vpslld_zmm_k1z_zmmm512b32_imm8
Mnemonic::Psrlq,// Psrlq_mm_imm8
Mnemonic::Psrlq,// Psrlq_xmm_imm8
Mnemonic::Vpsrlq,// VEX_Vpsrlq_xmm_xmm_imm8
Mnemonic::Vpsrlq,// VEX_Vpsrlq_ymm_ymm_imm8
Mnemonic::Vpsrlq,// EVEX_Vpsrlq_xmm_k1z_xmmm128b64_imm8
Mnemonic::Vpsrlq,// EVEX_Vpsrlq_ymm_k1z_ymmm256b64_imm8
Mnemonic::Vpsrlq,// EVEX_Vpsrlq_zmm_k1z_zmmm512b64_imm8
Mnemonic::Psrldq,// Psrldq_xmm_imm8
Mnemonic::Vpsrldq,// VEX_Vpsrldq_xmm_xmm_imm8
Mnemonic::Vpsrldq,// VEX_Vpsrldq_ymm_ymm_imm8
Mnemonic::Vpsrldq,// EVEX_Vpsrldq_xmm_xmmm128_imm8
Mnemonic::Vpsrldq,// EVEX_Vpsrldq_ymm_ymmm256_imm8
Mnemonic::Vpsrldq,// EVEX_Vpsrldq_zmm_zmmm512_imm8
Mnemonic::Psllq,// Psllq_mm_imm8
Mnemonic::Psllq,// Psllq_xmm_imm8
Mnemonic::Vpsllq,// VEX_Vpsllq_xmm_xmm_imm8
Mnemonic::Vpsllq,// VEX_Vpsllq_ymm_ymm_imm8
Mnemonic::Vpsllq,// EVEX_Vpsllq_xmm_k1z_xmmm128b64_imm8
Mnemonic::Vpsllq,// EVEX_Vpsllq_ymm_k1z_ymmm256b64_imm8
Mnemonic::Vpsllq,// EVEX_Vpsllq_zmm_k1z_zmmm512b64_imm8
Mnemonic::Pslldq,// Pslldq_xmm_imm8
Mnemonic::Vpslldq,// VEX_Vpslldq_xmm_xmm_imm8
Mnemonic::Vpslldq,// VEX_Vpslldq_ymm_ymm_imm8
Mnemonic::Vpslldq,// EVEX_Vpslldq_xmm_xmmm128_imm8
Mnemonic::Vpslldq,// EVEX_Vpslldq_ymm_ymmm256_imm8
Mnemonic::Vpslldq,// EVEX_Vpslldq_zmm_zmmm512_imm8
Mnemonic::Pcmpeqb,// Pcmpeqb_mm_mmm64
Mnemonic::Pcmpeqb,// Pcmpeqb_xmm_xmmm128
Mnemonic::Vpcmpeqb,// VEX_Vpcmpeqb_xmm_xmm_xmmm128
Mnemonic::Vpcmpeqb,// VEX_Vpcmpeqb_ymm_ymm_ymmm256
Mnemonic::Vpcmpeqb,// EVEX_Vpcmpeqb_kr_k1_xmm_xmmm128
Mnemonic::Vpcmpeqb,// EVEX_Vpcmpeqb_kr_k1_ymm_ymmm256
Mnemonic::Vpcmpeqb,// EVEX_Vpcmpeqb_kr_k1_zmm_zmmm512
Mnemonic::Pcmpeqw,// Pcmpeqw_mm_mmm64
Mnemonic::Pcmpeqw,// Pcmpeqw_xmm_xmmm128
Mnemonic::Vpcmpeqw,// VEX_Vpcmpeqw_xmm_xmm_xmmm128
Mnemonic::Vpcmpeqw,// VEX_Vpcmpeqw_ymm_ymm_ymmm256
Mnemonic::Vpcmpeqw,// EVEX_Vpcmpeqw_kr_k1_xmm_xmmm128
Mnemonic::Vpcmpeqw,// EVEX_Vpcmpeqw_kr_k1_ymm_ymmm256
Mnemonic::Vpcmpeqw,// EVEX_Vpcmpeqw_kr_k1_zmm_zmmm512
Mnemonic::Pcmpeqd,// Pcmpeqd_mm_mmm64
Mnemonic::Pcmpeqd,// Pcmpeqd_xmm_xmmm128
Mnemonic::Vpcmpeqd,// VEX_Vpcmpeqd_xmm_xmm_xmmm128
Mnemonic::Vpcmpeqd,// VEX_Vpcmpeqd_ymm_ymm_ymmm256
Mnemonic::Vpcmpeqd,// EVEX_Vpcmpeqd_kr_k1_xmm_xmmm128b32
Mnemonic::Vpcmpeqd,// EVEX_Vpcmpeqd_kr_k1_ymm_ymmm256b32
Mnemonic::Vpcmpeqd,// EVEX_Vpcmpeqd_kr_k1_zmm_zmmm512b32
Mnemonic::Emms,// Emms
Mnemonic::Vzeroupper,// VEX_Vzeroupper
Mnemonic::Vzeroall,// VEX_Vzeroall
Mnemonic::Vmread,// Vmread_rm32_r32
Mnemonic::Vmread,// Vmread_rm64_r64
Mnemonic::Vcvttps2udq,// EVEX_Vcvttps2udq_xmm_k1z_xmmm128b32
Mnemonic::Vcvttps2udq,// EVEX_Vcvttps2udq_ymm_k1z_ymmm256b32
Mnemonic::Vcvttps2udq,// EVEX_Vcvttps2udq_zmm_k1z_zmmm512b32_sae
Mnemonic::Vcvttpd2udq,// EVEX_Vcvttpd2udq_xmm_k1z_xmmm128b64
Mnemonic::Vcvttpd2udq,// EVEX_Vcvttpd2udq_xmm_k1z_ymmm256b64
Mnemonic::Vcvttpd2udq,// EVEX_Vcvttpd2udq_ymm_k1z_zmmm512b64_sae
Mnemonic::Extrq,// Extrq_xmm_imm8_imm8
Mnemonic::Vcvttps2uqq,// EVEX_Vcvttps2uqq_xmm_k1z_xmmm64b32
Mnemonic::Vcvttps2uqq,// EVEX_Vcvttps2uqq_ymm_k1z_xmmm128b32
Mnemonic::Vcvttps2uqq,// EVEX_Vcvttps2uqq_zmm_k1z_ymmm256b32_sae
Mnemonic::Vcvttpd2uqq,// EVEX_Vcvttpd2uqq_xmm_k1z_xmmm128b64
Mnemonic::Vcvttpd2uqq,// EVEX_Vcvttpd2uqq_ymm_k1z_ymmm256b64
Mnemonic::Vcvttpd2uqq,// EVEX_Vcvttpd2uqq_zmm_k1z_zmmm512b64_sae
Mnemonic::Vcvttss2usi,// EVEX_Vcvttss2usi_r32_xmmm32_sae
Mnemonic::Vcvttss2usi,// EVEX_Vcvttss2usi_r64_xmmm32_sae
Mnemonic::Insertq,// Insertq_xmm_xmm_imm8_imm8
Mnemonic::Vcvttsd2usi,// EVEX_Vcvttsd2usi_r32_xmmm64_sae
Mnemonic::Vcvttsd2usi,// EVEX_Vcvttsd2usi_r64_xmmm64_sae
Mnemonic::Vmwrite,// Vmwrite_r32_rm32
Mnemonic::Vmwrite,// Vmwrite_r64_rm64
Mnemonic::Vcvtps2udq,// EVEX_Vcvtps2udq_xmm_k1z_xmmm128b32
Mnemonic::Vcvtps2udq,// EVEX_Vcvtps2udq_ymm_k1z_ymmm256b32
Mnemonic::Vcvtps2udq,// EVEX_Vcvtps2udq_zmm_k1z_zmmm512b32_er
Mnemonic::Vcvtpd2udq,// EVEX_Vcvtpd2udq_xmm_k1z_xmmm128b64
Mnemonic::Vcvtpd2udq,// EVEX_Vcvtpd2udq_xmm_k1z_ymmm256b64
Mnemonic::Vcvtpd2udq,// EVEX_Vcvtpd2udq_ymm_k1z_zmmm512b64_er
Mnemonic::Extrq,// Extrq_xmm_xmm
Mnemonic::Vcvtps2uqq,// EVEX_Vcvtps2uqq_xmm_k1z_xmmm64b32
Mnemonic::Vcvtps2uqq,// EVEX_Vcvtps2uqq_ymm_k1z_xmmm128b32
Mnemonic::Vcvtps2uqq,// EVEX_Vcvtps2uqq_zmm_k1z_ymmm256b32_er
Mnemonic::Vcvtpd2uqq,// EVEX_Vcvtpd2uqq_xmm_k1z_xmmm128b64
Mnemonic::Vcvtpd2uqq,// EVEX_Vcvtpd2uqq_ymm_k1z_ymmm256b64
Mnemonic::Vcvtpd2uqq,// EVEX_Vcvtpd2uqq_zmm_k1z_zmmm512b64_er
Mnemonic::Vcvtss2usi,// EVEX_Vcvtss2usi_r32_xmmm32_er
Mnemonic::Vcvtss2usi,// EVEX_Vcvtss2usi_r64_xmmm32_er
Mnemonic::Insertq,// Insertq_xmm_xmm
Mnemonic::Vcvtsd2usi,// EVEX_Vcvtsd2usi_r32_xmmm64_er
Mnemonic::Vcvtsd2usi,// EVEX_Vcvtsd2usi_r64_xmmm64_er
Mnemonic::Vcvttps2qq,// EVEX_Vcvttps2qq_xmm_k1z_xmmm64b32
Mnemonic::Vcvttps2qq,// EVEX_Vcvttps2qq_ymm_k1z_xmmm128b32
Mnemonic::Vcvttps2qq,// EVEX_Vcvttps2qq_zmm_k1z_ymmm256b32_sae
Mnemonic::Vcvttpd2qq,// EVEX_Vcvttpd2qq_xmm_k1z_xmmm128b64
Mnemonic::Vcvttpd2qq,// EVEX_Vcvttpd2qq_ymm_k1z_ymmm256b64
Mnemonic::Vcvttpd2qq,// EVEX_Vcvttpd2qq_zmm_k1z_zmmm512b64_sae
Mnemonic::Vcvtudq2pd,// EVEX_Vcvtudq2pd_xmm_k1z_xmmm64b32
Mnemonic::Vcvtudq2pd,// EVEX_Vcvtudq2pd_ymm_k1z_xmmm128b32
Mnemonic::Vcvtudq2pd,// EVEX_Vcvtudq2pd_zmm_k1z_ymmm256b32_er
Mnemonic::Vcvtuqq2pd,// EVEX_Vcvtuqq2pd_xmm_k1z_xmmm128b64
Mnemonic::Vcvtuqq2pd,// EVEX_Vcvtuqq2pd_ymm_k1z_ymmm256b64
Mnemonic::Vcvtuqq2pd,// EVEX_Vcvtuqq2pd_zmm_k1z_zmmm512b64_er
Mnemonic::Vcvtudq2ps,// EVEX_Vcvtudq2ps_xmm_k1z_xmmm128b32
Mnemonic::Vcvtudq2ps,// EVEX_Vcvtudq2ps_ymm_k1z_ymmm256b32
Mnemonic::Vcvtudq2ps,// EVEX_Vcvtudq2ps_zmm_k1z_zmmm512b32_er
Mnemonic::Vcvtuqq2ps,// EVEX_Vcvtuqq2ps_xmm_k1z_xmmm128b64
Mnemonic::Vcvtuqq2ps,// EVEX_Vcvtuqq2ps_xmm_k1z_ymmm256b64
Mnemonic::Vcvtuqq2ps,// EVEX_Vcvtuqq2ps_ymm_k1z_zmmm512b64_er
Mnemonic::Vcvtps2qq,// EVEX_Vcvtps2qq_xmm_k1z_xmmm64b32
Mnemonic::Vcvtps2qq,// EVEX_Vcvtps2qq_ymm_k1z_xmmm128b32
Mnemonic::Vcvtps2qq,// EVEX_Vcvtps2qq_zmm_k1z_ymmm256b32_er
Mnemonic::Vcvtpd2qq,// EVEX_Vcvtpd2qq_xmm_k1z_xmmm128b64
Mnemonic::Vcvtpd2qq,// EVEX_Vcvtpd2qq_ymm_k1z_ymmm256b64
Mnemonic::Vcvtpd2qq,// EVEX_Vcvtpd2qq_zmm_k1z_zmmm512b64_er
Mnemonic::Vcvtusi2ss,// EVEX_Vcvtusi2ss_xmm_xmm_rm32_er
Mnemonic::Vcvtusi2ss,// EVEX_Vcvtusi2ss_xmm_xmm_rm64_er
Mnemonic::Vcvtusi2sd,// EVEX_Vcvtusi2sd_xmm_xmm_rm32_er
Mnemonic::Vcvtusi2sd,// EVEX_Vcvtusi2sd_xmm_xmm_rm64_er
Mnemonic::Haddpd,// Haddpd_xmm_xmmm128
Mnemonic::Vhaddpd,// VEX_Vhaddpd_xmm_xmm_xmmm128
Mnemonic::Vhaddpd,// VEX_Vhaddpd_ymm_ymm_ymmm256
Mnemonic::Haddps,// Haddps_xmm_xmmm128
Mnemonic::Vhaddps,// VEX_Vhaddps_xmm_xmm_xmmm128
Mnemonic::Vhaddps,// VEX_Vhaddps_ymm_ymm_ymmm256
Mnemonic::Hsubpd,// Hsubpd_xmm_xmmm128
Mnemonic::Vhsubpd,// VEX_Vhsubpd_xmm_xmm_xmmm128
Mnemonic::Vhsubpd,// VEX_Vhsubpd_ymm_ymm_ymmm256
Mnemonic::Hsubps,// Hsubps_xmm_xmmm128
Mnemonic::Vhsubps,// VEX_Vhsubps_xmm_xmm_xmmm128
Mnemonic::Vhsubps,// VEX_Vhsubps_ymm_ymm_ymmm256
Mnemonic::Movd,// Movd_rm32_mm
Mnemonic::Movq,// Movq_rm64_mm
Mnemonic::Movd,// Movd_rm32_xmm
Mnemonic::Movq,// Movq_rm64_xmm
Mnemonic::Vmovd,// VEX_Vmovd_rm32_xmm
Mnemonic::Vmovq,// VEX_Vmovq_rm64_xmm
Mnemonic::Vmovd,// EVEX_Vmovd_rm32_xmm
Mnemonic::Vmovq,// EVEX_Vmovq_rm64_xmm
Mnemonic::Movq,// Movq_xmm_xmmm64
Mnemonic::Vmovq,// VEX_Vmovq_xmm_xmmm64
Mnemonic::Vmovq,// EVEX_Vmovq_xmm_xmmm64
Mnemonic::Movq,// Movq_mmm64_mm
Mnemonic::Movdqa,// Movdqa_xmmm128_xmm
Mnemonic::Vmovdqa,// VEX_Vmovdqa_xmmm128_xmm
Mnemonic::Vmovdqa,// VEX_Vmovdqa_ymmm256_ymm
Mnemonic::Vmovdqa32,// EVEX_Vmovdqa32_xmmm128_k1z_xmm
Mnemonic::Vmovdqa32,// EVEX_Vmovdqa32_ymmm256_k1z_ymm
Mnemonic::Vmovdqa32,// EVEX_Vmovdqa32_zmmm512_k1z_zmm
Mnemonic::Vmovdqa64,// EVEX_Vmovdqa64_xmmm128_k1z_xmm
Mnemonic::Vmovdqa64,// EVEX_Vmovdqa64_ymmm256_k1z_ymm
Mnemonic::Vmovdqa64,// EVEX_Vmovdqa64_zmmm512_k1z_zmm
Mnemonic::Movdqu,// Movdqu_xmmm128_xmm
Mnemonic::Vmovdqu,// VEX_Vmovdqu_xmmm128_xmm
Mnemonic::Vmovdqu,// VEX_Vmovdqu_ymmm256_ymm
Mnemonic::Vmovdqu32,// EVEX_Vmovdqu32_xmmm128_k1z_xmm
Mnemonic::Vmovdqu32,// EVEX_Vmovdqu32_ymmm256_k1z_ymm
Mnemonic::Vmovdqu32,// EVEX_Vmovdqu32_zmmm512_k1z_zmm
Mnemonic::Vmovdqu64,// EVEX_Vmovdqu64_xmmm128_k1z_xmm
Mnemonic::Vmovdqu64,// EVEX_Vmovdqu64_ymmm256_k1z_ymm
Mnemonic::Vmovdqu64,// EVEX_Vmovdqu64_zmmm512_k1z_zmm
Mnemonic::Vmovdqu8,// EVEX_Vmovdqu8_xmmm128_k1z_xmm
Mnemonic::Vmovdqu8,// EVEX_Vmovdqu8_ymmm256_k1z_ymm
Mnemonic::Vmovdqu8,// EVEX_Vmovdqu8_zmmm512_k1z_zmm
Mnemonic::Vmovdqu16,// EVEX_Vmovdqu16_xmmm128_k1z_xmm
Mnemonic::Vmovdqu16,// EVEX_Vmovdqu16_ymmm256_k1z_ymm
Mnemonic::Vmovdqu16,// EVEX_Vmovdqu16_zmmm512_k1z_zmm
Mnemonic::Jo,// Jo_rel16
Mnemonic::Jo,// Jo_rel32_32
Mnemonic::Jo,// Jo_rel32_64
Mnemonic::Jno,// Jno_rel16
Mnemonic::Jno,// Jno_rel32_32
Mnemonic::Jno,// Jno_rel32_64
Mnemonic::Jb,// Jb_rel16
Mnemonic::Jb,// Jb_rel32_32
Mnemonic::Jb,// Jb_rel32_64
Mnemonic::Jae,// Jae_rel16
Mnemonic::Jae,// Jae_rel32_32
Mnemonic::Jae,// Jae_rel32_64
Mnemonic::Je,// Je_rel16
Mnemonic::Je,// Je_rel32_32
Mnemonic::Je,// Je_rel32_64
Mnemonic::Jne,// Jne_rel16
Mnemonic::Jne,// Jne_rel32_32
Mnemonic::Jne,// Jne_rel32_64
Mnemonic::Jbe,// Jbe_rel16
Mnemonic::Jbe,// Jbe_rel32_32
Mnemonic::Jbe,// Jbe_rel32_64
Mnemonic::Ja,// Ja_rel16
Mnemonic::Ja,// Ja_rel32_32
Mnemonic::Ja,// Ja_rel32_64
Mnemonic::Js,// Js_rel16
Mnemonic::Js,// Js_rel32_32
Mnemonic::Js,// Js_rel32_64
Mnemonic::Jns,// Jns_rel16
Mnemonic::Jns,// Jns_rel32_32
Mnemonic::Jns,// Jns_rel32_64
Mnemonic::Jp,// Jp_rel16
Mnemonic::Jp,// Jp_rel32_32
Mnemonic::Jp,// Jp_rel32_64
Mnemonic::Jnp,// Jnp_rel16
Mnemonic::Jnp,// Jnp_rel32_32
Mnemonic::Jnp,// Jnp_rel32_64
Mnemonic::Jl,// Jl_rel16
Mnemonic::Jl,// Jl_rel32_32
Mnemonic::Jl,// Jl_rel32_64
Mnemonic::Jge,// Jge_rel16
Mnemonic::Jge,// Jge_rel32_32
Mnemonic::Jge,// Jge_rel32_64
Mnemonic::Jle,// Jle_rel16
Mnemonic::Jle,// Jle_rel32_32
Mnemonic::Jle,// Jle_rel32_64
Mnemonic::Jg,// Jg_rel16
Mnemonic::Jg,// Jg_rel32_32
Mnemonic::Jg,// Jg_rel32_64
Mnemonic::Seto,// Seto_rm8
Mnemonic::Setno,// Setno_rm8
Mnemonic::Setb,// Setb_rm8
Mnemonic::Setae,// Setae_rm8
Mnemonic::Sete,// Sete_rm8
Mnemonic::Setne,// Setne_rm8
Mnemonic::Setbe,// Setbe_rm8
Mnemonic::Seta,// Seta_rm8
Mnemonic::Sets,// Sets_rm8
Mnemonic::Setns,// Setns_rm8
Mnemonic::Setp,// Setp_rm8
Mnemonic::Setnp,// Setnp_rm8
Mnemonic::Setl,// Setl_rm8
Mnemonic::Setge,// Setge_rm8
Mnemonic::Setle,// Setle_rm8
Mnemonic::Setg,// Setg_rm8
Mnemonic::Kmovw,// VEX_Kmovw_kr_km16
Mnemonic::Kmovq,// VEX_Kmovq_kr_km64
Mnemonic::Kmovb,// VEX_Kmovb_kr_km8
Mnemonic::Kmovd,// VEX_Kmovd_kr_km32
Mnemonic::Kmovw,// VEX_Kmovw_m16_kr
Mnemonic::Kmovq,// VEX_Kmovq_m64_kr
Mnemonic::Kmovb,// VEX_Kmovb_m8_kr
Mnemonic::Kmovd,// VEX_Kmovd_m32_kr
Mnemonic::Kmovw,// VEX_Kmovw_kr_r32
Mnemonic::Kmovb,// VEX_Kmovb_kr_r32
Mnemonic::Kmovd,// VEX_Kmovd_kr_r32
Mnemonic::Kmovq,// VEX_Kmovq_kr_r64
Mnemonic::Kmovw,// VEX_Kmovw_r32_kr
Mnemonic::Kmovb,// VEX_Kmovb_r32_kr
Mnemonic::Kmovd,// VEX_Kmovd_r32_kr
Mnemonic::Kmovq,// VEX_Kmovq_r64_kr
Mnemonic::Kortestw,// VEX_Kortestw_kr_kr
Mnemonic::Kortestq,// VEX_Kortestq_kr_kr
Mnemonic::Kortestb,// VEX_Kortestb_kr_kr
Mnemonic::Kortestd,// VEX_Kortestd_kr_kr
Mnemonic::Ktestw,// VEX_Ktestw_kr_kr
Mnemonic::Ktestq,// VEX_Ktestq_kr_kr
Mnemonic::Ktestb,// VEX_Ktestb_kr_kr
Mnemonic::Ktestd,// VEX_Ktestd_kr_kr
Mnemonic::Push,// Pushw_FS
Mnemonic::Push,// Pushd_FS
Mnemonic::Push,// Pushq_FS
Mnemonic::Pop,// Popw_FS
Mnemonic::Pop,// Popd_FS
Mnemonic::Pop,// Popq_FS
Mnemonic::Cpuid,// Cpuid
Mnemonic::Bt,// Bt_rm16_r16
Mnemonic::Bt,// Bt_rm32_r32
Mnemonic::Bt,// Bt_rm64_r64
Mnemonic::Shld,// Shld_rm16_r16_imm8
Mnemonic::Shld,// Shld_rm32_r32_imm8
Mnemonic::Shld,// Shld_rm64_r64_imm8
Mnemonic::Shld,// Shld_rm16_r16_CL
Mnemonic::Shld,// Shld_rm32_r32_CL
Mnemonic::Shld,// Shld_rm64_r64_CL
Mnemonic::Montmul,// Montmul_16
Mnemonic::Montmul,// Montmul_32
Mnemonic::Montmul,// Montmul_64
Mnemonic::Xsha1,// Xsha1_16
Mnemonic::Xsha1,// Xsha1_32
Mnemonic::Xsha1,// Xsha1_64
Mnemonic::Xsha256,// Xsha256_16
Mnemonic::Xsha256,// Xsha256_32
Mnemonic::Xsha256,// Xsha256_64
Mnemonic::Xbts,// Xbts_r16_rm16
Mnemonic::Xbts,// Xbts_r32_rm32
Mnemonic::Xstore,// Xstore_16
Mnemonic::Xstore,// Xstore_32
Mnemonic::Xstore,// Xstore_64
Mnemonic::Xcryptecb,// Xcryptecb_16
Mnemonic::Xcryptecb,// Xcryptecb_32
Mnemonic::Xcryptecb,// Xcryptecb_64
Mnemonic::Xcryptcbc,// Xcryptcbc_16
Mnemonic::Xcryptcbc,// Xcryptcbc_32
Mnemonic::Xcryptcbc,// Xcryptcbc_64
Mnemonic::Xcryptctr,// Xcryptctr_16
Mnemonic::Xcryptctr,// Xcryptctr_32
Mnemonic::Xcryptctr,// Xcryptctr_64
Mnemonic::Xcryptcfb,// Xcryptcfb_16
Mnemonic::Xcryptcfb,// Xcryptcfb_32
Mnemonic::Xcryptcfb,// Xcryptcfb_64
Mnemonic::Xcryptofb,// Xcryptofb_16
Mnemonic::Xcryptofb,// Xcryptofb_32
Mnemonic::Xcryptofb,// Xcryptofb_64
Mnemonic::Ibts,// Ibts_rm16_r16
Mnemonic::Ibts,// Ibts_rm32_r32
Mnemonic::Cmpxchg,// Cmpxchg486_rm8_r8
Mnemonic::Cmpxchg,// Cmpxchg486_rm16_r16
Mnemonic::Cmpxchg,// Cmpxchg486_rm32_r32
Mnemonic::Push,// Pushw_GS
Mnemonic::Push,// Pushd_GS
Mnemonic::Push,// Pushq_GS
Mnemonic::Pop,// Popw_GS
Mnemonic::Pop,// Popd_GS
Mnemonic::Pop,// Popq_GS
Mnemonic::Rsm,// Rsm
Mnemonic::Bts,// Bts_rm16_r16
Mnemonic::Bts,// Bts_rm32_r32
Mnemonic::Bts,// Bts_rm64_r64
Mnemonic::Shrd,// Shrd_rm16_r16_imm8
Mnemonic::Shrd,// Shrd_rm32_r32_imm8
Mnemonic::Shrd,// Shrd_rm64_r64_imm8
Mnemonic::Shrd,// Shrd_rm16_r16_CL
Mnemonic::Shrd,// Shrd_rm32_r32_CL
Mnemonic::Shrd,// Shrd_rm64_r64_CL
Mnemonic::Fxsave,// Fxsave_m512byte
Mnemonic::Fxsave64,// Fxsave64_m512byte
Mnemonic::Rdfsbase,// Rdfsbase_r32
Mnemonic::Rdfsbase,// Rdfsbase_r64
Mnemonic::Fxrstor,// Fxrstor_m512byte
Mnemonic::Fxrstor64,// Fxrstor64_m512byte
Mnemonic::Rdgsbase,// Rdgsbase_r32
Mnemonic::Rdgsbase,// Rdgsbase_r64
Mnemonic::Ldmxcsr,// Ldmxcsr_m32
Mnemonic::Wrfsbase,// Wrfsbase_r32
Mnemonic::Wrfsbase,// Wrfsbase_r64
Mnemonic::Vldmxcsr,// VEX_Vldmxcsr_m32
Mnemonic::Stmxcsr,// Stmxcsr_m32
Mnemonic::Wrgsbase,// Wrgsbase_r32
Mnemonic::Wrgsbase,// Wrgsbase_r64
Mnemonic::Vstmxcsr,// VEX_Vstmxcsr_m32
Mnemonic::Xsave,// Xsave_mem
Mnemonic::Xsave64,// Xsave64_mem
Mnemonic::Ptwrite,// Ptwrite_rm32
Mnemonic::Ptwrite,// Ptwrite_rm64
Mnemonic::Xrstor,// Xrstor_mem
Mnemonic::Xrstor64,// Xrstor64_mem
Mnemonic::Incsspd,// Incsspd_r32
Mnemonic::Incsspq,// Incsspq_r64
Mnemonic::Xsaveopt,// Xsaveopt_mem
Mnemonic::Xsaveopt64,// Xsaveopt64_mem
Mnemonic::Clwb,// Clwb_m8
Mnemonic::Tpause,// Tpause_r32
Mnemonic::Tpause,// Tpause_r64
Mnemonic::Clrssbsy,// Clrssbsy_m64
Mnemonic::Umonitor,// Umonitor_r16
Mnemonic::Umonitor,// Umonitor_r32
Mnemonic::Umonitor,// Umonitor_r64
Mnemonic::Umwait,// Umwait_r32
Mnemonic::Umwait,// Umwait_r64
Mnemonic::Clflush,// Clflush_m8
Mnemonic::Clflushopt,// Clflushopt_m8
Mnemonic::Lfence,// Lfence
Mnemonic::Lfence,// Lfence_E9
Mnemonic::Lfence,// Lfence_EA
Mnemonic::Lfence,// Lfence_EB
Mnemonic::Lfence,// Lfence_EC
Mnemonic::Lfence,// Lfence_ED
Mnemonic::Lfence,// Lfence_EE
Mnemonic::Lfence,// Lfence_EF
Mnemonic::Mfence,// Mfence
Mnemonic::Mfence,// Mfence_F1
Mnemonic::Mfence,// Mfence_F2
Mnemonic::Mfence,// Mfence_F3
Mnemonic::Mfence,// Mfence_F4
Mnemonic::Mfence,// Mfence_F5
Mnemonic::Mfence,// Mfence_F6
Mnemonic::Mfence,// Mfence_F7
Mnemonic::Sfence,// Sfence
Mnemonic::Sfence,// Sfence_F9
Mnemonic::Sfence,// Sfence_FA
Mnemonic::Sfence,// Sfence_FB
Mnemonic::Sfence,// Sfence_FC
Mnemonic::Sfence,// Sfence_FD
Mnemonic::Sfence,// Sfence_FE
Mnemonic::Sfence,// Sfence_FF
Mnemonic::Pcommit,// Pcommit
Mnemonic::Imul,// Imul_r16_rm16
Mnemonic::Imul,// Imul_r32_rm32
Mnemonic::Imul,// Imul_r64_rm64
Mnemonic::Cmpxchg,// Cmpxchg_rm8_r8
Mnemonic::Cmpxchg,// Cmpxchg_rm16_r16
Mnemonic::Cmpxchg,// Cmpxchg_rm32_r32
Mnemonic::Cmpxchg,// Cmpxchg_rm64_r64
Mnemonic::Lss,// Lss_r16_m1616
Mnemonic::Lss,// Lss_r32_m1632
Mnemonic::Lss,// Lss_r64_m1664
Mnemonic::Btr,// Btr_rm16_r16
Mnemonic::Btr,// Btr_rm32_r32
Mnemonic::Btr,// Btr_rm64_r64
Mnemonic::Lfs,// Lfs_r16_m1616
Mnemonic::Lfs,// Lfs_r32_m1632
Mnemonic::Lfs,// Lfs_r64_m1664
Mnemonic::Lgs,// Lgs_r16_m1616
Mnemonic::Lgs,// Lgs_r32_m1632
Mnemonic::Lgs,// Lgs_r64_m1664
Mnemonic::Movzx,// Movzx_r16_rm8
Mnemonic::Movzx,// Movzx_r32_rm8
Mnemonic::Movzx,// Movzx_r64_rm8
Mnemonic::Movzx,// Movzx_r16_rm16
Mnemonic::Movzx,// Movzx_r32_rm16
Mnemonic::Movzx,// Movzx_r64_rm16
Mnemonic::Jmpe,// Jmpe_disp16
Mnemonic::Jmpe,// Jmpe_disp32
Mnemonic::Popcnt,// Popcnt_r16_rm16
Mnemonic::Popcnt,// Popcnt_r32_rm32
Mnemonic::Popcnt,// Popcnt_r64_rm64
Mnemonic::Ud1,// Ud1_r16_rm16
Mnemonic::Ud1,// Ud1_r32_rm32
Mnemonic::Ud1,// Ud1_r64_rm64
Mnemonic::Bt,// Bt_rm16_imm8
Mnemonic::Bt,// Bt_rm32_imm8
Mnemonic::Bt,// Bt_rm64_imm8
Mnemonic::Bts,// Bts_rm16_imm8
Mnemonic::Bts,// Bts_rm32_imm8
Mnemonic::Bts,// Bts_rm64_imm8
Mnemonic::Btr,// Btr_rm16_imm8
Mnemonic::Btr,// Btr_rm32_imm8
Mnemonic::Btr,// Btr_rm64_imm8
Mnemonic::Btc,// Btc_rm16_imm8
Mnemonic::Btc,// Btc_rm32_imm8
Mnemonic::Btc,// Btc_rm64_imm8
Mnemonic::Btc,// Btc_rm16_r16
Mnemonic::Btc,// Btc_rm32_r32
Mnemonic::Btc,// Btc_rm64_r64
Mnemonic::Bsf,// Bsf_r16_rm16
Mnemonic::Bsf,// Bsf_r32_rm32
Mnemonic::Bsf,// Bsf_r64_rm64
Mnemonic::Tzcnt,// Tzcnt_r16_rm16
Mnemonic::Tzcnt,// Tzcnt_r32_rm32
Mnemonic::Tzcnt,// Tzcnt_r64_rm64
Mnemonic::Bsr,// Bsr_r16_rm16
Mnemonic::Bsr,// Bsr_r32_rm32
Mnemonic::Bsr,// Bsr_r64_rm64
Mnemonic::Lzcnt,// Lzcnt_r16_rm16
Mnemonic::Lzcnt,// Lzcnt_r32_rm32
Mnemonic::Lzcnt,// Lzcnt_r64_rm64
Mnemonic::Movsx,// Movsx_r16_rm8
Mnemonic::Movsx,// Movsx_r32_rm8
Mnemonic::Movsx,// Movsx_r64_rm8
Mnemonic::Movsx,// Movsx_r16_rm16
Mnemonic::Movsx,// Movsx_r32_rm16
Mnemonic::Movsx,// Movsx_r64_rm16
Mnemonic::Xadd,// Xadd_rm8_r8
Mnemonic::Xadd,// Xadd_rm16_r16
Mnemonic::Xadd,// Xadd_rm32_r32
Mnemonic::Xadd,// Xadd_rm64_r64
Mnemonic::Cmpps,// Cmpps_xmm_xmmm128_imm8
Mnemonic::Vcmpps,// VEX_Vcmpps_xmm_xmm_xmmm128_imm8
Mnemonic::Vcmpps,// VEX_Vcmpps_ymm_ymm_ymmm256_imm8
Mnemonic::Vcmpps,// EVEX_Vcmpps_kr_k1_xmm_xmmm128b32_imm8
Mnemonic::Vcmpps,// EVEX_Vcmpps_kr_k1_ymm_ymmm256b32_imm8
Mnemonic::Vcmpps,// EVEX_Vcmpps_kr_k1_zmm_zmmm512b32_imm8_sae
Mnemonic::Cmppd,// Cmppd_xmm_xmmm128_imm8
Mnemonic::Vcmppd,// VEX_Vcmppd_xmm_xmm_xmmm128_imm8
Mnemonic::Vcmppd,// VEX_Vcmppd_ymm_ymm_ymmm256_imm8
Mnemonic::Vcmppd,// EVEX_Vcmppd_kr_k1_xmm_xmmm128b64_imm8
Mnemonic::Vcmppd,// EVEX_Vcmppd_kr_k1_ymm_ymmm256b64_imm8
Mnemonic::Vcmppd,// EVEX_Vcmppd_kr_k1_zmm_zmmm512b64_imm8_sae
Mnemonic::Cmpss,// Cmpss_xmm_xmmm32_imm8
Mnemonic::Vcmpss,// VEX_Vcmpss_xmm_xmm_xmmm32_imm8
Mnemonic::Vcmpss,// EVEX_Vcmpss_kr_k1_xmm_xmmm32_imm8_sae
Mnemonic::Cmpsd,// Cmpsd_xmm_xmmm64_imm8
Mnemonic::Vcmpsd,// VEX_Vcmpsd_xmm_xmm_xmmm64_imm8
Mnemonic::Vcmpsd,// EVEX_Vcmpsd_kr_k1_xmm_xmmm64_imm8_sae
Mnemonic::Movnti,// Movnti_m32_r32
Mnemonic::Movnti,// Movnti_m64_r64
Mnemonic::Pinsrw,// Pinsrw_mm_r32m16_imm8
Mnemonic::Pinsrw,// Pinsrw_mm_r64m16_imm8
Mnemonic::Pinsrw,// Pinsrw_xmm_r32m16_imm8
Mnemonic::Pinsrw,// Pinsrw_xmm_r64m16_imm8
Mnemonic::Vpinsrw,// VEX_Vpinsrw_xmm_xmm_r32m16_imm8
Mnemonic::Vpinsrw,// VEX_Vpinsrw_xmm_xmm_r64m16_imm8
Mnemonic::Vpinsrw,// EVEX_Vpinsrw_xmm_xmm_r32m16_imm8
Mnemonic::Vpinsrw,// EVEX_Vpinsrw_xmm_xmm_r64m16_imm8
Mnemonic::Pextrw,// Pextrw_r32_mm_imm8
Mnemonic::Pextrw,// Pextrw_r64_mm_imm8
Mnemonic::Pextrw,// Pextrw_r32_xmm_imm8
Mnemonic::Pextrw,// Pextrw_r64_xmm_imm8
Mnemonic::Vpextrw,// VEX_Vpextrw_r32_xmm_imm8
Mnemonic::Vpextrw,// VEX_Vpextrw_r64_xmm_imm8
Mnemonic::Vpextrw,// EVEX_Vpextrw_r32_xmm_imm8
Mnemonic::Vpextrw,// EVEX_Vpextrw_r64_xmm_imm8
Mnemonic::Shufps,// Shufps_xmm_xmmm128_imm8
Mnemonic::Vshufps,// VEX_Vshufps_xmm_xmm_xmmm128_imm8
Mnemonic::Vshufps,// VEX_Vshufps_ymm_ymm_ymmm256_imm8
Mnemonic::Vshufps,// EVEX_Vshufps_xmm_k1z_xmm_xmmm128b32_imm8
Mnemonic::Vshufps,// EVEX_Vshufps_ymm_k1z_ymm_ymmm256b32_imm8
Mnemonic::Vshufps,// EVEX_Vshufps_zmm_k1z_zmm_zmmm512b32_imm8
Mnemonic::Shufpd,// Shufpd_xmm_xmmm128_imm8
Mnemonic::Vshufpd,// VEX_Vshufpd_xmm_xmm_xmmm128_imm8
Mnemonic::Vshufpd,// VEX_Vshufpd_ymm_ymm_ymmm256_imm8
Mnemonic::Vshufpd,// EVEX_Vshufpd_xmm_k1z_xmm_xmmm128b64_imm8
Mnemonic::Vshufpd,// EVEX_Vshufpd_ymm_k1z_ymm_ymmm256b64_imm8
Mnemonic::Vshufpd,// EVEX_Vshufpd_zmm_k1z_zmm_zmmm512b64_imm8
Mnemonic::Cmpxchg8b,// Cmpxchg8b_m64
Mnemonic::Cmpxchg16b,// Cmpxchg16b_m128
Mnemonic::Xrstors,// Xrstors_mem
Mnemonic::Xrstors64,// Xrstors64_mem
Mnemonic::Xsavec,// Xsavec_mem
Mnemonic::Xsavec64,// Xsavec64_mem
Mnemonic::Xsaves,// Xsaves_mem
Mnemonic::Xsaves64,// Xsaves64_mem
Mnemonic::Vmptrld,// Vmptrld_m64
Mnemonic::Vmclear,// Vmclear_m64
Mnemonic::Vmxon,// Vmxon_m64
Mnemonic::Rdrand,// Rdrand_r16
Mnemonic::Rdrand,// Rdrand_r32
Mnemonic::Rdrand,// Rdrand_r64
Mnemonic::Vmptrst,// Vmptrst_m64
Mnemonic::Rdseed,// Rdseed_r16
Mnemonic::Rdseed,// Rdseed_r32
Mnemonic::Rdseed,// Rdseed_r64
Mnemonic::Rdpid,// Rdpid_r32
Mnemonic::Rdpid,// Rdpid_r64
Mnemonic::Bswap,// Bswap_r16
Mnemonic::Bswap,// Bswap_r32
Mnemonic::Bswap,// Bswap_r64
Mnemonic::Addsubpd,// Addsubpd_xmm_xmmm128
Mnemonic::Vaddsubpd,// VEX_Vaddsubpd_xmm_xmm_xmmm128
Mnemonic::Vaddsubpd,// VEX_Vaddsubpd_ymm_ymm_ymmm256
Mnemonic::Addsubps,// Addsubps_xmm_xmmm128
Mnemonic::Vaddsubps,// VEX_Vaddsubps_xmm_xmm_xmmm128
Mnemonic::Vaddsubps,// VEX_Vaddsubps_ymm_ymm_ymmm256
Mnemonic::Psrlw,// Psrlw_mm_mmm64
Mnemonic::Psrlw,// Psrlw_xmm_xmmm128
Mnemonic::Vpsrlw,// VEX_Vpsrlw_xmm_xmm_xmmm128
Mnemonic::Vpsrlw,// VEX_Vpsrlw_ymm_ymm_xmmm128
Mnemonic::Vpsrlw,// EVEX_Vpsrlw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsrlw,// EVEX_Vpsrlw_ymm_k1z_ymm_xmmm128
Mnemonic::Vpsrlw,// EVEX_Vpsrlw_zmm_k1z_zmm_xmmm128
Mnemonic::Psrld,// Psrld_mm_mmm64
Mnemonic::Psrld,// Psrld_xmm_xmmm128
Mnemonic::Vpsrld,// VEX_Vpsrld_xmm_xmm_xmmm128
Mnemonic::Vpsrld,// VEX_Vpsrld_ymm_ymm_xmmm128
Mnemonic::Vpsrld,// EVEX_Vpsrld_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsrld,// EVEX_Vpsrld_ymm_k1z_ymm_xmmm128
Mnemonic::Vpsrld,// EVEX_Vpsrld_zmm_k1z_zmm_xmmm128
Mnemonic::Psrlq,// Psrlq_mm_mmm64
Mnemonic::Psrlq,// Psrlq_xmm_xmmm128
Mnemonic::Vpsrlq,// VEX_Vpsrlq_xmm_xmm_xmmm128
Mnemonic::Vpsrlq,// VEX_Vpsrlq_ymm_ymm_xmmm128
Mnemonic::Vpsrlq,// EVEX_Vpsrlq_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsrlq,// EVEX_Vpsrlq_ymm_k1z_ymm_xmmm128
Mnemonic::Vpsrlq,// EVEX_Vpsrlq_zmm_k1z_zmm_xmmm128
Mnemonic::Paddq,// Paddq_mm_mmm64
Mnemonic::Paddq,// Paddq_xmm_xmmm128
Mnemonic::Vpaddq,// VEX_Vpaddq_xmm_xmm_xmmm128
Mnemonic::Vpaddq,// VEX_Vpaddq_ymm_ymm_ymmm256
Mnemonic::Vpaddq,// EVEX_Vpaddq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpaddq,// EVEX_Vpaddq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpaddq,// EVEX_Vpaddq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Pmullw,// Pmullw_mm_mmm64
Mnemonic::Pmullw,// Pmullw_xmm_xmmm128
Mnemonic::Vpmullw,// VEX_Vpmullw_xmm_xmm_xmmm128
Mnemonic::Vpmullw,// VEX_Vpmullw_ymm_ymm_ymmm256
Mnemonic::Vpmullw,// EVEX_Vpmullw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpmullw,// EVEX_Vpmullw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpmullw,// EVEX_Vpmullw_zmm_k1z_zmm_zmmm512
Mnemonic::Movq,// Movq_xmmm64_xmm
Mnemonic::Vmovq,// VEX_Vmovq_xmmm64_xmm
Mnemonic::Vmovq,// EVEX_Vmovq_xmmm64_xmm
Mnemonic::Movq2dq,// Movq2dq_xmm_mm
Mnemonic::Movdq2q,// Movdq2q_mm_xmm
Mnemonic::Pmovmskb,// Pmovmskb_r32_mm
Mnemonic::Pmovmskb,// Pmovmskb_r64_mm
Mnemonic::Pmovmskb,// Pmovmskb_r32_xmm
Mnemonic::Pmovmskb,// Pmovmskb_r64_xmm
Mnemonic::Vpmovmskb,// VEX_Vpmovmskb_r32_xmm
Mnemonic::Vpmovmskb,// VEX_Vpmovmskb_r64_xmm
Mnemonic::Vpmovmskb,// VEX_Vpmovmskb_r32_ymm
Mnemonic::Vpmovmskb,// VEX_Vpmovmskb_r64_ymm
Mnemonic::Psubusb,// Psubusb_mm_mmm64
Mnemonic::Psubusb,// Psubusb_xmm_xmmm128
Mnemonic::Vpsubusb,// VEX_Vpsubusb_xmm_xmm_xmmm128
Mnemonic::Vpsubusb,// VEX_Vpsubusb_ymm_ymm_ymmm256
Mnemonic::Vpsubusb,// EVEX_Vpsubusb_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsubusb,// EVEX_Vpsubusb_ymm_k1z_ymm_ymmm256
Mnemonic::Vpsubusb,// EVEX_Vpsubusb_zmm_k1z_zmm_zmmm512
Mnemonic::Psubusw,// Psubusw_mm_mmm64
Mnemonic::Psubusw,// Psubusw_xmm_xmmm128
Mnemonic::Vpsubusw,// VEX_Vpsubusw_xmm_xmm_xmmm128
Mnemonic::Vpsubusw,// VEX_Vpsubusw_ymm_ymm_ymmm256
Mnemonic::Vpsubusw,// EVEX_Vpsubusw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsubusw,// EVEX_Vpsubusw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpsubusw,// EVEX_Vpsubusw_zmm_k1z_zmm_zmmm512
Mnemonic::Pminub,// Pminub_mm_mmm64
Mnemonic::Pminub,// Pminub_xmm_xmmm128
Mnemonic::Vpminub,// VEX_Vpminub_xmm_xmm_xmmm128
Mnemonic::Vpminub,// VEX_Vpminub_ymm_ymm_ymmm256
Mnemonic::Vpminub,// EVEX_Vpminub_xmm_k1z_xmm_xmmm128
Mnemonic::Vpminub,// EVEX_Vpminub_ymm_k1z_ymm_ymmm256
Mnemonic::Vpminub,// EVEX_Vpminub_zmm_k1z_zmm_zmmm512
Mnemonic::Pand,// Pand_mm_mmm64
Mnemonic::Pand,// Pand_xmm_xmmm128
Mnemonic::Vpand,// VEX_Vpand_xmm_xmm_xmmm128
Mnemonic::Vpand,// VEX_Vpand_ymm_ymm_ymmm256
Mnemonic::Vpandd,// EVEX_Vpandd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpandd,// EVEX_Vpandd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpandd,// EVEX_Vpandd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpandq,// EVEX_Vpandq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpandq,// EVEX_Vpandq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpandq,// EVEX_Vpandq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Paddusb,// Paddusb_mm_mmm64
Mnemonic::Paddusb,// Paddusb_xmm_xmmm128
Mnemonic::Vpaddusb,// VEX_Vpaddusb_xmm_xmm_xmmm128
Mnemonic::Vpaddusb,// VEX_Vpaddusb_ymm_ymm_ymmm256
Mnemonic::Vpaddusb,// EVEX_Vpaddusb_xmm_k1z_xmm_xmmm128
Mnemonic::Vpaddusb,// EVEX_Vpaddusb_ymm_k1z_ymm_ymmm256
Mnemonic::Vpaddusb,// EVEX_Vpaddusb_zmm_k1z_zmm_zmmm512
Mnemonic::Paddusw,// Paddusw_mm_mmm64
Mnemonic::Paddusw,// Paddusw_xmm_xmmm128
Mnemonic::Vpaddusw,// VEX_Vpaddusw_xmm_xmm_xmmm128
Mnemonic::Vpaddusw,// VEX_Vpaddusw_ymm_ymm_ymmm256
Mnemonic::Vpaddusw,// EVEX_Vpaddusw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpaddusw,// EVEX_Vpaddusw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpaddusw,// EVEX_Vpaddusw_zmm_k1z_zmm_zmmm512
Mnemonic::Pmaxub,// Pmaxub_mm_mmm64
Mnemonic::Pmaxub,// Pmaxub_xmm_xmmm128
Mnemonic::Vpmaxub,// VEX_Vpmaxub_xmm_xmm_xmmm128
Mnemonic::Vpmaxub,// VEX_Vpmaxub_ymm_ymm_ymmm256
Mnemonic::Vpmaxub,// EVEX_Vpmaxub_xmm_k1z_xmm_xmmm128
Mnemonic::Vpmaxub,// EVEX_Vpmaxub_ymm_k1z_ymm_ymmm256
Mnemonic::Vpmaxub,// EVEX_Vpmaxub_zmm_k1z_zmm_zmmm512
Mnemonic::Pandn,// Pandn_mm_mmm64
Mnemonic::Pandn,// Pandn_xmm_xmmm128
Mnemonic::Vpandn,// VEX_Vpandn_xmm_xmm_xmmm128
Mnemonic::Vpandn,// VEX_Vpandn_ymm_ymm_ymmm256
Mnemonic::Vpandnd,// EVEX_Vpandnd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpandnd,// EVEX_Vpandnd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpandnd,// EVEX_Vpandnd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpandnq,// EVEX_Vpandnq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpandnq,// EVEX_Vpandnq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpandnq,// EVEX_Vpandnq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Pavgb,// Pavgb_mm_mmm64
Mnemonic::Pavgb,// Pavgb_xmm_xmmm128
Mnemonic::Vpavgb,// VEX_Vpavgb_xmm_xmm_xmmm128
Mnemonic::Vpavgb,// VEX_Vpavgb_ymm_ymm_ymmm256
Mnemonic::Vpavgb,// EVEX_Vpavgb_xmm_k1z_xmm_xmmm128
Mnemonic::Vpavgb,// EVEX_Vpavgb_ymm_k1z_ymm_ymmm256
Mnemonic::Vpavgb,// EVEX_Vpavgb_zmm_k1z_zmm_zmmm512
Mnemonic::Psraw,// Psraw_mm_mmm64
Mnemonic::Psraw,// Psraw_xmm_xmmm128
Mnemonic::Vpsraw,// VEX_Vpsraw_xmm_xmm_xmmm128
Mnemonic::Vpsraw,// VEX_Vpsraw_ymm_ymm_xmmm128
Mnemonic::Vpsraw,// EVEX_Vpsraw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsraw,// EVEX_Vpsraw_ymm_k1z_ymm_xmmm128
Mnemonic::Vpsraw,// EVEX_Vpsraw_zmm_k1z_zmm_xmmm128
Mnemonic::Psrad,// Psrad_mm_mmm64
Mnemonic::Psrad,// Psrad_xmm_xmmm128
Mnemonic::Vpsrad,// VEX_Vpsrad_xmm_xmm_xmmm128
Mnemonic::Vpsrad,// VEX_Vpsrad_ymm_ymm_xmmm128
Mnemonic::Vpsrad,// EVEX_Vpsrad_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsrad,// EVEX_Vpsrad_ymm_k1z_ymm_xmmm128
Mnemonic::Vpsrad,// EVEX_Vpsrad_zmm_k1z_zmm_xmmm128
Mnemonic::Vpsraq,// EVEX_Vpsraq_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsraq,// EVEX_Vpsraq_ymm_k1z_ymm_xmmm128
Mnemonic::Vpsraq,// EVEX_Vpsraq_zmm_k1z_zmm_xmmm128
Mnemonic::Pavgw,// Pavgw_mm_mmm64
Mnemonic::Pavgw,// Pavgw_xmm_xmmm128
Mnemonic::Vpavgw,// VEX_Vpavgw_xmm_xmm_xmmm128
Mnemonic::Vpavgw,// VEX_Vpavgw_ymm_ymm_ymmm256
Mnemonic::Vpavgw,// EVEX_Vpavgw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpavgw,// EVEX_Vpavgw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpavgw,// EVEX_Vpavgw_zmm_k1z_zmm_zmmm512
Mnemonic::Pmulhuw,// Pmulhuw_mm_mmm64
Mnemonic::Pmulhuw,// Pmulhuw_xmm_xmmm128
Mnemonic::Vpmulhuw,// VEX_Vpmulhuw_xmm_xmm_xmmm128
Mnemonic::Vpmulhuw,// VEX_Vpmulhuw_ymm_ymm_ymmm256
Mnemonic::Vpmulhuw,// EVEX_Vpmulhuw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpmulhuw,// EVEX_Vpmulhuw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpmulhuw,// EVEX_Vpmulhuw_zmm_k1z_zmm_zmmm512
Mnemonic::Pmulhw,// Pmulhw_mm_mmm64
Mnemonic::Pmulhw,// Pmulhw_xmm_xmmm128
Mnemonic::Vpmulhw,// VEX_Vpmulhw_xmm_xmm_xmmm128
Mnemonic::Vpmulhw,// VEX_Vpmulhw_ymm_ymm_ymmm256
Mnemonic::Vpmulhw,// EVEX_Vpmulhw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpmulhw,// EVEX_Vpmulhw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpmulhw,// EVEX_Vpmulhw_zmm_k1z_zmm_zmmm512
Mnemonic::Cvttpd2dq,// Cvttpd2dq_xmm_xmmm128
Mnemonic::Vcvttpd2dq,// VEX_Vcvttpd2dq_xmm_xmmm128
Mnemonic::Vcvttpd2dq,// VEX_Vcvttpd2dq_xmm_ymmm256
Mnemonic::Vcvttpd2dq,// EVEX_Vcvttpd2dq_xmm_k1z_xmmm128b64
Mnemonic::Vcvttpd2dq,// EVEX_Vcvttpd2dq_xmm_k1z_ymmm256b64
Mnemonic::Vcvttpd2dq,// EVEX_Vcvttpd2dq_ymm_k1z_zmmm512b64_sae
Mnemonic::Cvtdq2pd,// Cvtdq2pd_xmm_xmmm64
Mnemonic::Vcvtdq2pd,// VEX_Vcvtdq2pd_xmm_xmmm64
Mnemonic::Vcvtdq2pd,// VEX_Vcvtdq2pd_ymm_xmmm128
Mnemonic::Vcvtdq2pd,// EVEX_Vcvtdq2pd_xmm_k1z_xmmm64b32
Mnemonic::Vcvtdq2pd,// EVEX_Vcvtdq2pd_ymm_k1z_xmmm128b32
Mnemonic::Vcvtdq2pd,// EVEX_Vcvtdq2pd_zmm_k1z_ymmm256b32_er
Mnemonic::Vcvtqq2pd,// EVEX_Vcvtqq2pd_xmm_k1z_xmmm128b64
Mnemonic::Vcvtqq2pd,// EVEX_Vcvtqq2pd_ymm_k1z_ymmm256b64
Mnemonic::Vcvtqq2pd,// EVEX_Vcvtqq2pd_zmm_k1z_zmmm512b64_er
Mnemonic::Cvtpd2dq,// Cvtpd2dq_xmm_xmmm128
Mnemonic::Vcvtpd2dq,// VEX_Vcvtpd2dq_xmm_xmmm128
Mnemonic::Vcvtpd2dq,// VEX_Vcvtpd2dq_xmm_ymmm256
Mnemonic::Vcvtpd2dq,// EVEX_Vcvtpd2dq_xmm_k1z_xmmm128b64
Mnemonic::Vcvtpd2dq,// EVEX_Vcvtpd2dq_xmm_k1z_ymmm256b64
Mnemonic::Vcvtpd2dq,// EVEX_Vcvtpd2dq_ymm_k1z_zmmm512b64_er
Mnemonic::Movntq,// Movntq_m64_mm
Mnemonic::Movntdq,// Movntdq_m128_xmm
Mnemonic::Vmovntdq,// VEX_Vmovntdq_m128_xmm
Mnemonic::Vmovntdq,// VEX_Vmovntdq_m256_ymm
Mnemonic::Vmovntdq,// EVEX_Vmovntdq_m128_xmm
Mnemonic::Vmovntdq,// EVEX_Vmovntdq_m256_ymm
Mnemonic::Vmovntdq,// EVEX_Vmovntdq_m512_zmm
Mnemonic::Psubsb,// Psubsb_mm_mmm64
Mnemonic::Psubsb,// Psubsb_xmm_xmmm128
Mnemonic::Vpsubsb,// VEX_Vpsubsb_xmm_xmm_xmmm128
Mnemonic::Vpsubsb,// VEX_Vpsubsb_ymm_ymm_ymmm256
Mnemonic::Vpsubsb,// EVEX_Vpsubsb_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsubsb,// EVEX_Vpsubsb_ymm_k1z_ymm_ymmm256
Mnemonic::Vpsubsb,// EVEX_Vpsubsb_zmm_k1z_zmm_zmmm512
Mnemonic::Psubsw,// Psubsw_mm_mmm64
Mnemonic::Psubsw,// Psubsw_xmm_xmmm128
Mnemonic::Vpsubsw,// VEX_Vpsubsw_xmm_xmm_xmmm128
Mnemonic::Vpsubsw,// VEX_Vpsubsw_ymm_ymm_ymmm256
Mnemonic::Vpsubsw,// EVEX_Vpsubsw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsubsw,// EVEX_Vpsubsw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpsubsw,// EVEX_Vpsubsw_zmm_k1z_zmm_zmmm512
Mnemonic::Pminsw,// Pminsw_mm_mmm64
Mnemonic::Pminsw,// Pminsw_xmm_xmmm128
Mnemonic::Vpminsw,// VEX_Vpminsw_xmm_xmm_xmmm128
Mnemonic::Vpminsw,// VEX_Vpminsw_ymm_ymm_ymmm256
Mnemonic::Vpminsw,// EVEX_Vpminsw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpminsw,// EVEX_Vpminsw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpminsw,// EVEX_Vpminsw_zmm_k1z_zmm_zmmm512
Mnemonic::Por,// Por_mm_mmm64
Mnemonic::Por,// Por_xmm_xmmm128
Mnemonic::Vpor,// VEX_Vpor_xmm_xmm_xmmm128
Mnemonic::Vpor,// VEX_Vpor_ymm_ymm_ymmm256
Mnemonic::Vpord,// EVEX_Vpord_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpord,// EVEX_Vpord_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpord,// EVEX_Vpord_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vporq,// EVEX_Vporq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vporq,// EVEX_Vporq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vporq,// EVEX_Vporq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Paddsb,// Paddsb_mm_mmm64
Mnemonic::Paddsb,// Paddsb_xmm_xmmm128
Mnemonic::Vpaddsb,// VEX_Vpaddsb_xmm_xmm_xmmm128
Mnemonic::Vpaddsb,// VEX_Vpaddsb_ymm_ymm_ymmm256
Mnemonic::Vpaddsb,// EVEX_Vpaddsb_xmm_k1z_xmm_xmmm128
Mnemonic::Vpaddsb,// EVEX_Vpaddsb_ymm_k1z_ymm_ymmm256
Mnemonic::Vpaddsb,// EVEX_Vpaddsb_zmm_k1z_zmm_zmmm512
Mnemonic::Paddsw,// Paddsw_mm_mmm64
Mnemonic::Paddsw,// Paddsw_xmm_xmmm128
Mnemonic::Vpaddsw,// VEX_Vpaddsw_xmm_xmm_xmmm128
Mnemonic::Vpaddsw,// VEX_Vpaddsw_ymm_ymm_ymmm256
Mnemonic::Vpaddsw,// EVEX_Vpaddsw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpaddsw,// EVEX_Vpaddsw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpaddsw,// EVEX_Vpaddsw_zmm_k1z_zmm_zmmm512
Mnemonic::Pmaxsw,// Pmaxsw_mm_mmm64
Mnemonic::Pmaxsw,// Pmaxsw_xmm_xmmm128
Mnemonic::Vpmaxsw,// VEX_Vpmaxsw_xmm_xmm_xmmm128
Mnemonic::Vpmaxsw,// VEX_Vpmaxsw_ymm_ymm_ymmm256
Mnemonic::Vpmaxsw,// EVEX_Vpmaxsw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpmaxsw,// EVEX_Vpmaxsw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpmaxsw,// EVEX_Vpmaxsw_zmm_k1z_zmm_zmmm512
Mnemonic::Pxor,// Pxor_mm_mmm64
Mnemonic::Pxor,// Pxor_xmm_xmmm128
Mnemonic::Vpxor,// VEX_Vpxor_xmm_xmm_xmmm128
Mnemonic::Vpxor,// VEX_Vpxor_ymm_ymm_ymmm256
Mnemonic::Vpxord,// EVEX_Vpxord_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpxord,// EVEX_Vpxord_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpxord,// EVEX_Vpxord_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpxorq,// EVEX_Vpxorq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpxorq,// EVEX_Vpxorq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpxorq,// EVEX_Vpxorq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Lddqu,// Lddqu_xmm_m128
Mnemonic::Vlddqu,// VEX_Vlddqu_xmm_m128
Mnemonic::Vlddqu,// VEX_Vlddqu_ymm_m256
Mnemonic::Psllw,// Psllw_mm_mmm64
Mnemonic::Psllw,// Psllw_xmm_xmmm128
Mnemonic::Vpsllw,// VEX_Vpsllw_xmm_xmm_xmmm128
Mnemonic::Vpsllw,// VEX_Vpsllw_ymm_ymm_xmmm128
Mnemonic::Vpsllw,// EVEX_Vpsllw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsllw,// EVEX_Vpsllw_ymm_k1z_ymm_xmmm128
Mnemonic::Vpsllw,// EVEX_Vpsllw_zmm_k1z_zmm_xmmm128
Mnemonic::Pslld,// Pslld_mm_mmm64
Mnemonic::Pslld,// Pslld_xmm_xmmm128
Mnemonic::Vpslld,// VEX_Vpslld_xmm_xmm_xmmm128
Mnemonic::Vpslld,// VEX_Vpslld_ymm_ymm_xmmm128
Mnemonic::Vpslld,// EVEX_Vpslld_xmm_k1z_xmm_xmmm128
Mnemonic::Vpslld,// EVEX_Vpslld_ymm_k1z_ymm_xmmm128
Mnemonic::Vpslld,// EVEX_Vpslld_zmm_k1z_zmm_xmmm128
Mnemonic::Psllq,// Psllq_mm_mmm64
Mnemonic::Psllq,// Psllq_xmm_xmmm128
Mnemonic::Vpsllq,// VEX_Vpsllq_xmm_xmm_xmmm128
Mnemonic::Vpsllq,// VEX_Vpsllq_ymm_ymm_xmmm128
Mnemonic::Vpsllq,// EVEX_Vpsllq_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsllq,// EVEX_Vpsllq_ymm_k1z_ymm_xmmm128
Mnemonic::Vpsllq,// EVEX_Vpsllq_zmm_k1z_zmm_xmmm128
Mnemonic::Pmuludq,// Pmuludq_mm_mmm64
Mnemonic::Pmuludq,// Pmuludq_xmm_xmmm128
Mnemonic::Vpmuludq,// VEX_Vpmuludq_xmm_xmm_xmmm128
Mnemonic::Vpmuludq,// VEX_Vpmuludq_ymm_ymm_ymmm256
Mnemonic::Vpmuludq,// EVEX_Vpmuludq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpmuludq,// EVEX_Vpmuludq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpmuludq,// EVEX_Vpmuludq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Pmaddwd,// Pmaddwd_mm_mmm64
Mnemonic::Pmaddwd,// Pmaddwd_xmm_xmmm128
Mnemonic::Vpmaddwd,// VEX_Vpmaddwd_xmm_xmm_xmmm128
Mnemonic::Vpmaddwd,// VEX_Vpmaddwd_ymm_ymm_ymmm256
Mnemonic::Vpmaddwd,// EVEX_Vpmaddwd_xmm_k1z_xmm_xmmm128
Mnemonic::Vpmaddwd,// EVEX_Vpmaddwd_ymm_k1z_ymm_ymmm256
Mnemonic::Vpmaddwd,// EVEX_Vpmaddwd_zmm_k1z_zmm_zmmm512
Mnemonic::Psadbw,// Psadbw_mm_mmm64
Mnemonic::Psadbw,// Psadbw_xmm_xmmm128
Mnemonic::Vpsadbw,// VEX_Vpsadbw_xmm_xmm_xmmm128
Mnemonic::Vpsadbw,// VEX_Vpsadbw_ymm_ymm_ymmm256
Mnemonic::Vpsadbw,// EVEX_Vpsadbw_xmm_xmm_xmmm128
Mnemonic::Vpsadbw,// EVEX_Vpsadbw_ymm_ymm_ymmm256
Mnemonic::Vpsadbw,// EVEX_Vpsadbw_zmm_zmm_zmmm512
Mnemonic::Maskmovq,// Maskmovq_rDI_mm_mm
Mnemonic::Maskmovdqu,// Maskmovdqu_rDI_xmm_xmm
Mnemonic::Vmaskmovdqu,// VEX_Vmaskmovdqu_rDI_xmm_xmm
Mnemonic::Psubb,// Psubb_mm_mmm64
Mnemonic::Psubb,// Psubb_xmm_xmmm128
Mnemonic::Vpsubb,// VEX_Vpsubb_xmm_xmm_xmmm128
Mnemonic::Vpsubb,// VEX_Vpsubb_ymm_ymm_ymmm256
Mnemonic::Vpsubb,// EVEX_Vpsubb_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsubb,// EVEX_Vpsubb_ymm_k1z_ymm_ymmm256
Mnemonic::Vpsubb,// EVEX_Vpsubb_zmm_k1z_zmm_zmmm512
Mnemonic::Psubw,// Psubw_mm_mmm64
Mnemonic::Psubw,// Psubw_xmm_xmmm128
Mnemonic::Vpsubw,// VEX_Vpsubw_xmm_xmm_xmmm128
Mnemonic::Vpsubw,// VEX_Vpsubw_ymm_ymm_ymmm256
Mnemonic::Vpsubw,// EVEX_Vpsubw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsubw,// EVEX_Vpsubw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpsubw,// EVEX_Vpsubw_zmm_k1z_zmm_zmmm512
Mnemonic::Psubd,// Psubd_mm_mmm64
Mnemonic::Psubd,// Psubd_xmm_xmmm128
Mnemonic::Vpsubd,// VEX_Vpsubd_xmm_xmm_xmmm128
Mnemonic::Vpsubd,// VEX_Vpsubd_ymm_ymm_ymmm256
Mnemonic::Vpsubd,// EVEX_Vpsubd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpsubd,// EVEX_Vpsubd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpsubd,// EVEX_Vpsubd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Psubq,// Psubq_mm_mmm64
Mnemonic::Psubq,// Psubq_xmm_xmmm128
Mnemonic::Vpsubq,// VEX_Vpsubq_xmm_xmm_xmmm128
Mnemonic::Vpsubq,// VEX_Vpsubq_ymm_ymm_ymmm256
Mnemonic::Vpsubq,// EVEX_Vpsubq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpsubq,// EVEX_Vpsubq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpsubq,// EVEX_Vpsubq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Paddb,// Paddb_mm_mmm64
Mnemonic::Paddb,// Paddb_xmm_xmmm128
Mnemonic::Vpaddb,// VEX_Vpaddb_xmm_xmm_xmmm128
Mnemonic::Vpaddb,// VEX_Vpaddb_ymm_ymm_ymmm256
Mnemonic::Vpaddb,// EVEX_Vpaddb_xmm_k1z_xmm_xmmm128
Mnemonic::Vpaddb,// EVEX_Vpaddb_ymm_k1z_ymm_ymmm256
Mnemonic::Vpaddb,// EVEX_Vpaddb_zmm_k1z_zmm_zmmm512
Mnemonic::Paddw,// Paddw_mm_mmm64
Mnemonic::Paddw,// Paddw_xmm_xmmm128
Mnemonic::Vpaddw,// VEX_Vpaddw_xmm_xmm_xmmm128
Mnemonic::Vpaddw,// VEX_Vpaddw_ymm_ymm_ymmm256
Mnemonic::Vpaddw,// EVEX_Vpaddw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpaddw,// EVEX_Vpaddw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpaddw,// EVEX_Vpaddw_zmm_k1z_zmm_zmmm512
Mnemonic::Paddd,// Paddd_mm_mmm64
Mnemonic::Paddd,// Paddd_xmm_xmmm128
Mnemonic::Vpaddd,// VEX_Vpaddd_xmm_xmm_xmmm128
Mnemonic::Vpaddd,// VEX_Vpaddd_ymm_ymm_ymmm256
Mnemonic::Vpaddd,// EVEX_Vpaddd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpaddd,// EVEX_Vpaddd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpaddd,// EVEX_Vpaddd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Ud0,// Ud0_r16_rm16
Mnemonic::Ud0,// Ud0_r32_rm32
Mnemonic::Ud0,// Ud0_r64_rm64
Mnemonic::Pshufb,// Pshufb_mm_mmm64
Mnemonic::Pshufb,// Pshufb_xmm_xmmm128
Mnemonic::Vpshufb,// VEX_Vpshufb_xmm_xmm_xmmm128
Mnemonic::Vpshufb,// VEX_Vpshufb_ymm_ymm_ymmm256
Mnemonic::Vpshufb,// EVEX_Vpshufb_xmm_k1z_xmm_xmmm128
Mnemonic::Vpshufb,// EVEX_Vpshufb_ymm_k1z_ymm_ymmm256
Mnemonic::Vpshufb,// EVEX_Vpshufb_zmm_k1z_zmm_zmmm512
Mnemonic::Phaddw,// Phaddw_mm_mmm64
Mnemonic::Phaddw,// Phaddw_xmm_xmmm128
Mnemonic::Vphaddw,// VEX_Vphaddw_xmm_xmm_xmmm128
Mnemonic::Vphaddw,// VEX_Vphaddw_ymm_ymm_ymmm256
Mnemonic::Phaddd,// Phaddd_mm_mmm64
Mnemonic::Phaddd,// Phaddd_xmm_xmmm128
Mnemonic::Vphaddd,// VEX_Vphaddd_xmm_xmm_xmmm128
Mnemonic::Vphaddd,// VEX_Vphaddd_ymm_ymm_ymmm256
Mnemonic::Phaddsw,// Phaddsw_mm_mmm64
Mnemonic::Phaddsw,// Phaddsw_xmm_xmmm128
Mnemonic::Vphaddsw,// VEX_Vphaddsw_xmm_xmm_xmmm128
Mnemonic::Vphaddsw,// VEX_Vphaddsw_ymm_ymm_ymmm256
Mnemonic::Pmaddubsw,// Pmaddubsw_mm_mmm64
Mnemonic::Pmaddubsw,// Pmaddubsw_xmm_xmmm128
Mnemonic::Vpmaddubsw,// VEX_Vpmaddubsw_xmm_xmm_xmmm128
Mnemonic::Vpmaddubsw,// VEX_Vpmaddubsw_ymm_ymm_ymmm256
Mnemonic::Vpmaddubsw,// EVEX_Vpmaddubsw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpmaddubsw,// EVEX_Vpmaddubsw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpmaddubsw,// EVEX_Vpmaddubsw_zmm_k1z_zmm_zmmm512
Mnemonic::Phsubw,// Phsubw_mm_mmm64
Mnemonic::Phsubw,// Phsubw_xmm_xmmm128
Mnemonic::Vphsubw,// VEX_Vphsubw_xmm_xmm_xmmm128
Mnemonic::Vphsubw,// VEX_Vphsubw_ymm_ymm_ymmm256
Mnemonic::Phsubd,// Phsubd_mm_mmm64
Mnemonic::Phsubd,// Phsubd_xmm_xmmm128
Mnemonic::Vphsubd,// VEX_Vphsubd_xmm_xmm_xmmm128
Mnemonic::Vphsubd,// VEX_Vphsubd_ymm_ymm_ymmm256
Mnemonic::Phsubsw,// Phsubsw_mm_mmm64
Mnemonic::Phsubsw,// Phsubsw_xmm_xmmm128
Mnemonic::Vphsubsw,// VEX_Vphsubsw_xmm_xmm_xmmm128
Mnemonic::Vphsubsw,// VEX_Vphsubsw_ymm_ymm_ymmm256
Mnemonic::Psignb,// Psignb_mm_mmm64
Mnemonic::Psignb,// Psignb_xmm_xmmm128
Mnemonic::Vpsignb,// VEX_Vpsignb_xmm_xmm_xmmm128
Mnemonic::Vpsignb,// VEX_Vpsignb_ymm_ymm_ymmm256
Mnemonic::Psignw,// Psignw_mm_mmm64
Mnemonic::Psignw,// Psignw_xmm_xmmm128
Mnemonic::Vpsignw,// VEX_Vpsignw_xmm_xmm_xmmm128
Mnemonic::Vpsignw,// VEX_Vpsignw_ymm_ymm_ymmm256
Mnemonic::Psignd,// Psignd_mm_mmm64
Mnemonic::Psignd,// Psignd_xmm_xmmm128
Mnemonic::Vpsignd,// VEX_Vpsignd_xmm_xmm_xmmm128
Mnemonic::Vpsignd,// VEX_Vpsignd_ymm_ymm_ymmm256
Mnemonic::Pmulhrsw,// Pmulhrsw_mm_mmm64
Mnemonic::Pmulhrsw,// Pmulhrsw_xmm_xmmm128
Mnemonic::Vpmulhrsw,// VEX_Vpmulhrsw_xmm_xmm_xmmm128
Mnemonic::Vpmulhrsw,// VEX_Vpmulhrsw_ymm_ymm_ymmm256
Mnemonic::Vpmulhrsw,// EVEX_Vpmulhrsw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpmulhrsw,// EVEX_Vpmulhrsw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpmulhrsw,// EVEX_Vpmulhrsw_zmm_k1z_zmm_zmmm512
Mnemonic::Vpermilps,// VEX_Vpermilps_xmm_xmm_xmmm128
Mnemonic::Vpermilps,// VEX_Vpermilps_ymm_ymm_ymmm256
Mnemonic::Vpermilps,// EVEX_Vpermilps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpermilps,// EVEX_Vpermilps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpermilps,// EVEX_Vpermilps_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpermilpd,// VEX_Vpermilpd_xmm_xmm_xmmm128
Mnemonic::Vpermilpd,// VEX_Vpermilpd_ymm_ymm_ymmm256
Mnemonic::Vpermilpd,// EVEX_Vpermilpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpermilpd,// EVEX_Vpermilpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpermilpd,// EVEX_Vpermilpd_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vtestps,// VEX_Vtestps_xmm_xmmm128
Mnemonic::Vtestps,// VEX_Vtestps_ymm_ymmm256
Mnemonic::Vtestpd,// VEX_Vtestpd_xmm_xmmm128
Mnemonic::Vtestpd,// VEX_Vtestpd_ymm_ymmm256
Mnemonic::Pblendvb,// Pblendvb_xmm_xmmm128
Mnemonic::Vpsrlvw,// EVEX_Vpsrlvw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsrlvw,// EVEX_Vpsrlvw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpsrlvw,// EVEX_Vpsrlvw_zmm_k1z_zmm_zmmm512
Mnemonic::Vpmovuswb,// EVEX_Vpmovuswb_xmmm64_k1z_xmm
Mnemonic::Vpmovuswb,// EVEX_Vpmovuswb_xmmm128_k1z_ymm
Mnemonic::Vpmovuswb,// EVEX_Vpmovuswb_ymmm256_k1z_zmm
Mnemonic::Vpsravw,// EVEX_Vpsravw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsravw,// EVEX_Vpsravw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpsravw,// EVEX_Vpsravw_zmm_k1z_zmm_zmmm512
Mnemonic::Vpmovusdb,// EVEX_Vpmovusdb_xmmm32_k1z_xmm
Mnemonic::Vpmovusdb,// EVEX_Vpmovusdb_xmmm64_k1z_ymm
Mnemonic::Vpmovusdb,// EVEX_Vpmovusdb_xmmm128_k1z_zmm
Mnemonic::Vpsllvw,// EVEX_Vpsllvw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpsllvw,// EVEX_Vpsllvw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpsllvw,// EVEX_Vpsllvw_zmm_k1z_zmm_zmmm512
Mnemonic::Vpmovusqb,// EVEX_Vpmovusqb_xmmm16_k1z_xmm
Mnemonic::Vpmovusqb,// EVEX_Vpmovusqb_xmmm32_k1z_ymm
Mnemonic::Vpmovusqb,// EVEX_Vpmovusqb_xmmm64_k1z_zmm
Mnemonic::Vcvtph2ps,// VEX_Vcvtph2ps_xmm_xmmm64
Mnemonic::Vcvtph2ps,// VEX_Vcvtph2ps_ymm_xmmm128
Mnemonic::Vcvtph2ps,// EVEX_Vcvtph2ps_xmm_k1z_xmmm64
Mnemonic::Vcvtph2ps,// EVEX_Vcvtph2ps_ymm_k1z_xmmm128
Mnemonic::Vcvtph2ps,// EVEX_Vcvtph2ps_zmm_k1z_ymmm256_sae
Mnemonic::Vpmovusdw,// EVEX_Vpmovusdw_xmmm64_k1z_xmm
Mnemonic::Vpmovusdw,// EVEX_Vpmovusdw_xmmm128_k1z_ymm
Mnemonic::Vpmovusdw,// EVEX_Vpmovusdw_ymmm256_k1z_zmm
Mnemonic::Blendvps,// Blendvps_xmm_xmmm128
Mnemonic::Vprorvd,// EVEX_Vprorvd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vprorvd,// EVEX_Vprorvd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vprorvd,// EVEX_Vprorvd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vprorvq,// EVEX_Vprorvq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vprorvq,// EVEX_Vprorvq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vprorvq,// EVEX_Vprorvq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vpmovusqw,// EVEX_Vpmovusqw_xmmm32_k1z_xmm
Mnemonic::Vpmovusqw,// EVEX_Vpmovusqw_xmmm64_k1z_ymm
Mnemonic::Vpmovusqw,// EVEX_Vpmovusqw_xmmm128_k1z_zmm
Mnemonic::Blendvpd,// Blendvpd_xmm_xmmm128
Mnemonic::Vprolvd,// EVEX_Vprolvd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vprolvd,// EVEX_Vprolvd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vprolvd,// EVEX_Vprolvd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vprolvq,// EVEX_Vprolvq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vprolvq,// EVEX_Vprolvq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vprolvq,// EVEX_Vprolvq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vpmovusqd,// EVEX_Vpmovusqd_xmmm64_k1z_xmm
Mnemonic::Vpmovusqd,// EVEX_Vpmovusqd_xmmm128_k1z_ymm
Mnemonic::Vpmovusqd,// EVEX_Vpmovusqd_ymmm256_k1z_zmm
Mnemonic::Vpermps,// VEX_Vpermps_ymm_ymm_ymmm256
Mnemonic::Vpermps,// EVEX_Vpermps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpermps,// EVEX_Vpermps_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpermpd,// EVEX_Vpermpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpermpd,// EVEX_Vpermpd_zmm_k1z_zmm_zmmm512b64
Mnemonic::Ptest,// Ptest_xmm_xmmm128
Mnemonic::Vptest,// VEX_Vptest_xmm_xmmm128
Mnemonic::Vptest,// VEX_Vptest_ymm_ymmm256
Mnemonic::Vbroadcastss,// VEX_Vbroadcastss_xmm_m32
Mnemonic::Vbroadcastss,// VEX_Vbroadcastss_ymm_m32
Mnemonic::Vbroadcastss,// EVEX_Vbroadcastss_xmm_k1z_xmmm32
Mnemonic::Vbroadcastss,// EVEX_Vbroadcastss_ymm_k1z_xmmm32
Mnemonic::Vbroadcastss,// EVEX_Vbroadcastss_zmm_k1z_xmmm32
Mnemonic::Vbroadcastsd,// VEX_Vbroadcastsd_ymm_m64
Mnemonic::Vbroadcastf32x2,// EVEX_Vbroadcastf32x2_ymm_k1z_xmmm64
Mnemonic::Vbroadcastf32x2,// EVEX_Vbroadcastf32x2_zmm_k1z_xmmm64
Mnemonic::Vbroadcastsd,// EVEX_Vbroadcastsd_ymm_k1z_xmmm64
Mnemonic::Vbroadcastsd,// EVEX_Vbroadcastsd_zmm_k1z_xmmm64
Mnemonic::Vbroadcastf128,// VEX_Vbroadcastf128_ymm_m128
Mnemonic::Vbroadcastf32x4,// EVEX_Vbroadcastf32x4_ymm_k1z_m128
Mnemonic::Vbroadcastf32x4,// EVEX_Vbroadcastf32x4_zmm_k1z_m128
Mnemonic::Vbroadcastf64x2,// EVEX_Vbroadcastf64x2_ymm_k1z_m128
Mnemonic::Vbroadcastf64x2,// EVEX_Vbroadcastf64x2_zmm_k1z_m128
Mnemonic::Vbroadcastf32x8,// EVEX_Vbroadcastf32x8_zmm_k1z_m256
Mnemonic::Vbroadcastf64x4,// EVEX_Vbroadcastf64x4_zmm_k1z_m256
Mnemonic::Pabsb,// Pabsb_mm_mmm64
Mnemonic::Pabsb,// Pabsb_xmm_xmmm128
Mnemonic::Vpabsb,// VEX_Vpabsb_xmm_xmmm128
Mnemonic::Vpabsb,// VEX_Vpabsb_ymm_ymmm256
Mnemonic::Vpabsb,// EVEX_Vpabsb_xmm_k1z_xmmm128
Mnemonic::Vpabsb,// EVEX_Vpabsb_ymm_k1z_ymmm256
Mnemonic::Vpabsb,// EVEX_Vpabsb_zmm_k1z_zmmm512
Mnemonic::Pabsw,// Pabsw_mm_mmm64
Mnemonic::Pabsw,// Pabsw_xmm_xmmm128
Mnemonic::Vpabsw,// VEX_Vpabsw_xmm_xmmm128
Mnemonic::Vpabsw,// VEX_Vpabsw_ymm_ymmm256
Mnemonic::Vpabsw,// EVEX_Vpabsw_xmm_k1z_xmmm128
Mnemonic::Vpabsw,// EVEX_Vpabsw_ymm_k1z_ymmm256
Mnemonic::Vpabsw,// EVEX_Vpabsw_zmm_k1z_zmmm512
Mnemonic::Pabsd,// Pabsd_mm_mmm64
Mnemonic::Pabsd,// Pabsd_xmm_xmmm128
Mnemonic::Vpabsd,// VEX_Vpabsd_xmm_xmmm128
Mnemonic::Vpabsd,// VEX_Vpabsd_ymm_ymmm256
Mnemonic::Vpabsd,// EVEX_Vpabsd_xmm_k1z_xmmm128b32
Mnemonic::Vpabsd,// EVEX_Vpabsd_ymm_k1z_ymmm256b32
Mnemonic::Vpabsd,// EVEX_Vpabsd_zmm_k1z_zmmm512b32
Mnemonic::Vpabsq,// EVEX_Vpabsq_xmm_k1z_xmmm128b64
Mnemonic::Vpabsq,// EVEX_Vpabsq_ymm_k1z_ymmm256b64
Mnemonic::Vpabsq,// EVEX_Vpabsq_zmm_k1z_zmmm512b64
Mnemonic::Pmovsxbw,// Pmovsxbw_xmm_xmmm64
Mnemonic::Vpmovsxbw,// VEX_Vpmovsxbw_xmm_xmmm64
Mnemonic::Vpmovsxbw,// VEX_Vpmovsxbw_ymm_xmmm128
Mnemonic::Vpmovsxbw,// EVEX_Vpmovsxbw_xmm_k1z_xmmm64
Mnemonic::Vpmovsxbw,// EVEX_Vpmovsxbw_ymm_k1z_xmmm128
Mnemonic::Vpmovsxbw,// EVEX_Vpmovsxbw_zmm_k1z_ymmm256
Mnemonic::Vpmovswb,// EVEX_Vpmovswb_xmmm64_k1z_xmm
Mnemonic::Vpmovswb,// EVEX_Vpmovswb_xmmm128_k1z_ymm
Mnemonic::Vpmovswb,// EVEX_Vpmovswb_ymmm256_k1z_zmm
Mnemonic::Pmovsxbd,// Pmovsxbd_xmm_xmmm32
Mnemonic::Vpmovsxbd,// VEX_Vpmovsxbd_xmm_xmmm32
Mnemonic::Vpmovsxbd,// VEX_Vpmovsxbd_ymm_xmmm64
Mnemonic::Vpmovsxbd,// EVEX_Vpmovsxbd_xmm_k1z_xmmm32
Mnemonic::Vpmovsxbd,// EVEX_Vpmovsxbd_ymm_k1z_xmmm64
Mnemonic::Vpmovsxbd,// EVEX_Vpmovsxbd_zmm_k1z_xmmm128
Mnemonic::Vpmovsdb,// EVEX_Vpmovsdb_xmmm32_k1z_xmm
Mnemonic::Vpmovsdb,// EVEX_Vpmovsdb_xmmm64_k1z_ymm
Mnemonic::Vpmovsdb,// EVEX_Vpmovsdb_xmmm128_k1z_zmm
Mnemonic::Pmovsxbq,// Pmovsxbq_xmm_xmmm16
Mnemonic::Vpmovsxbq,// VEX_Vpmovsxbq_xmm_xmmm16
Mnemonic::Vpmovsxbq,// VEX_Vpmovsxbq_ymm_xmmm32
Mnemonic::Vpmovsxbq,// EVEX_Vpmovsxbq_xmm_k1z_xmmm16
Mnemonic::Vpmovsxbq,// EVEX_Vpmovsxbq_ymm_k1z_xmmm32
Mnemonic::Vpmovsxbq,// EVEX_Vpmovsxbq_zmm_k1z_xmmm64
Mnemonic::Vpmovsqb,// EVEX_Vpmovsqb_xmmm16_k1z_xmm
Mnemonic::Vpmovsqb,// EVEX_Vpmovsqb_xmmm32_k1z_ymm
Mnemonic::Vpmovsqb,// EVEX_Vpmovsqb_xmmm64_k1z_zmm
Mnemonic::Pmovsxwd,// Pmovsxwd_xmm_xmmm64
Mnemonic::Vpmovsxwd,// VEX_Vpmovsxwd_xmm_xmmm64
Mnemonic::Vpmovsxwd,// VEX_Vpmovsxwd_ymm_xmmm128
Mnemonic::Vpmovsxwd,// EVEX_Vpmovsxwd_xmm_k1z_xmmm64
Mnemonic::Vpmovsxwd,// EVEX_Vpmovsxwd_ymm_k1z_xmmm128
Mnemonic::Vpmovsxwd,// EVEX_Vpmovsxwd_zmm_k1z_ymmm256
Mnemonic::Vpmovsdw,// EVEX_Vpmovsdw_xmmm64_k1z_xmm
Mnemonic::Vpmovsdw,// EVEX_Vpmovsdw_xmmm128_k1z_ymm
Mnemonic::Vpmovsdw,// EVEX_Vpmovsdw_ymmm256_k1z_zmm
Mnemonic::Pmovsxwq,// Pmovsxwq_xmm_xmmm32
Mnemonic::Vpmovsxwq,// VEX_Vpmovsxwq_xmm_xmmm32
Mnemonic::Vpmovsxwq,// VEX_Vpmovsxwq_ymm_xmmm64
Mnemonic::Vpmovsxwq,// EVEX_Vpmovsxwq_xmm_k1z_xmmm32
Mnemonic::Vpmovsxwq,// EVEX_Vpmovsxwq_ymm_k1z_xmmm64
Mnemonic::Vpmovsxwq,// EVEX_Vpmovsxwq_zmm_k1z_xmmm128
Mnemonic::Vpmovsqw,// EVEX_Vpmovsqw_xmmm32_k1z_xmm
Mnemonic::Vpmovsqw,// EVEX_Vpmovsqw_xmmm64_k1z_ymm
Mnemonic::Vpmovsqw,// EVEX_Vpmovsqw_xmmm128_k1z_zmm
Mnemonic::Pmovsxdq,// Pmovsxdq_xmm_xmmm64
Mnemonic::Vpmovsxdq,// VEX_Vpmovsxdq_xmm_xmmm64
Mnemonic::Vpmovsxdq,// VEX_Vpmovsxdq_ymm_xmmm128
Mnemonic::Vpmovsxdq,// EVEX_Vpmovsxdq_xmm_k1z_xmmm64
Mnemonic::Vpmovsxdq,// EVEX_Vpmovsxdq_ymm_k1z_xmmm128
Mnemonic::Vpmovsxdq,// EVEX_Vpmovsxdq_zmm_k1z_ymmm256
Mnemonic::Vpmovsqd,// EVEX_Vpmovsqd_xmmm64_k1z_xmm
Mnemonic::Vpmovsqd,// EVEX_Vpmovsqd_xmmm128_k1z_ymm
Mnemonic::Vpmovsqd,// EVEX_Vpmovsqd_ymmm256_k1z_zmm
Mnemonic::Vptestmb,// EVEX_Vptestmb_kr_k1_xmm_xmmm128
Mnemonic::Vptestmb,// EVEX_Vptestmb_kr_k1_ymm_ymmm256
Mnemonic::Vptestmb,// EVEX_Vptestmb_kr_k1_zmm_zmmm512
Mnemonic::Vptestmw,// EVEX_Vptestmw_kr_k1_xmm_xmmm128
Mnemonic::Vptestmw,// EVEX_Vptestmw_kr_k1_ymm_ymmm256
Mnemonic::Vptestmw,// EVEX_Vptestmw_kr_k1_zmm_zmmm512
Mnemonic::Vptestnmb,// EVEX_Vptestnmb_kr_k1_xmm_xmmm128
Mnemonic::Vptestnmb,// EVEX_Vptestnmb_kr_k1_ymm_ymmm256
Mnemonic::Vptestnmb,// EVEX_Vptestnmb_kr_k1_zmm_zmmm512
Mnemonic::Vptestnmw,// EVEX_Vptestnmw_kr_k1_xmm_xmmm128
Mnemonic::Vptestnmw,// EVEX_Vptestnmw_kr_k1_ymm_ymmm256
Mnemonic::Vptestnmw,// EVEX_Vptestnmw_kr_k1_zmm_zmmm512
Mnemonic::Vptestmd,// EVEX_Vptestmd_kr_k1_xmm_xmmm128b32
Mnemonic::Vptestmd,// EVEX_Vptestmd_kr_k1_ymm_ymmm256b32
Mnemonic::Vptestmd,// EVEX_Vptestmd_kr_k1_zmm_zmmm512b32
Mnemonic::Vptestmq,// EVEX_Vptestmq_kr_k1_xmm_xmmm128b64
Mnemonic::Vptestmq,// EVEX_Vptestmq_kr_k1_ymm_ymmm256b64
Mnemonic::Vptestmq,// EVEX_Vptestmq_kr_k1_zmm_zmmm512b64
Mnemonic::Vptestnmd,// EVEX_Vptestnmd_kr_k1_xmm_xmmm128b32
Mnemonic::Vptestnmd,// EVEX_Vptestnmd_kr_k1_ymm_ymmm256b32
Mnemonic::Vptestnmd,// EVEX_Vptestnmd_kr_k1_zmm_zmmm512b32
Mnemonic::Vptestnmq,// EVEX_Vptestnmq_kr_k1_xmm_xmmm128b64
Mnemonic::Vptestnmq,// EVEX_Vptestnmq_kr_k1_ymm_ymmm256b64
Mnemonic::Vptestnmq,// EVEX_Vptestnmq_kr_k1_zmm_zmmm512b64
Mnemonic::Pmuldq,// Pmuldq_xmm_xmmm128
Mnemonic::Vpmuldq,// VEX_Vpmuldq_xmm_xmm_xmmm128
Mnemonic::Vpmuldq,// VEX_Vpmuldq_ymm_ymm_ymmm256
Mnemonic::Vpmuldq,// EVEX_Vpmuldq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpmuldq,// EVEX_Vpmuldq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpmuldq,// EVEX_Vpmuldq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vpmovm2b,// EVEX_Vpmovm2b_xmm_kr
Mnemonic::Vpmovm2b,// EVEX_Vpmovm2b_ymm_kr
Mnemonic::Vpmovm2b,// EVEX_Vpmovm2b_zmm_kr
Mnemonic::Vpmovm2w,// EVEX_Vpmovm2w_xmm_kr
Mnemonic::Vpmovm2w,// EVEX_Vpmovm2w_ymm_kr
Mnemonic::Vpmovm2w,// EVEX_Vpmovm2w_zmm_kr
Mnemonic::Pcmpeqq,// Pcmpeqq_xmm_xmmm128
Mnemonic::Vpcmpeqq,// VEX_Vpcmpeqq_xmm_xmm_xmmm128
Mnemonic::Vpcmpeqq,// VEX_Vpcmpeqq_ymm_ymm_ymmm256
Mnemonic::Vpcmpeqq,// EVEX_Vpcmpeqq_kr_k1_xmm_xmmm128b64
Mnemonic::Vpcmpeqq,// EVEX_Vpcmpeqq_kr_k1_ymm_ymmm256b64
Mnemonic::Vpcmpeqq,// EVEX_Vpcmpeqq_kr_k1_zmm_zmmm512b64
Mnemonic::Vpmovb2m,// EVEX_Vpmovb2m_kr_xmm
Mnemonic::Vpmovb2m,// EVEX_Vpmovb2m_kr_ymm
Mnemonic::Vpmovb2m,// EVEX_Vpmovb2m_kr_zmm
Mnemonic::Vpmovw2m,// EVEX_Vpmovw2m_kr_xmm
Mnemonic::Vpmovw2m,// EVEX_Vpmovw2m_kr_ymm
Mnemonic::Vpmovw2m,// EVEX_Vpmovw2m_kr_zmm
Mnemonic::Movntdqa,// Movntdqa_xmm_m128
Mnemonic::Vmovntdqa,// VEX_Vmovntdqa_xmm_m128
Mnemonic::Vmovntdqa,// VEX_Vmovntdqa_ymm_m256
Mnemonic::Vmovntdqa,// EVEX_Vmovntdqa_xmm_m128
Mnemonic::Vmovntdqa,// EVEX_Vmovntdqa_ymm_m256
Mnemonic::Vmovntdqa,// EVEX_Vmovntdqa_zmm_m512
Mnemonic::Vpbroadcastmb2q,// EVEX_Vpbroadcastmb2q_xmm_kr
Mnemonic::Vpbroadcastmb2q,// EVEX_Vpbroadcastmb2q_ymm_kr
Mnemonic::Vpbroadcastmb2q,// EVEX_Vpbroadcastmb2q_zmm_kr
Mnemonic::Packusdw,// Packusdw_xmm_xmmm128
Mnemonic::Vpackusdw,// VEX_Vpackusdw_xmm_xmm_xmmm128
Mnemonic::Vpackusdw,// VEX_Vpackusdw_ymm_ymm_ymmm256
Mnemonic::Vpackusdw,// EVEX_Vpackusdw_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpackusdw,// EVEX_Vpackusdw_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpackusdw,// EVEX_Vpackusdw_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vmaskmovps,// VEX_Vmaskmovps_xmm_xmm_m128
Mnemonic::Vmaskmovps,// VEX_Vmaskmovps_ymm_ymm_m256
Mnemonic::Vscalefps,// EVEX_Vscalefps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vscalefps,// EVEX_Vscalefps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vscalefps,// EVEX_Vscalefps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vscalefpd,// EVEX_Vscalefpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vscalefpd,// EVEX_Vscalefpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vscalefpd,// EVEX_Vscalefpd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vmaskmovpd,// VEX_Vmaskmovpd_xmm_xmm_m128
Mnemonic::Vmaskmovpd,// VEX_Vmaskmovpd_ymm_ymm_m256
Mnemonic::Vscalefss,// EVEX_Vscalefss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Vscalefsd,// EVEX_Vscalefsd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Vmaskmovps,// VEX_Vmaskmovps_m128_xmm_xmm
Mnemonic::Vmaskmovps,// VEX_Vmaskmovps_m256_ymm_ymm
Mnemonic::Vmaskmovpd,// VEX_Vmaskmovpd_m128_xmm_xmm
Mnemonic::Vmaskmovpd,// VEX_Vmaskmovpd_m256_ymm_ymm
Mnemonic::Pmovzxbw,// Pmovzxbw_xmm_xmmm64
Mnemonic::Vpmovzxbw,// VEX_Vpmovzxbw_xmm_xmmm64
Mnemonic::Vpmovzxbw,// VEX_Vpmovzxbw_ymm_xmmm128
Mnemonic::Vpmovzxbw,// EVEX_Vpmovzxbw_xmm_k1z_xmmm64
Mnemonic::Vpmovzxbw,// EVEX_Vpmovzxbw_ymm_k1z_xmmm128
Mnemonic::Vpmovzxbw,// EVEX_Vpmovzxbw_zmm_k1z_ymmm256
Mnemonic::Vpmovwb,// EVEX_Vpmovwb_xmmm64_k1z_xmm
Mnemonic::Vpmovwb,// EVEX_Vpmovwb_xmmm128_k1z_ymm
Mnemonic::Vpmovwb,// EVEX_Vpmovwb_ymmm256_k1z_zmm
Mnemonic::Pmovzxbd,// Pmovzxbd_xmm_xmmm32
Mnemonic::Vpmovzxbd,// VEX_Vpmovzxbd_xmm_xmmm32
Mnemonic::Vpmovzxbd,// VEX_Vpmovzxbd_ymm_xmmm64
Mnemonic::Vpmovzxbd,// EVEX_Vpmovzxbd_xmm_k1z_xmmm32
Mnemonic::Vpmovzxbd,// EVEX_Vpmovzxbd_ymm_k1z_xmmm64
Mnemonic::Vpmovzxbd,// EVEX_Vpmovzxbd_zmm_k1z_xmmm128
Mnemonic::Vpmovdb,// EVEX_Vpmovdb_xmmm32_k1z_xmm
Mnemonic::Vpmovdb,// EVEX_Vpmovdb_xmmm64_k1z_ymm
Mnemonic::Vpmovdb,// EVEX_Vpmovdb_xmmm128_k1z_zmm
Mnemonic::Pmovzxbq,// Pmovzxbq_xmm_xmmm16
Mnemonic::Vpmovzxbq,// VEX_Vpmovzxbq_xmm_xmmm16
Mnemonic::Vpmovzxbq,// VEX_Vpmovzxbq_ymm_xmmm32
Mnemonic::Vpmovzxbq,// EVEX_Vpmovzxbq_xmm_k1z_xmmm16
Mnemonic::Vpmovzxbq,// EVEX_Vpmovzxbq_ymm_k1z_xmmm32
Mnemonic::Vpmovzxbq,// EVEX_Vpmovzxbq_zmm_k1z_xmmm64
Mnemonic::Vpmovqb,// EVEX_Vpmovqb_xmmm16_k1z_xmm
Mnemonic::Vpmovqb,// EVEX_Vpmovqb_xmmm32_k1z_ymm
Mnemonic::Vpmovqb,// EVEX_Vpmovqb_xmmm64_k1z_zmm
Mnemonic::Pmovzxwd,// Pmovzxwd_xmm_xmmm64
Mnemonic::Vpmovzxwd,// VEX_Vpmovzxwd_xmm_xmmm64
Mnemonic::Vpmovzxwd,// VEX_Vpmovzxwd_ymm_xmmm128
Mnemonic::Vpmovzxwd,// EVEX_Vpmovzxwd_xmm_k1z_xmmm64
Mnemonic::Vpmovzxwd,// EVEX_Vpmovzxwd_ymm_k1z_xmmm128
Mnemonic::Vpmovzxwd,// EVEX_Vpmovzxwd_zmm_k1z_ymmm256
Mnemonic::Vpmovdw,// EVEX_Vpmovdw_xmmm64_k1z_xmm
Mnemonic::Vpmovdw,// EVEX_Vpmovdw_xmmm128_k1z_ymm
Mnemonic::Vpmovdw,// EVEX_Vpmovdw_ymmm256_k1z_zmm
Mnemonic::Pmovzxwq,// Pmovzxwq_xmm_xmmm32
Mnemonic::Vpmovzxwq,// VEX_Vpmovzxwq_xmm_xmmm32
Mnemonic::Vpmovzxwq,// VEX_Vpmovzxwq_ymm_xmmm64
Mnemonic::Vpmovzxwq,// EVEX_Vpmovzxwq_xmm_k1z_xmmm32
Mnemonic::Vpmovzxwq,// EVEX_Vpmovzxwq_ymm_k1z_xmmm64
Mnemonic::Vpmovzxwq,// EVEX_Vpmovzxwq_zmm_k1z_xmmm128
Mnemonic::Vpmovqw,// EVEX_Vpmovqw_xmmm32_k1z_xmm
Mnemonic::Vpmovqw,// EVEX_Vpmovqw_xmmm64_k1z_ymm
Mnemonic::Vpmovqw,// EVEX_Vpmovqw_xmmm128_k1z_zmm
Mnemonic::Pmovzxdq,// Pmovzxdq_xmm_xmmm64
Mnemonic::Vpmovzxdq,// VEX_Vpmovzxdq_xmm_xmmm64
Mnemonic::Vpmovzxdq,// VEX_Vpmovzxdq_ymm_xmmm128
Mnemonic::Vpmovzxdq,// EVEX_Vpmovzxdq_xmm_k1z_xmmm64
Mnemonic::Vpmovzxdq,// EVEX_Vpmovzxdq_ymm_k1z_xmmm128
Mnemonic::Vpmovzxdq,// EVEX_Vpmovzxdq_zmm_k1z_ymmm256
Mnemonic::Vpmovqd,// EVEX_Vpmovqd_xmmm64_k1z_xmm
Mnemonic::Vpmovqd,// EVEX_Vpmovqd_xmmm128_k1z_ymm
Mnemonic::Vpmovqd,// EVEX_Vpmovqd_ymmm256_k1z_zmm
Mnemonic::Vpermd,// VEX_Vpermd_ymm_ymm_ymmm256
Mnemonic::Vpermd,// EVEX_Vpermd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpermd,// EVEX_Vpermd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpermq,// EVEX_Vpermq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpermq,// EVEX_Vpermq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Pcmpgtq,// Pcmpgtq_xmm_xmmm128
Mnemonic::Vpcmpgtq,// VEX_Vpcmpgtq_xmm_xmm_xmmm128
Mnemonic::Vpcmpgtq,// VEX_Vpcmpgtq_ymm_ymm_ymmm256
Mnemonic::Vpcmpgtq,// EVEX_Vpcmpgtq_kr_k1_xmm_xmmm128b64
Mnemonic::Vpcmpgtq,// EVEX_Vpcmpgtq_kr_k1_ymm_ymmm256b64
Mnemonic::Vpcmpgtq,// EVEX_Vpcmpgtq_kr_k1_zmm_zmmm512b64
Mnemonic::Pminsb,// Pminsb_xmm_xmmm128
Mnemonic::Vpminsb,// VEX_Vpminsb_xmm_xmm_xmmm128
Mnemonic::Vpminsb,// VEX_Vpminsb_ymm_ymm_ymmm256
Mnemonic::Vpminsb,// EVEX_Vpminsb_xmm_k1z_xmm_xmmm128
Mnemonic::Vpminsb,// EVEX_Vpminsb_ymm_k1z_ymm_ymmm256
Mnemonic::Vpminsb,// EVEX_Vpminsb_zmm_k1z_zmm_zmmm512
Mnemonic::Vpmovm2d,// EVEX_Vpmovm2d_xmm_kr
Mnemonic::Vpmovm2d,// EVEX_Vpmovm2d_ymm_kr
Mnemonic::Vpmovm2d,// EVEX_Vpmovm2d_zmm_kr
Mnemonic::Vpmovm2q,// EVEX_Vpmovm2q_xmm_kr
Mnemonic::Vpmovm2q,// EVEX_Vpmovm2q_ymm_kr
Mnemonic::Vpmovm2q,// EVEX_Vpmovm2q_zmm_kr
Mnemonic::Pminsd,// Pminsd_xmm_xmmm128
Mnemonic::Vpminsd,// VEX_Vpminsd_xmm_xmm_xmmm128
Mnemonic::Vpminsd,// VEX_Vpminsd_ymm_ymm_ymmm256
Mnemonic::Vpminsd,// EVEX_Vpminsd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpminsd,// EVEX_Vpminsd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpminsd,// EVEX_Vpminsd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpminsq,// EVEX_Vpminsq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpminsq,// EVEX_Vpminsq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpminsq,// EVEX_Vpminsq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vpmovd2m,// EVEX_Vpmovd2m_kr_xmm
Mnemonic::Vpmovd2m,// EVEX_Vpmovd2m_kr_ymm
Mnemonic::Vpmovd2m,// EVEX_Vpmovd2m_kr_zmm
Mnemonic::Vpmovq2m,// EVEX_Vpmovq2m_kr_xmm
Mnemonic::Vpmovq2m,// EVEX_Vpmovq2m_kr_ymm
Mnemonic::Vpmovq2m,// EVEX_Vpmovq2m_kr_zmm
Mnemonic::Pminuw,// Pminuw_xmm_xmmm128
Mnemonic::Vpminuw,// VEX_Vpminuw_xmm_xmm_xmmm128
Mnemonic::Vpminuw,// VEX_Vpminuw_ymm_ymm_ymmm256
Mnemonic::Vpminuw,// EVEX_Vpminuw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpminuw,// EVEX_Vpminuw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpminuw,// EVEX_Vpminuw_zmm_k1z_zmm_zmmm512
Mnemonic::Vpbroadcastmw2d,// EVEX_Vpbroadcastmw2d_xmm_kr
Mnemonic::Vpbroadcastmw2d,// EVEX_Vpbroadcastmw2d_ymm_kr
Mnemonic::Vpbroadcastmw2d,// EVEX_Vpbroadcastmw2d_zmm_kr
Mnemonic::Pminud,// Pminud_xmm_xmmm128
Mnemonic::Vpminud,// VEX_Vpminud_xmm_xmm_xmmm128
Mnemonic::Vpminud,// VEX_Vpminud_ymm_ymm_ymmm256
Mnemonic::Vpminud,// EVEX_Vpminud_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpminud,// EVEX_Vpminud_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpminud,// EVEX_Vpminud_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpminuq,// EVEX_Vpminuq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpminuq,// EVEX_Vpminuq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpminuq,// EVEX_Vpminuq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Pmaxsb,// Pmaxsb_xmm_xmmm128
Mnemonic::Vpmaxsb,// VEX_Vpmaxsb_xmm_xmm_xmmm128
Mnemonic::Vpmaxsb,// VEX_Vpmaxsb_ymm_ymm_ymmm256
Mnemonic::Vpmaxsb,// EVEX_Vpmaxsb_xmm_k1z_xmm_xmmm128
Mnemonic::Vpmaxsb,// EVEX_Vpmaxsb_ymm_k1z_ymm_ymmm256
Mnemonic::Vpmaxsb,// EVEX_Vpmaxsb_zmm_k1z_zmm_zmmm512
Mnemonic::Pmaxsd,// Pmaxsd_xmm_xmmm128
Mnemonic::Vpmaxsd,// VEX_Vpmaxsd_xmm_xmm_xmmm128
Mnemonic::Vpmaxsd,// VEX_Vpmaxsd_ymm_ymm_ymmm256
Mnemonic::Vpmaxsd,// EVEX_Vpmaxsd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpmaxsd,// EVEX_Vpmaxsd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpmaxsd,// EVEX_Vpmaxsd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpmaxsq,// EVEX_Vpmaxsq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpmaxsq,// EVEX_Vpmaxsq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpmaxsq,// EVEX_Vpmaxsq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Pmaxuw,// Pmaxuw_xmm_xmmm128
Mnemonic::Vpmaxuw,// VEX_Vpmaxuw_xmm_xmm_xmmm128
Mnemonic::Vpmaxuw,// VEX_Vpmaxuw_ymm_ymm_ymmm256
Mnemonic::Vpmaxuw,// EVEX_Vpmaxuw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpmaxuw,// EVEX_Vpmaxuw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpmaxuw,// EVEX_Vpmaxuw_zmm_k1z_zmm_zmmm512
Mnemonic::Pmaxud,// Pmaxud_xmm_xmmm128
Mnemonic::Vpmaxud,// VEX_Vpmaxud_xmm_xmm_xmmm128
Mnemonic::Vpmaxud,// VEX_Vpmaxud_ymm_ymm_ymmm256
Mnemonic::Vpmaxud,// EVEX_Vpmaxud_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpmaxud,// EVEX_Vpmaxud_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpmaxud,// EVEX_Vpmaxud_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpmaxuq,// EVEX_Vpmaxuq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpmaxuq,// EVEX_Vpmaxuq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpmaxuq,// EVEX_Vpmaxuq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Pmulld,// Pmulld_xmm_xmmm128
Mnemonic::Vpmulld,// VEX_Vpmulld_xmm_xmm_xmmm128
Mnemonic::Vpmulld,// VEX_Vpmulld_ymm_ymm_ymmm256
Mnemonic::Vpmulld,// EVEX_Vpmulld_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpmulld,// EVEX_Vpmulld_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpmulld,// EVEX_Vpmulld_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpmullq,// EVEX_Vpmullq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpmullq,// EVEX_Vpmullq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpmullq,// EVEX_Vpmullq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Phminposuw,// Phminposuw_xmm_xmmm128
Mnemonic::Vphminposuw,// VEX_Vphminposuw_xmm_xmmm128
Mnemonic::Vgetexpps,// EVEX_Vgetexpps_xmm_k1z_xmmm128b32
Mnemonic::Vgetexpps,// EVEX_Vgetexpps_ymm_k1z_ymmm256b32
Mnemonic::Vgetexpps,// EVEX_Vgetexpps_zmm_k1z_zmmm512b32_sae
Mnemonic::Vgetexppd,// EVEX_Vgetexppd_xmm_k1z_xmmm128b64
Mnemonic::Vgetexppd,// EVEX_Vgetexppd_ymm_k1z_ymmm256b64
Mnemonic::Vgetexppd,// EVEX_Vgetexppd_zmm_k1z_zmmm512b64_sae
Mnemonic::Vgetexpss,// EVEX_Vgetexpss_xmm_k1z_xmm_xmmm32_sae
Mnemonic::Vgetexpsd,// EVEX_Vgetexpsd_xmm_k1z_xmm_xmmm64_sae
Mnemonic::Vplzcntd,// EVEX_Vplzcntd_xmm_k1z_xmmm128b32
Mnemonic::Vplzcntd,// EVEX_Vplzcntd_ymm_k1z_ymmm256b32
Mnemonic::Vplzcntd,// EVEX_Vplzcntd_zmm_k1z_zmmm512b32
Mnemonic::Vplzcntq,// EVEX_Vplzcntq_xmm_k1z_xmmm128b64
Mnemonic::Vplzcntq,// EVEX_Vplzcntq_ymm_k1z_ymmm256b64
Mnemonic::Vplzcntq,// EVEX_Vplzcntq_zmm_k1z_zmmm512b64
Mnemonic::Vpsrlvd,// VEX_Vpsrlvd_xmm_xmm_xmmm128
Mnemonic::Vpsrlvd,// VEX_Vpsrlvd_ymm_ymm_ymmm256
Mnemonic::Vpsrlvq,// VEX_Vpsrlvq_xmm_xmm_xmmm128
Mnemonic::Vpsrlvq,// VEX_Vpsrlvq_ymm_ymm_ymmm256
Mnemonic::Vpsrlvd,// EVEX_Vpsrlvd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpsrlvd,// EVEX_Vpsrlvd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpsrlvd,// EVEX_Vpsrlvd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpsrlvq,// EVEX_Vpsrlvq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpsrlvq,// EVEX_Vpsrlvq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpsrlvq,// EVEX_Vpsrlvq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vpsravd,// VEX_Vpsravd_xmm_xmm_xmmm128
Mnemonic::Vpsravd,// VEX_Vpsravd_ymm_ymm_ymmm256
Mnemonic::Vpsravd,// EVEX_Vpsravd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpsravd,// EVEX_Vpsravd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpsravd,// EVEX_Vpsravd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpsravq,// EVEX_Vpsravq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpsravq,// EVEX_Vpsravq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpsravq,// EVEX_Vpsravq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vpsllvd,// VEX_Vpsllvd_xmm_xmm_xmmm128
Mnemonic::Vpsllvd,// VEX_Vpsllvd_ymm_ymm_ymmm256
Mnemonic::Vpsllvq,// VEX_Vpsllvq_xmm_xmm_xmmm128
Mnemonic::Vpsllvq,// VEX_Vpsllvq_ymm_ymm_ymmm256
Mnemonic::Vpsllvd,// EVEX_Vpsllvd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpsllvd,// EVEX_Vpsllvd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpsllvd,// EVEX_Vpsllvd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpsllvq,// EVEX_Vpsllvq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpsllvq,// EVEX_Vpsllvq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpsllvq,// EVEX_Vpsllvq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vrcp14ps,// EVEX_Vrcp14ps_xmm_k1z_xmmm128b32
Mnemonic::Vrcp14ps,// EVEX_Vrcp14ps_ymm_k1z_ymmm256b32
Mnemonic::Vrcp14ps,// EVEX_Vrcp14ps_zmm_k1z_zmmm512b32
Mnemonic::Vrcp14pd,// EVEX_Vrcp14pd_xmm_k1z_xmmm128b64
Mnemonic::Vrcp14pd,// EVEX_Vrcp14pd_ymm_k1z_ymmm256b64
Mnemonic::Vrcp14pd,// EVEX_Vrcp14pd_zmm_k1z_zmmm512b64
Mnemonic::Vrcp14ss,// EVEX_Vrcp14ss_xmm_k1z_xmm_xmmm32
Mnemonic::Vrcp14sd,// EVEX_Vrcp14sd_xmm_k1z_xmm_xmmm64
Mnemonic::Vrsqrt14ps,// EVEX_Vrsqrt14ps_xmm_k1z_xmmm128b32
Mnemonic::Vrsqrt14ps,// EVEX_Vrsqrt14ps_ymm_k1z_ymmm256b32
Mnemonic::Vrsqrt14ps,// EVEX_Vrsqrt14ps_zmm_k1z_zmmm512b32
Mnemonic::Vrsqrt14pd,// EVEX_Vrsqrt14pd_xmm_k1z_xmmm128b64
Mnemonic::Vrsqrt14pd,// EVEX_Vrsqrt14pd_ymm_k1z_ymmm256b64
Mnemonic::Vrsqrt14pd,// EVEX_Vrsqrt14pd_zmm_k1z_zmmm512b64
Mnemonic::Vrsqrt14ss,// EVEX_Vrsqrt14ss_xmm_k1z_xmm_xmmm32
Mnemonic::Vrsqrt14sd,// EVEX_Vrsqrt14sd_xmm_k1z_xmm_xmmm64
Mnemonic::Vpdpbusd,// EVEX_Vpdpbusd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpdpbusd,// EVEX_Vpdpbusd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpdpbusd,// EVEX_Vpdpbusd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpdpbusds,// EVEX_Vpdpbusds_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpdpbusds,// EVEX_Vpdpbusds_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpdpbusds,// EVEX_Vpdpbusds_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpdpwssd,// EVEX_Vpdpwssd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpdpwssd,// EVEX_Vpdpwssd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpdpwssd,// EVEX_Vpdpwssd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vdpbf16ps,// EVEX_Vdpbf16ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vdpbf16ps,// EVEX_Vdpbf16ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vdpbf16ps,// EVEX_Vdpbf16ps_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vp4dpwssd,// EVEX_Vp4dpwssd_zmm_k1z_zmmp3_m128
Mnemonic::Vpdpwssds,// EVEX_Vpdpwssds_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpdpwssds,// EVEX_Vpdpwssds_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpdpwssds,// EVEX_Vpdpwssds_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vp4dpwssds,// EVEX_Vp4dpwssds_zmm_k1z_zmmp3_m128
Mnemonic::Vpopcntb,// EVEX_Vpopcntb_xmm_k1z_xmmm128
Mnemonic::Vpopcntb,// EVEX_Vpopcntb_ymm_k1z_ymmm256
Mnemonic::Vpopcntb,// EVEX_Vpopcntb_zmm_k1z_zmmm512
Mnemonic::Vpopcntw,// EVEX_Vpopcntw_xmm_k1z_xmmm128
Mnemonic::Vpopcntw,// EVEX_Vpopcntw_ymm_k1z_ymmm256
Mnemonic::Vpopcntw,// EVEX_Vpopcntw_zmm_k1z_zmmm512
Mnemonic::Vpopcntd,// EVEX_Vpopcntd_xmm_k1z_xmmm128b32
Mnemonic::Vpopcntd,// EVEX_Vpopcntd_ymm_k1z_ymmm256b32
Mnemonic::Vpopcntd,// EVEX_Vpopcntd_zmm_k1z_zmmm512b32
Mnemonic::Vpopcntq,// EVEX_Vpopcntq_xmm_k1z_xmmm128b64
Mnemonic::Vpopcntq,// EVEX_Vpopcntq_ymm_k1z_ymmm256b64
Mnemonic::Vpopcntq,// EVEX_Vpopcntq_zmm_k1z_zmmm512b64
Mnemonic::Vpbroadcastd,// VEX_Vpbroadcastd_xmm_xmmm32
Mnemonic::Vpbroadcastd,// VEX_Vpbroadcastd_ymm_xmmm32
Mnemonic::Vpbroadcastd,// EVEX_Vpbroadcastd_xmm_k1z_xmmm32
Mnemonic::Vpbroadcastd,// EVEX_Vpbroadcastd_ymm_k1z_xmmm32
Mnemonic::Vpbroadcastd,// EVEX_Vpbroadcastd_zmm_k1z_xmmm32
Mnemonic::Vpbroadcastq,// VEX_Vpbroadcastq_xmm_xmmm64
Mnemonic::Vpbroadcastq,// VEX_Vpbroadcastq_ymm_xmmm64
Mnemonic::Vbroadcasti32x2,// EVEX_Vbroadcasti32x2_xmm_k1z_xmmm64
Mnemonic::Vbroadcasti32x2,// EVEX_Vbroadcasti32x2_ymm_k1z_xmmm64
Mnemonic::Vbroadcasti32x2,// EVEX_Vbroadcasti32x2_zmm_k1z_xmmm64
Mnemonic::Vpbroadcastq,// EVEX_Vpbroadcastq_xmm_k1z_xmmm64
Mnemonic::Vpbroadcastq,// EVEX_Vpbroadcastq_ymm_k1z_xmmm64
Mnemonic::Vpbroadcastq,// EVEX_Vpbroadcastq_zmm_k1z_xmmm64
Mnemonic::Vbroadcasti128,// VEX_Vbroadcasti128_ymm_m128
Mnemonic::Vbroadcasti32x4,// EVEX_Vbroadcasti32x4_ymm_k1z_m128
Mnemonic::Vbroadcasti32x4,// EVEX_Vbroadcasti32x4_zmm_k1z_m128
Mnemonic::Vbroadcasti64x2,// EVEX_Vbroadcasti64x2_ymm_k1z_m128
Mnemonic::Vbroadcasti64x2,// EVEX_Vbroadcasti64x2_zmm_k1z_m128
Mnemonic::Vbroadcasti32x8,// EVEX_Vbroadcasti32x8_zmm_k1z_m256
Mnemonic::Vbroadcasti64x4,// EVEX_Vbroadcasti64x4_zmm_k1z_m256
Mnemonic::Vpexpandb,// EVEX_Vpexpandb_xmm_k1z_xmmm128
Mnemonic::Vpexpandb,// EVEX_Vpexpandb_ymm_k1z_ymmm256
Mnemonic::Vpexpandb,// EVEX_Vpexpandb_zmm_k1z_zmmm512
Mnemonic::Vpexpandw,// EVEX_Vpexpandw_xmm_k1z_xmmm128
Mnemonic::Vpexpandw,// EVEX_Vpexpandw_ymm_k1z_ymmm256
Mnemonic::Vpexpandw,// EVEX_Vpexpandw_zmm_k1z_zmmm512
Mnemonic::Vpcompressb,// EVEX_Vpcompressb_xmmm128_k1z_xmm
Mnemonic::Vpcompressb,// EVEX_Vpcompressb_ymmm256_k1z_ymm
Mnemonic::Vpcompressb,// EVEX_Vpcompressb_zmmm512_k1z_zmm
Mnemonic::Vpcompressw,// EVEX_Vpcompressw_xmmm128_k1z_xmm
Mnemonic::Vpcompressw,// EVEX_Vpcompressw_ymmm256_k1z_ymm
Mnemonic::Vpcompressw,// EVEX_Vpcompressw_zmmm512_k1z_zmm
Mnemonic::Vpblendmd,// EVEX_Vpblendmd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpblendmd,// EVEX_Vpblendmd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpblendmd,// EVEX_Vpblendmd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpblendmq,// EVEX_Vpblendmq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpblendmq,// EVEX_Vpblendmq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpblendmq,// EVEX_Vpblendmq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vblendmps,// EVEX_Vblendmps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vblendmps,// EVEX_Vblendmps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vblendmps,// EVEX_Vblendmps_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vblendmpd,// EVEX_Vblendmpd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vblendmpd,// EVEX_Vblendmpd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vblendmpd,// EVEX_Vblendmpd_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vpblendmb,// EVEX_Vpblendmb_xmm_k1z_xmm_xmmm128
Mnemonic::Vpblendmb,// EVEX_Vpblendmb_ymm_k1z_ymm_ymmm256
Mnemonic::Vpblendmb,// EVEX_Vpblendmb_zmm_k1z_zmm_zmmm512
Mnemonic::Vpblendmw,// EVEX_Vpblendmw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpblendmw,// EVEX_Vpblendmw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpblendmw,// EVEX_Vpblendmw_zmm_k1z_zmm_zmmm512
Mnemonic::Vp2intersectd,// EVEX_Vp2intersectd_kp1_xmm_xmmm128b32
Mnemonic::Vp2intersectd,// EVEX_Vp2intersectd_kp1_ymm_ymmm256b32
Mnemonic::Vp2intersectd,// EVEX_Vp2intersectd_kp1_zmm_zmmm512b32
Mnemonic::Vp2intersectq,// EVEX_Vp2intersectq_kp1_xmm_xmmm128b64
Mnemonic::Vp2intersectq,// EVEX_Vp2intersectq_kp1_ymm_ymmm256b64
Mnemonic::Vp2intersectq,// EVEX_Vp2intersectq_kp1_zmm_zmmm512b64
Mnemonic::Vpshldvw,// EVEX_Vpshldvw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpshldvw,// EVEX_Vpshldvw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpshldvw,// EVEX_Vpshldvw_zmm_k1z_zmm_zmmm512
Mnemonic::Vpshldvd,// EVEX_Vpshldvd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpshldvd,// EVEX_Vpshldvd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpshldvd,// EVEX_Vpshldvd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpshldvq,// EVEX_Vpshldvq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpshldvq,// EVEX_Vpshldvq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpshldvq,// EVEX_Vpshldvq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vpshrdvw,// EVEX_Vpshrdvw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpshrdvw,// EVEX_Vpshrdvw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpshrdvw,// EVEX_Vpshrdvw_zmm_k1z_zmm_zmmm512
Mnemonic::Vcvtneps2bf16,// EVEX_Vcvtneps2bf16_xmm_k1z_xmmm128b32
Mnemonic::Vcvtneps2bf16,// EVEX_Vcvtneps2bf16_xmm_k1z_ymmm256b32
Mnemonic::Vcvtneps2bf16,// EVEX_Vcvtneps2bf16_ymm_k1z_zmmm512b32
Mnemonic::Vcvtne2ps2bf16,// EVEX_Vcvtne2ps2bf16_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vcvtne2ps2bf16,// EVEX_Vcvtne2ps2bf16_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vcvtne2ps2bf16,// EVEX_Vcvtne2ps2bf16_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpshrdvd,// EVEX_Vpshrdvd_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpshrdvd,// EVEX_Vpshrdvd_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpshrdvd,// EVEX_Vpshrdvd_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpshrdvq,// EVEX_Vpshrdvq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpshrdvq,// EVEX_Vpshrdvq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpshrdvq,// EVEX_Vpshrdvq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vpermi2b,// EVEX_Vpermi2b_xmm_k1z_xmm_xmmm128
Mnemonic::Vpermi2b,// EVEX_Vpermi2b_ymm_k1z_ymm_ymmm256
Mnemonic::Vpermi2b,// EVEX_Vpermi2b_zmm_k1z_zmm_zmmm512
Mnemonic::Vpermi2w,// EVEX_Vpermi2w_xmm_k1z_xmm_xmmm128
Mnemonic::Vpermi2w,// EVEX_Vpermi2w_ymm_k1z_ymm_ymmm256
Mnemonic::Vpermi2w,// EVEX_Vpermi2w_zmm_k1z_zmm_zmmm512
Mnemonic::Vpermi2d,// EVEX_Vpermi2d_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpermi2d,// EVEX_Vpermi2d_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpermi2d,// EVEX_Vpermi2d_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpermi2q,// EVEX_Vpermi2q_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpermi2q,// EVEX_Vpermi2q_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpermi2q,// EVEX_Vpermi2q_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vpermi2ps,// EVEX_Vpermi2ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpermi2ps,// EVEX_Vpermi2ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpermi2ps,// EVEX_Vpermi2ps_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpermi2pd,// EVEX_Vpermi2pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpermi2pd,// EVEX_Vpermi2pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpermi2pd,// EVEX_Vpermi2pd_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vpbroadcastb,// VEX_Vpbroadcastb_xmm_xmmm8
Mnemonic::Vpbroadcastb,// VEX_Vpbroadcastb_ymm_xmmm8
Mnemonic::Vpbroadcastb,// EVEX_Vpbroadcastb_xmm_k1z_xmmm8
Mnemonic::Vpbroadcastb,// EVEX_Vpbroadcastb_ymm_k1z_xmmm8
Mnemonic::Vpbroadcastb,// EVEX_Vpbroadcastb_zmm_k1z_xmmm8
Mnemonic::Vpbroadcastw,// VEX_Vpbroadcastw_xmm_xmmm16
Mnemonic::Vpbroadcastw,// VEX_Vpbroadcastw_ymm_xmmm16
Mnemonic::Vpbroadcastw,// EVEX_Vpbroadcastw_xmm_k1z_xmmm16
Mnemonic::Vpbroadcastw,// EVEX_Vpbroadcastw_ymm_k1z_xmmm16
Mnemonic::Vpbroadcastw,// EVEX_Vpbroadcastw_zmm_k1z_xmmm16
Mnemonic::Vpbroadcastb,// EVEX_Vpbroadcastb_xmm_k1z_r32
Mnemonic::Vpbroadcastb,// EVEX_Vpbroadcastb_ymm_k1z_r32
Mnemonic::Vpbroadcastb,// EVEX_Vpbroadcastb_zmm_k1z_r32
Mnemonic::Vpbroadcastw,// EVEX_Vpbroadcastw_xmm_k1z_r32
Mnemonic::Vpbroadcastw,// EVEX_Vpbroadcastw_ymm_k1z_r32
Mnemonic::Vpbroadcastw,// EVEX_Vpbroadcastw_zmm_k1z_r32
Mnemonic::Vpbroadcastd,// EVEX_Vpbroadcastd_xmm_k1z_r32
Mnemonic::Vpbroadcastd,// EVEX_Vpbroadcastd_ymm_k1z_r32
Mnemonic::Vpbroadcastd,// EVEX_Vpbroadcastd_zmm_k1z_r32
Mnemonic::Vpbroadcastq,// EVEX_Vpbroadcastq_xmm_k1z_r64
Mnemonic::Vpbroadcastq,// EVEX_Vpbroadcastq_ymm_k1z_r64
Mnemonic::Vpbroadcastq,// EVEX_Vpbroadcastq_zmm_k1z_r64
Mnemonic::Vpermt2b,// EVEX_Vpermt2b_xmm_k1z_xmm_xmmm128
Mnemonic::Vpermt2b,// EVEX_Vpermt2b_ymm_k1z_ymm_ymmm256
Mnemonic::Vpermt2b,// EVEX_Vpermt2b_zmm_k1z_zmm_zmmm512
Mnemonic::Vpermt2w,// EVEX_Vpermt2w_xmm_k1z_xmm_xmmm128
Mnemonic::Vpermt2w,// EVEX_Vpermt2w_ymm_k1z_ymm_ymmm256
Mnemonic::Vpermt2w,// EVEX_Vpermt2w_zmm_k1z_zmm_zmmm512
Mnemonic::Vpermt2d,// EVEX_Vpermt2d_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpermt2d,// EVEX_Vpermt2d_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpermt2d,// EVEX_Vpermt2d_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpermt2q,// EVEX_Vpermt2q_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpermt2q,// EVEX_Vpermt2q_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpermt2q,// EVEX_Vpermt2q_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vpermt2ps,// EVEX_Vpermt2ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vpermt2ps,// EVEX_Vpermt2ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vpermt2ps,// EVEX_Vpermt2ps_zmm_k1z_zmm_zmmm512b32
Mnemonic::Vpermt2pd,// EVEX_Vpermt2pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpermt2pd,// EVEX_Vpermt2pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpermt2pd,// EVEX_Vpermt2pd_zmm_k1z_zmm_zmmm512b64
Mnemonic::Invept,// Invept_r32_m128
Mnemonic::Invept,// Invept_r64_m128
Mnemonic::Invvpid,// Invvpid_r32_m128
Mnemonic::Invvpid,// Invvpid_r64_m128
Mnemonic::Invpcid,// Invpcid_r32_m128
Mnemonic::Invpcid,// Invpcid_r64_m128
Mnemonic::Vpmultishiftqb,// EVEX_Vpmultishiftqb_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpmultishiftqb,// EVEX_Vpmultishiftqb_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpmultishiftqb,// EVEX_Vpmultishiftqb_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vexpandps,// EVEX_Vexpandps_xmm_k1z_xmmm128
Mnemonic::Vexpandps,// EVEX_Vexpandps_ymm_k1z_ymmm256
Mnemonic::Vexpandps,// EVEX_Vexpandps_zmm_k1z_zmmm512
Mnemonic::Vexpandpd,// EVEX_Vexpandpd_xmm_k1z_xmmm128
Mnemonic::Vexpandpd,// EVEX_Vexpandpd_ymm_k1z_ymmm256
Mnemonic::Vexpandpd,// EVEX_Vexpandpd_zmm_k1z_zmmm512
Mnemonic::Vpexpandd,// EVEX_Vpexpandd_xmm_k1z_xmmm128
Mnemonic::Vpexpandd,// EVEX_Vpexpandd_ymm_k1z_ymmm256
Mnemonic::Vpexpandd,// EVEX_Vpexpandd_zmm_k1z_zmmm512
Mnemonic::Vpexpandq,// EVEX_Vpexpandq_xmm_k1z_xmmm128
Mnemonic::Vpexpandq,// EVEX_Vpexpandq_ymm_k1z_ymmm256
Mnemonic::Vpexpandq,// EVEX_Vpexpandq_zmm_k1z_zmmm512
Mnemonic::Vcompressps,// EVEX_Vcompressps_xmmm128_k1z_xmm
Mnemonic::Vcompressps,// EVEX_Vcompressps_ymmm256_k1z_ymm
Mnemonic::Vcompressps,// EVEX_Vcompressps_zmmm512_k1z_zmm
Mnemonic::Vcompresspd,// EVEX_Vcompresspd_xmmm128_k1z_xmm
Mnemonic::Vcompresspd,// EVEX_Vcompresspd_ymmm256_k1z_ymm
Mnemonic::Vcompresspd,// EVEX_Vcompresspd_zmmm512_k1z_zmm
Mnemonic::Vpcompressd,// EVEX_Vpcompressd_xmmm128_k1z_xmm
Mnemonic::Vpcompressd,// EVEX_Vpcompressd_ymmm256_k1z_ymm
Mnemonic::Vpcompressd,// EVEX_Vpcompressd_zmmm512_k1z_zmm
Mnemonic::Vpcompressq,// EVEX_Vpcompressq_xmmm128_k1z_xmm
Mnemonic::Vpcompressq,// EVEX_Vpcompressq_ymmm256_k1z_ymm
Mnemonic::Vpcompressq,// EVEX_Vpcompressq_zmmm512_k1z_zmm
Mnemonic::Vpmaskmovd,// VEX_Vpmaskmovd_xmm_xmm_m128
Mnemonic::Vpmaskmovd,// VEX_Vpmaskmovd_ymm_ymm_m256
Mnemonic::Vpmaskmovq,// VEX_Vpmaskmovq_xmm_xmm_m128
Mnemonic::Vpmaskmovq,// VEX_Vpmaskmovq_ymm_ymm_m256
Mnemonic::Vpermb,// EVEX_Vpermb_xmm_k1z_xmm_xmmm128
Mnemonic::Vpermb,// EVEX_Vpermb_ymm_k1z_ymm_ymmm256
Mnemonic::Vpermb,// EVEX_Vpermb_zmm_k1z_zmm_zmmm512
Mnemonic::Vpermw,// EVEX_Vpermw_xmm_k1z_xmm_xmmm128
Mnemonic::Vpermw,// EVEX_Vpermw_ymm_k1z_ymm_ymmm256
Mnemonic::Vpermw,// EVEX_Vpermw_zmm_k1z_zmm_zmmm512
Mnemonic::Vpmaskmovd,// VEX_Vpmaskmovd_m128_xmm_xmm
Mnemonic::Vpmaskmovd,// VEX_Vpmaskmovd_m256_ymm_ymm
Mnemonic::Vpmaskmovq,// VEX_Vpmaskmovq_m128_xmm_xmm
Mnemonic::Vpmaskmovq,// VEX_Vpmaskmovq_m256_ymm_ymm
Mnemonic::Vpshufbitqmb,// EVEX_Vpshufbitqmb_kr_k1_xmm_xmmm128
Mnemonic::Vpshufbitqmb,// EVEX_Vpshufbitqmb_kr_k1_ymm_ymmm256
Mnemonic::Vpshufbitqmb,// EVEX_Vpshufbitqmb_kr_k1_zmm_zmmm512
Mnemonic::Vpgatherdd,// VEX_Vpgatherdd_xmm_vm32x_xmm
Mnemonic::Vpgatherdd,// VEX_Vpgatherdd_ymm_vm32y_ymm
Mnemonic::Vpgatherdq,// VEX_Vpgatherdq_xmm_vm32x_xmm
Mnemonic::Vpgatherdq,// VEX_Vpgatherdq_ymm_vm32x_ymm
Mnemonic::Vpgatherdd,// EVEX_Vpgatherdd_xmm_k1_vm32x
Mnemonic::Vpgatherdd,// EVEX_Vpgatherdd_ymm_k1_vm32y
Mnemonic::Vpgatherdd,// EVEX_Vpgatherdd_zmm_k1_vm32z
Mnemonic::Vpgatherdq,// EVEX_Vpgatherdq_xmm_k1_vm32x
Mnemonic::Vpgatherdq,// EVEX_Vpgatherdq_ymm_k1_vm32x
Mnemonic::Vpgatherdq,// EVEX_Vpgatherdq_zmm_k1_vm32y
Mnemonic::Vpgatherqd,// VEX_Vpgatherqd_xmm_vm64x_xmm
Mnemonic::Vpgatherqd,// VEX_Vpgatherqd_xmm_vm64y_xmm
Mnemonic::Vpgatherqq,// VEX_Vpgatherqq_xmm_vm64x_xmm
Mnemonic::Vpgatherqq,// VEX_Vpgatherqq_ymm_vm64y_ymm
Mnemonic::Vpgatherqd,// EVEX_Vpgatherqd_xmm_k1_vm64x
Mnemonic::Vpgatherqd,// EVEX_Vpgatherqd_xmm_k1_vm64y
Mnemonic::Vpgatherqd,// EVEX_Vpgatherqd_ymm_k1_vm64z
Mnemonic::Vpgatherqq,// EVEX_Vpgatherqq_xmm_k1_vm64x
Mnemonic::Vpgatherqq,// EVEX_Vpgatherqq_ymm_k1_vm64y
Mnemonic::Vpgatherqq,// EVEX_Vpgatherqq_zmm_k1_vm64z
Mnemonic::Vgatherdps,// VEX_Vgatherdps_xmm_vm32x_xmm
Mnemonic::Vgatherdps,// VEX_Vgatherdps_ymm_vm32y_ymm
Mnemonic::Vgatherdpd,// VEX_Vgatherdpd_xmm_vm32x_xmm
Mnemonic::Vgatherdpd,// VEX_Vgatherdpd_ymm_vm32x_ymm
Mnemonic::Vgatherdps,// EVEX_Vgatherdps_xmm_k1_vm32x
Mnemonic::Vgatherdps,// EVEX_Vgatherdps_ymm_k1_vm32y
Mnemonic::Vgatherdps,// EVEX_Vgatherdps_zmm_k1_vm32z
Mnemonic::Vgatherdpd,// EVEX_Vgatherdpd_xmm_k1_vm32x
Mnemonic::Vgatherdpd,// EVEX_Vgatherdpd_ymm_k1_vm32x
Mnemonic::Vgatherdpd,// EVEX_Vgatherdpd_zmm_k1_vm32y
Mnemonic::Vgatherqps,// VEX_Vgatherqps_xmm_vm64x_xmm
Mnemonic::Vgatherqps,// VEX_Vgatherqps_xmm_vm64y_xmm
Mnemonic::Vgatherqpd,// VEX_Vgatherqpd_xmm_vm64x_xmm
Mnemonic::Vgatherqpd,// VEX_Vgatherqpd_ymm_vm64y_ymm
Mnemonic::Vgatherqps,// EVEX_Vgatherqps_xmm_k1_vm64x
Mnemonic::Vgatherqps,// EVEX_Vgatherqps_xmm_k1_vm64y
Mnemonic::Vgatherqps,// EVEX_Vgatherqps_ymm_k1_vm64z
Mnemonic::Vgatherqpd,// EVEX_Vgatherqpd_xmm_k1_vm64x
Mnemonic::Vgatherqpd,// EVEX_Vgatherqpd_ymm_k1_vm64y
Mnemonic::Vgatherqpd,// EVEX_Vgatherqpd_zmm_k1_vm64z
Mnemonic::Vfmaddsub132ps,// VEX_Vfmaddsub132ps_xmm_xmm_xmmm128
Mnemonic::Vfmaddsub132ps,// VEX_Vfmaddsub132ps_ymm_ymm_ymmm256
Mnemonic::Vfmaddsub132pd,// VEX_Vfmaddsub132pd_xmm_xmm_xmmm128
Mnemonic::Vfmaddsub132pd,// VEX_Vfmaddsub132pd_ymm_ymm_ymmm256
Mnemonic::Vfmaddsub132ps,// EVEX_Vfmaddsub132ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfmaddsub132ps,// EVEX_Vfmaddsub132ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfmaddsub132ps,// EVEX_Vfmaddsub132ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfmaddsub132pd,// EVEX_Vfmaddsub132pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfmaddsub132pd,// EVEX_Vfmaddsub132pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfmaddsub132pd,// EVEX_Vfmaddsub132pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfmsubadd132ps,// VEX_Vfmsubadd132ps_xmm_xmm_xmmm128
Mnemonic::Vfmsubadd132ps,// VEX_Vfmsubadd132ps_ymm_ymm_ymmm256
Mnemonic::Vfmsubadd132pd,// VEX_Vfmsubadd132pd_xmm_xmm_xmmm128
Mnemonic::Vfmsubadd132pd,// VEX_Vfmsubadd132pd_ymm_ymm_ymmm256
Mnemonic::Vfmsubadd132ps,// EVEX_Vfmsubadd132ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfmsubadd132ps,// EVEX_Vfmsubadd132ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfmsubadd132ps,// EVEX_Vfmsubadd132ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfmsubadd132pd,// EVEX_Vfmsubadd132pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfmsubadd132pd,// EVEX_Vfmsubadd132pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfmsubadd132pd,// EVEX_Vfmsubadd132pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfmadd132ps,// VEX_Vfmadd132ps_xmm_xmm_xmmm128
Mnemonic::Vfmadd132ps,// VEX_Vfmadd132ps_ymm_ymm_ymmm256
Mnemonic::Vfmadd132pd,// VEX_Vfmadd132pd_xmm_xmm_xmmm128
Mnemonic::Vfmadd132pd,// VEX_Vfmadd132pd_ymm_ymm_ymmm256
Mnemonic::Vfmadd132ps,// EVEX_Vfmadd132ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfmadd132ps,// EVEX_Vfmadd132ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfmadd132ps,// EVEX_Vfmadd132ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfmadd132pd,// EVEX_Vfmadd132pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfmadd132pd,// EVEX_Vfmadd132pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfmadd132pd,// EVEX_Vfmadd132pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfmadd132ss,// VEX_Vfmadd132ss_xmm_xmm_xmmm32
Mnemonic::Vfmadd132sd,// VEX_Vfmadd132sd_xmm_xmm_xmmm64
Mnemonic::Vfmadd132ss,// EVEX_Vfmadd132ss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Vfmadd132sd,// EVEX_Vfmadd132sd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Vfmsub132ps,// VEX_Vfmsub132ps_xmm_xmm_xmmm128
Mnemonic::Vfmsub132ps,// VEX_Vfmsub132ps_ymm_ymm_ymmm256
Mnemonic::Vfmsub132pd,// VEX_Vfmsub132pd_xmm_xmm_xmmm128
Mnemonic::Vfmsub132pd,// VEX_Vfmsub132pd_ymm_ymm_ymmm256
Mnemonic::Vfmsub132ps,// EVEX_Vfmsub132ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfmsub132ps,// EVEX_Vfmsub132ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfmsub132ps,// EVEX_Vfmsub132ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfmsub132pd,// EVEX_Vfmsub132pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfmsub132pd,// EVEX_Vfmsub132pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfmsub132pd,// EVEX_Vfmsub132pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::V4fmaddps,// EVEX_V4fmaddps_zmm_k1z_zmmp3_m128
Mnemonic::Vfmsub132ss,// VEX_Vfmsub132ss_xmm_xmm_xmmm32
Mnemonic::Vfmsub132sd,// VEX_Vfmsub132sd_xmm_xmm_xmmm64
Mnemonic::Vfmsub132ss,// EVEX_Vfmsub132ss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Vfmsub132sd,// EVEX_Vfmsub132sd_xmm_k1z_xmm_xmmm64_er
Mnemonic::V4fmaddss,// EVEX_V4fmaddss_xmm_k1z_xmmp3_m128
Mnemonic::Vfnmadd132ps,// VEX_Vfnmadd132ps_xmm_xmm_xmmm128
Mnemonic::Vfnmadd132ps,// VEX_Vfnmadd132ps_ymm_ymm_ymmm256
Mnemonic::Vfnmadd132pd,// VEX_Vfnmadd132pd_xmm_xmm_xmmm128
Mnemonic::Vfnmadd132pd,// VEX_Vfnmadd132pd_ymm_ymm_ymmm256
Mnemonic::Vfnmadd132ps,// EVEX_Vfnmadd132ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfnmadd132ps,// EVEX_Vfnmadd132ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfnmadd132ps,// EVEX_Vfnmadd132ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfnmadd132pd,// EVEX_Vfnmadd132pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfnmadd132pd,// EVEX_Vfnmadd132pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfnmadd132pd,// EVEX_Vfnmadd132pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfnmadd132ss,// VEX_Vfnmadd132ss_xmm_xmm_xmmm32
Mnemonic::Vfnmadd132sd,// VEX_Vfnmadd132sd_xmm_xmm_xmmm64
Mnemonic::Vfnmadd132ss,// EVEX_Vfnmadd132ss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Vfnmadd132sd,// EVEX_Vfnmadd132sd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Vfnmsub132ps,// VEX_Vfnmsub132ps_xmm_xmm_xmmm128
Mnemonic::Vfnmsub132ps,// VEX_Vfnmsub132ps_ymm_ymm_ymmm256
Mnemonic::Vfnmsub132pd,// VEX_Vfnmsub132pd_xmm_xmm_xmmm128
Mnemonic::Vfnmsub132pd,// VEX_Vfnmsub132pd_ymm_ymm_ymmm256
Mnemonic::Vfnmsub132ps,// EVEX_Vfnmsub132ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfnmsub132ps,// EVEX_Vfnmsub132ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfnmsub132ps,// EVEX_Vfnmsub132ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfnmsub132pd,// EVEX_Vfnmsub132pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfnmsub132pd,// EVEX_Vfnmsub132pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfnmsub132pd,// EVEX_Vfnmsub132pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfnmsub132ss,// VEX_Vfnmsub132ss_xmm_xmm_xmmm32
Mnemonic::Vfnmsub132sd,// VEX_Vfnmsub132sd_xmm_xmm_xmmm64
Mnemonic::Vfnmsub132ss,// EVEX_Vfnmsub132ss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Vfnmsub132sd,// EVEX_Vfnmsub132sd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Vpscatterdd,// EVEX_Vpscatterdd_vm32x_k1_xmm
Mnemonic::Vpscatterdd,// EVEX_Vpscatterdd_vm32y_k1_ymm
Mnemonic::Vpscatterdd,// EVEX_Vpscatterdd_vm32z_k1_zmm
Mnemonic::Vpscatterdq,// EVEX_Vpscatterdq_vm32x_k1_xmm
Mnemonic::Vpscatterdq,// EVEX_Vpscatterdq_vm32x_k1_ymm
Mnemonic::Vpscatterdq,// EVEX_Vpscatterdq_vm32y_k1_zmm
Mnemonic::Vpscatterqd,// EVEX_Vpscatterqd_vm64x_k1_xmm
Mnemonic::Vpscatterqd,// EVEX_Vpscatterqd_vm64y_k1_xmm
Mnemonic::Vpscatterqd,// EVEX_Vpscatterqd_vm64z_k1_ymm
Mnemonic::Vpscatterqq,// EVEX_Vpscatterqq_vm64x_k1_xmm
Mnemonic::Vpscatterqq,// EVEX_Vpscatterqq_vm64y_k1_ymm
Mnemonic::Vpscatterqq,// EVEX_Vpscatterqq_vm64z_k1_zmm
Mnemonic::Vscatterdps,// EVEX_Vscatterdps_vm32x_k1_xmm
Mnemonic::Vscatterdps,// EVEX_Vscatterdps_vm32y_k1_ymm
Mnemonic::Vscatterdps,// EVEX_Vscatterdps_vm32z_k1_zmm
Mnemonic::Vscatterdpd,// EVEX_Vscatterdpd_vm32x_k1_xmm
Mnemonic::Vscatterdpd,// EVEX_Vscatterdpd_vm32x_k1_ymm
Mnemonic::Vscatterdpd,// EVEX_Vscatterdpd_vm32y_k1_zmm
Mnemonic::Vscatterqps,// EVEX_Vscatterqps_vm64x_k1_xmm
Mnemonic::Vscatterqps,// EVEX_Vscatterqps_vm64y_k1_xmm
Mnemonic::Vscatterqps,// EVEX_Vscatterqps_vm64z_k1_ymm
Mnemonic::Vscatterqpd,// EVEX_Vscatterqpd_vm64x_k1_xmm
Mnemonic::Vscatterqpd,// EVEX_Vscatterqpd_vm64y_k1_ymm
Mnemonic::Vscatterqpd,// EVEX_Vscatterqpd_vm64z_k1_zmm
Mnemonic::Vfmaddsub213ps,// VEX_Vfmaddsub213ps_xmm_xmm_xmmm128
Mnemonic::Vfmaddsub213ps,// VEX_Vfmaddsub213ps_ymm_ymm_ymmm256
Mnemonic::Vfmaddsub213pd,// VEX_Vfmaddsub213pd_xmm_xmm_xmmm128
Mnemonic::Vfmaddsub213pd,// VEX_Vfmaddsub213pd_ymm_ymm_ymmm256
Mnemonic::Vfmaddsub213ps,// EVEX_Vfmaddsub213ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfmaddsub213ps,// EVEX_Vfmaddsub213ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfmaddsub213ps,// EVEX_Vfmaddsub213ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfmaddsub213pd,// EVEX_Vfmaddsub213pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfmaddsub213pd,// EVEX_Vfmaddsub213pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfmaddsub213pd,// EVEX_Vfmaddsub213pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfmsubadd213ps,// VEX_Vfmsubadd213ps_xmm_xmm_xmmm128
Mnemonic::Vfmsubadd213ps,// VEX_Vfmsubadd213ps_ymm_ymm_ymmm256
Mnemonic::Vfmsubadd213pd,// VEX_Vfmsubadd213pd_xmm_xmm_xmmm128
Mnemonic::Vfmsubadd213pd,// VEX_Vfmsubadd213pd_ymm_ymm_ymmm256
Mnemonic::Vfmsubadd213ps,// EVEX_Vfmsubadd213ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfmsubadd213ps,// EVEX_Vfmsubadd213ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfmsubadd213ps,// EVEX_Vfmsubadd213ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfmsubadd213pd,// EVEX_Vfmsubadd213pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfmsubadd213pd,// EVEX_Vfmsubadd213pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfmsubadd213pd,// EVEX_Vfmsubadd213pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfmadd213ps,// VEX_Vfmadd213ps_xmm_xmm_xmmm128
Mnemonic::Vfmadd213ps,// VEX_Vfmadd213ps_ymm_ymm_ymmm256
Mnemonic::Vfmadd213pd,// VEX_Vfmadd213pd_xmm_xmm_xmmm128
Mnemonic::Vfmadd213pd,// VEX_Vfmadd213pd_ymm_ymm_ymmm256
Mnemonic::Vfmadd213ps,// EVEX_Vfmadd213ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfmadd213ps,// EVEX_Vfmadd213ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfmadd213ps,// EVEX_Vfmadd213ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfmadd213pd,// EVEX_Vfmadd213pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfmadd213pd,// EVEX_Vfmadd213pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfmadd213pd,// EVEX_Vfmadd213pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfmadd213ss,// VEX_Vfmadd213ss_xmm_xmm_xmmm32
Mnemonic::Vfmadd213sd,// VEX_Vfmadd213sd_xmm_xmm_xmmm64
Mnemonic::Vfmadd213ss,// EVEX_Vfmadd213ss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Vfmadd213sd,// EVEX_Vfmadd213sd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Vfmsub213ps,// VEX_Vfmsub213ps_xmm_xmm_xmmm128
Mnemonic::Vfmsub213ps,// VEX_Vfmsub213ps_ymm_ymm_ymmm256
Mnemonic::Vfmsub213pd,// VEX_Vfmsub213pd_xmm_xmm_xmmm128
Mnemonic::Vfmsub213pd,// VEX_Vfmsub213pd_ymm_ymm_ymmm256
Mnemonic::Vfmsub213ps,// EVEX_Vfmsub213ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfmsub213ps,// EVEX_Vfmsub213ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfmsub213ps,// EVEX_Vfmsub213ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfmsub213pd,// EVEX_Vfmsub213pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfmsub213pd,// EVEX_Vfmsub213pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfmsub213pd,// EVEX_Vfmsub213pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::V4fnmaddps,// EVEX_V4fnmaddps_zmm_k1z_zmmp3_m128
Mnemonic::Vfmsub213ss,// VEX_Vfmsub213ss_xmm_xmm_xmmm32
Mnemonic::Vfmsub213sd,// VEX_Vfmsub213sd_xmm_xmm_xmmm64
Mnemonic::Vfmsub213ss,// EVEX_Vfmsub213ss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Vfmsub213sd,// EVEX_Vfmsub213sd_xmm_k1z_xmm_xmmm64_er
Mnemonic::V4fnmaddss,// EVEX_V4fnmaddss_xmm_k1z_xmmp3_m128
Mnemonic::Vfnmadd213ps,// VEX_Vfnmadd213ps_xmm_xmm_xmmm128
Mnemonic::Vfnmadd213ps,// VEX_Vfnmadd213ps_ymm_ymm_ymmm256
Mnemonic::Vfnmadd213pd,// VEX_Vfnmadd213pd_xmm_xmm_xmmm128
Mnemonic::Vfnmadd213pd,// VEX_Vfnmadd213pd_ymm_ymm_ymmm256
Mnemonic::Vfnmadd213ps,// EVEX_Vfnmadd213ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfnmadd213ps,// EVEX_Vfnmadd213ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfnmadd213ps,// EVEX_Vfnmadd213ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfnmadd213pd,// EVEX_Vfnmadd213pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfnmadd213pd,// EVEX_Vfnmadd213pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfnmadd213pd,// EVEX_Vfnmadd213pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfnmadd213ss,// VEX_Vfnmadd213ss_xmm_xmm_xmmm32
Mnemonic::Vfnmadd213sd,// VEX_Vfnmadd213sd_xmm_xmm_xmmm64
Mnemonic::Vfnmadd213ss,// EVEX_Vfnmadd213ss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Vfnmadd213sd,// EVEX_Vfnmadd213sd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Vfnmsub213ps,// VEX_Vfnmsub213ps_xmm_xmm_xmmm128
Mnemonic::Vfnmsub213ps,// VEX_Vfnmsub213ps_ymm_ymm_ymmm256
Mnemonic::Vfnmsub213pd,// VEX_Vfnmsub213pd_xmm_xmm_xmmm128
Mnemonic::Vfnmsub213pd,// VEX_Vfnmsub213pd_ymm_ymm_ymmm256
Mnemonic::Vfnmsub213ps,// EVEX_Vfnmsub213ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfnmsub213ps,// EVEX_Vfnmsub213ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfnmsub213ps,// EVEX_Vfnmsub213ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfnmsub213pd,// EVEX_Vfnmsub213pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfnmsub213pd,// EVEX_Vfnmsub213pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfnmsub213pd,// EVEX_Vfnmsub213pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfnmsub213ss,// VEX_Vfnmsub213ss_xmm_xmm_xmmm32
Mnemonic::Vfnmsub213sd,// VEX_Vfnmsub213sd_xmm_xmm_xmmm64
Mnemonic::Vfnmsub213ss,// EVEX_Vfnmsub213ss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Vfnmsub213sd,// EVEX_Vfnmsub213sd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Vpmadd52luq,// EVEX_Vpmadd52luq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpmadd52luq,// EVEX_Vpmadd52luq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpmadd52luq,// EVEX_Vpmadd52luq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vpmadd52huq,// EVEX_Vpmadd52huq_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vpmadd52huq,// EVEX_Vpmadd52huq_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vpmadd52huq,// EVEX_Vpmadd52huq_zmm_k1z_zmm_zmmm512b64
Mnemonic::Vfmaddsub231ps,// VEX_Vfmaddsub231ps_xmm_xmm_xmmm128
Mnemonic::Vfmaddsub231ps,// VEX_Vfmaddsub231ps_ymm_ymm_ymmm256
Mnemonic::Vfmaddsub231pd,// VEX_Vfmaddsub231pd_xmm_xmm_xmmm128
Mnemonic::Vfmaddsub231pd,// VEX_Vfmaddsub231pd_ymm_ymm_ymmm256
Mnemonic::Vfmaddsub231ps,// EVEX_Vfmaddsub231ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfmaddsub231ps,// EVEX_Vfmaddsub231ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfmaddsub231ps,// EVEX_Vfmaddsub231ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfmaddsub231pd,// EVEX_Vfmaddsub231pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfmaddsub231pd,// EVEX_Vfmaddsub231pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfmaddsub231pd,// EVEX_Vfmaddsub231pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfmsubadd231ps,// VEX_Vfmsubadd231ps_xmm_xmm_xmmm128
Mnemonic::Vfmsubadd231ps,// VEX_Vfmsubadd231ps_ymm_ymm_ymmm256
Mnemonic::Vfmsubadd231pd,// VEX_Vfmsubadd231pd_xmm_xmm_xmmm128
Mnemonic::Vfmsubadd231pd,// VEX_Vfmsubadd231pd_ymm_ymm_ymmm256
Mnemonic::Vfmsubadd231ps,// EVEX_Vfmsubadd231ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfmsubadd231ps,// EVEX_Vfmsubadd231ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfmsubadd231ps,// EVEX_Vfmsubadd231ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfmsubadd231pd,// EVEX_Vfmsubadd231pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfmsubadd231pd,// EVEX_Vfmsubadd231pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfmsubadd231pd,// EVEX_Vfmsubadd231pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfmadd231ps,// VEX_Vfmadd231ps_xmm_xmm_xmmm128
Mnemonic::Vfmadd231ps,// VEX_Vfmadd231ps_ymm_ymm_ymmm256
Mnemonic::Vfmadd231pd,// VEX_Vfmadd231pd_xmm_xmm_xmmm128
Mnemonic::Vfmadd231pd,// VEX_Vfmadd231pd_ymm_ymm_ymmm256
Mnemonic::Vfmadd231ps,// EVEX_Vfmadd231ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfmadd231ps,// EVEX_Vfmadd231ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfmadd231ps,// EVEX_Vfmadd231ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfmadd231pd,// EVEX_Vfmadd231pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfmadd231pd,// EVEX_Vfmadd231pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfmadd231pd,// EVEX_Vfmadd231pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfmadd231ss,// VEX_Vfmadd231ss_xmm_xmm_xmmm32
Mnemonic::Vfmadd231sd,// VEX_Vfmadd231sd_xmm_xmm_xmmm64
Mnemonic::Vfmadd231ss,// EVEX_Vfmadd231ss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Vfmadd231sd,// EVEX_Vfmadd231sd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Vfmsub231ps,// VEX_Vfmsub231ps_xmm_xmm_xmmm128
Mnemonic::Vfmsub231ps,// VEX_Vfmsub231ps_ymm_ymm_ymmm256
Mnemonic::Vfmsub231pd,// VEX_Vfmsub231pd_xmm_xmm_xmmm128
Mnemonic::Vfmsub231pd,// VEX_Vfmsub231pd_ymm_ymm_ymmm256
Mnemonic::Vfmsub231ps,// EVEX_Vfmsub231ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfmsub231ps,// EVEX_Vfmsub231ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfmsub231ps,// EVEX_Vfmsub231ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfmsub231pd,// EVEX_Vfmsub231pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfmsub231pd,// EVEX_Vfmsub231pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfmsub231pd,// EVEX_Vfmsub231pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfmsub231ss,// VEX_Vfmsub231ss_xmm_xmm_xmmm32
Mnemonic::Vfmsub231sd,// VEX_Vfmsub231sd_xmm_xmm_xmmm64
Mnemonic::Vfmsub231ss,// EVEX_Vfmsub231ss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Vfmsub231sd,// EVEX_Vfmsub231sd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Vfnmadd231ps,// VEX_Vfnmadd231ps_xmm_xmm_xmmm128
Mnemonic::Vfnmadd231ps,// VEX_Vfnmadd231ps_ymm_ymm_ymmm256
Mnemonic::Vfnmadd231pd,// VEX_Vfnmadd231pd_xmm_xmm_xmmm128
Mnemonic::Vfnmadd231pd,// VEX_Vfnmadd231pd_ymm_ymm_ymmm256
Mnemonic::Vfnmadd231ps,// EVEX_Vfnmadd231ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfnmadd231ps,// EVEX_Vfnmadd231ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfnmadd231ps,// EVEX_Vfnmadd231ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfnmadd231pd,// EVEX_Vfnmadd231pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfnmadd231pd,// EVEX_Vfnmadd231pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfnmadd231pd,// EVEX_Vfnmadd231pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfnmadd231ss,// VEX_Vfnmadd231ss_xmm_xmm_xmmm32
Mnemonic::Vfnmadd231sd,// VEX_Vfnmadd231sd_xmm_xmm_xmmm64
Mnemonic::Vfnmadd231ss,// EVEX_Vfnmadd231ss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Vfnmadd231sd,// EVEX_Vfnmadd231sd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Vfnmsub231ps,// VEX_Vfnmsub231ps_xmm_xmm_xmmm128
Mnemonic::Vfnmsub231ps,// VEX_Vfnmsub231ps_ymm_ymm_ymmm256
Mnemonic::Vfnmsub231pd,// VEX_Vfnmsub231pd_xmm_xmm_xmmm128
Mnemonic::Vfnmsub231pd,// VEX_Vfnmsub231pd_ymm_ymm_ymmm256
Mnemonic::Vfnmsub231ps,// EVEX_Vfnmsub231ps_xmm_k1z_xmm_xmmm128b32
Mnemonic::Vfnmsub231ps,// EVEX_Vfnmsub231ps_ymm_k1z_ymm_ymmm256b32
Mnemonic::Vfnmsub231ps,// EVEX_Vfnmsub231ps_zmm_k1z_zmm_zmmm512b32_er
Mnemonic::Vfnmsub231pd,// EVEX_Vfnmsub231pd_xmm_k1z_xmm_xmmm128b64
Mnemonic::Vfnmsub231pd,// EVEX_Vfnmsub231pd_ymm_k1z_ymm_ymmm256b64
Mnemonic::Vfnmsub231pd,// EVEX_Vfnmsub231pd_zmm_k1z_zmm_zmmm512b64_er
Mnemonic::Vfnmsub231ss,// VEX_Vfnmsub231ss_xmm_xmm_xmmm32
Mnemonic::Vfnmsub231sd,// VEX_Vfnmsub231sd_xmm_xmm_xmmm64
Mnemonic::Vfnmsub231ss,// EVEX_Vfnmsub231ss_xmm_k1z_xmm_xmmm32_er
Mnemonic::Vfnmsub231sd,// EVEX_Vfnmsub231sd_xmm_k1z_xmm_xmmm64_er
Mnemonic::Vpconflictd,// EVEX_Vpconflictd_xmm_k1z_xmmm128b32
Mnemonic::Vpconflictd,// EVEX_Vpconflictd_ymm_k1z_ymmm256b32
Mnemonic::Vpconflictd,// EVEX_Vpconflictd_zmm_k1z_zmmm512b32
Mnemonic::Vpconflictq,// EVEX_Vpconflictq_xmm_k1z_xmmm128b64
Mnemonic::Vpconflictq,// EVEX_Vpconflictq_ymm_k1z_ymmm256b64
Mnemonic::Vpconflictq,// EVEX_Vpconflictq_zmm_k1z_zmmm512b64
Mnemonic::Vgatherpf0dps,// EVEX_Vgatherpf0dps_vm32z_k1
Mnemonic::Vgatherpf0dpd,// EVEX_Vgatherpf0dpd_vm32y_k1
Mnemonic::Vgatherpf1dps,// EVEX_Vgatherpf1dps_vm32z_k1
Mnemonic::Vgatherpf1dpd,// EVEX_Vgatherpf1dpd_vm32y_k1
Mnemonic::Vscatterpf0dps,// EVEX_Vscatterpf0dps_vm32z_k1
Mnemonic::Vscatterpf0dpd,// EVEX_Vscatterpf0dpd_vm32y_k1
Mnemonic::Vscatterpf1dps,// EVEX_Vscatterpf1dps_vm32z_k1
Mnemonic::Vscatterpf1dpd,// EVEX_Vscatterpf1dpd_vm32y_k1
Mnemonic::Vgatherpf0qps,// EVEX_Vgatherpf0qps_vm64z_k1
Mnemonic::Vgatherpf0qpd,// EVEX_Vgatherpf0qpd_vm64z_k1
Mnemonic::Vgatherpf1qps,// EVEX_Vgatherpf1qps_vm64z_k1
Mnemonic::Vgatherpf1qpd,// EVEX_Vgatherpf1qpd_vm64z_k1
Mnemonic::Vscatterpf0qps,// EVEX_Vscatterpf0qps_vm64z_k1
Mnemonic::Vscatterpf0qpd,// EVEX_Vscatterpf0qpd_vm64z_k1
Mnemonic::Vscatterpf1qps,// EVEX_Vscatterpf1qps_vm64z_k1
Mnemonic::Vscatterpf1qpd,// EVEX_Vscatterpf1qpd_vm64z_k1
Mnemonic::Sha1nexte,// Sha1nexte_xmm_xmmm128
Mnemonic::Vexp2ps,// EVEX_Vexp2ps_zmm_k1z_zmmm512b32_sae
Mnemonic::Vexp2pd,// EVEX_Vexp2pd_zmm_k1z_zmmm512b64_sae
Mnemonic::Sha1msg1,// Sha1msg1_xmm_xmmm128
Mnemonic::Sha1msg2,// Sha1msg2_xmm_xmmm128
Mnemonic::Vrcp28ps,// EVEX_Vrcp28ps_zmm_k1z_zmmm512b32_sae
Mnemonic::Vrcp28pd,// EVEX_Vrcp28pd_zmm_k1z_zmmm512b64_sae
Mnemonic::Sha256rnds2,// Sha256rnds2_xmm_xmmm128
Mnemonic::Vrcp28ss,// EVEX_Vrcp28ss_xmm_k1z_xmm_xmmm32_sae
Mnemonic::Vrcp28sd,// EVEX_Vrcp28sd_xmm_k1z_xmm_xmmm64_sae
Mnemonic::Sha256msg1,// Sha256msg1_xmm_xmmm128
Mnemonic::Vrsqrt28ps,// EVEX_Vrsqrt28ps_zmm_k1z_zmmm512b32_sae
Mnemonic::Vrsqrt28pd,// EVEX_Vrsqrt28pd_zmm_k1z_zmmm512b64_sae
Mnemonic::Sha256msg2,// Sha256msg2_xmm_xmmm128
Mnemonic::Vrsqrt28ss,// EVEX_Vrsqrt28ss_xmm_k1z_xmm_xmmm32_sae
Mnemonic::Vrsqrt28sd,// EVEX_Vrsqrt28sd_xmm_k1z_xmm_xmmm64_sae
Mnemonic::Gf2p8mulb,// Gf2p8mulb_xmm_xmmm128
Mnemonic::Vgf2p8mulb,// VEX_Vgf2p8mulb_xmm_xmm_xmmm128
Mnemonic::Vgf2p8mulb,// VEX_Vgf2p8mulb_ymm_ymm_ymmm256
Mnemonic::Vgf2p8mulb,// EVEX_Vgf2p8mulb_xmm_k1z_xmm_xmmm128
Mnemonic::Vgf2p8mulb,// EVEX_Vgf2p8mulb_ymm_k1z_ymm_ymmm256
Mnemonic::Vgf2p8mulb,// EVEX_Vgf2p8mulb_zmm_k1z_zmm_zmmm512
Mnemonic::Aesimc,// Aesimc_xmm_xmmm128
Mnemonic::Vaesimc,// VEX_Vaesimc_xmm_xmmm128
Mnemonic::Aesenc,// Aesenc_xmm_xmmm128
Mnemonic::Vaesenc,// VEX_Vaesenc_xmm_xmm_xmmm128
Mnemonic::Vaesenc,// VEX_Vaesenc_ymm_ymm_ymmm256
Mnemonic::Vaesenc,// EVEX_Vaesenc_xmm_xmm_xmmm128
Mnemonic::Vaesenc,// EVEX_Vaesenc_ymm_ymm_ymmm256
Mnemonic::Vaesenc,// EVEX_Vaesenc_zmm_zmm_zmmm512
Mnemonic::Aesenclast,// Aesenclast_xmm_xmmm128
Mnemonic::Vaesenclast,// VEX_Vaesenclast_xmm_xmm_xmmm128
Mnemonic::Vaesenclast,// VEX_Vaesenclast_ymm_ymm_ymmm256
Mnemonic::Vaesenclast,// EVEX_Vaesenclast_xmm_xmm_xmmm128
Mnemonic::Vaesenclast,// EVEX_Vaesenclast_ymm_ymm_ymmm256
Mnemonic::Vaesenclast,// EVEX_Vaesenclast_zmm_zmm_zmmm512
Mnemonic::Aesdec,// Aesdec_xmm_xmmm128
Mnemonic::Vaesdec,// VEX_Vaesdec_xmm_xmm_xmmm128
Mnemonic::Vaesdec,// VEX_Vaesdec_ymm_ymm_ymmm256
Mnemonic::Vaesdec,// EVEX_Vaesdec_xmm_xmm_xmmm128
Mnemonic::Vaesdec,// EVEX_Vaesdec_ymm_ymm_ymmm256
Mnemonic::Vaesdec,// EVEX_Vaesdec_zmm_zmm_zmmm512
Mnemonic::Aesdeclast,// Aesdeclast_xmm_xmmm128
Mnemonic::Vaesdeclast,// VEX_Vaesdeclast_xmm_xmm_xmmm128
Mnemonic::Vaesdeclast,// VEX_Vaesdeclast_ymm_ymm_ymmm256
Mnemonic::Vaesdeclast,// EVEX_Vaesdeclast_xmm_xmm_xmmm128
Mnemonic::Vaesdeclast,// EVEX_Vaesdeclast_ymm_ymm_ymmm256
Mnemonic::Vaesdeclast,// EVEX_Vaesdeclast_zmm_zmm_zmmm512
Mnemonic::Movbe,// Movbe_r16_m16
Mnemonic::Movbe,// Movbe_r32_m32
Mnemonic::Movbe,// Movbe_r64_m64
Mnemonic::Crc32,// Crc32_r32_rm8
Mnemonic::Crc32,// Crc32_r64_rm8
Mnemonic::Movbe,// Movbe_m16_r16
Mnemonic::Movbe,// Movbe_m32_r32
Mnemonic::Movbe,// Movbe_m64_r64
Mnemonic::Crc32,// Crc32_r32_rm16
Mnemonic::Crc32,// Crc32_r32_rm32
Mnemonic::Crc32,// Crc32_r64_rm64
Mnemonic::Andn,// VEX_Andn_r32_r32_rm32
Mnemonic::Andn,// VEX_Andn_r64_r64_rm64
Mnemonic::Blsr,// VEX_Blsr_r32_rm32
Mnemonic::Blsr,// VEX_Blsr_r64_rm64
Mnemonic::Blsmsk,// VEX_Blsmsk_r32_rm32
Mnemonic::Blsmsk,// VEX_Blsmsk_r64_rm64
Mnemonic::Blsi,// VEX_Blsi_r32_rm32
Mnemonic::Blsi,// VEX_Blsi_r64_rm64
Mnemonic::Bzhi,// VEX_Bzhi_r32_rm32_r32
Mnemonic::Bzhi,// VEX_Bzhi_r64_rm64_r64
Mnemonic::Wrussd,// Wrussd_m32_r32
Mnemonic::Wrussq,// Wrussq_m64_r64
Mnemonic::Pext,// VEX_Pext_r32_r32_rm32
Mnemonic::Pext,// VEX_Pext_r64_r64_rm64
Mnemonic::Pdep,// VEX_Pdep_r32_r32_rm32
Mnemonic::Pdep,// VEX_Pdep_r64_r64_rm64
Mnemonic::Wrssd,// Wrssd_m32_r32
Mnemonic::Wrssq,// Wrssq_m64_r64
Mnemonic::Adcx,// Adcx_r32_rm32
Mnemonic::Adcx,// Adcx_r64_rm64
Mnemonic::Adox,// Adox_r32_rm32
Mnemonic::Adox,// Adox_r64_rm64
Mnemonic::Mulx,// VEX_Mulx_r32_r32_rm32
Mnemonic::Mulx,// VEX_Mulx_r64_r64_rm64
Mnemonic::Bextr,// VEX_Bextr_r32_rm32_r32
Mnemonic::Bextr,// VEX_Bextr_r64_rm64_r64
Mnemonic::Shlx,// VEX_Shlx_r32_rm32_r32
Mnemonic::Shlx,// VEX_Shlx_r64_rm64_r64
Mnemonic::Sarx,// VEX_Sarx_r32_rm32_r32
Mnemonic::Sarx,// VEX_Sarx_r64_rm64_r64
Mnemonic::Shrx,// VEX_Shrx_r32_rm32_r32
Mnemonic::Shrx,// VEX_Shrx_r64_rm64_r64
Mnemonic::Movdir64b,// Movdir64b_r16_m512
Mnemonic::Movdir64b,// Movdir64b_r32_m512
Mnemonic::Movdir64b,// Movdir64b_r64_m512
Mnemonic::Enqcmds,// Enqcmds_r16_m512
Mnemonic::Enqcmds,// Enqcmds_r32_m512
Mnemonic::Enqcmds,// Enqcmds_r64_m512
Mnemonic::Enqcmd,// Enqcmd_r16_m512
Mnemonic::Enqcmd,// Enqcmd_r32_m512
Mnemonic::Enqcmd,// Enqcmd_r64_m512
Mnemonic::Movdiri,// Movdiri_m32_r32
Mnemonic::Movdiri,// Movdiri_m64_r64
Mnemonic::Vpermq,// VEX_Vpermq_ymm_ymmm256_imm8
Mnemonic::Vpermq,// EVEX_Vpermq_ymm_k1z_ymmm256b64_imm8
Mnemonic::Vpermq,// EVEX_Vpermq_zmm_k1z_zmmm512b64_imm8
Mnemonic::Vpermpd,// VEX_Vpermpd_ymm_ymmm256_imm8
Mnemonic::Vpermpd,// EVEX_Vpermpd_ymm_k1z_ymmm256b64_imm8
Mnemonic::Vpermpd,// EVEX_Vpermpd_zmm_k1z_zmmm512b64_imm8
Mnemonic::Vpblendd,// VEX_Vpblendd_xmm_xmm_xmmm128_imm8
Mnemonic::Vpblendd,// VEX_Vpblendd_ymm_ymm_ymmm256_imm8
Mnemonic::Valignd,// EVEX_Valignd_xmm_k1z_xmm_xmmm128b32_imm8
Mnemonic::Valignd,// EVEX_Valignd_ymm_k1z_ymm_ymmm256b32_imm8
Mnemonic::Valignd,// EVEX_Valignd_zmm_k1z_zmm_zmmm512b32_imm8
Mnemonic::Valignq,// EVEX_Valignq_xmm_k1z_xmm_xmmm128b64_imm8
Mnemonic::Valignq,// EVEX_Valignq_ymm_k1z_ymm_ymmm256b64_imm8
Mnemonic::Valignq,// EVEX_Valignq_zmm_k1z_zmm_zmmm512b64_imm8
Mnemonic::Vpermilps,// VEX_Vpermilps_xmm_xmmm128_imm8
Mnemonic::Vpermilps,// VEX_Vpermilps_ymm_ymmm256_imm8
Mnemonic::Vpermilps,// EVEX_Vpermilps_xmm_k1z_xmmm128b32_imm8
Mnemonic::Vpermilps,// EVEX_Vpermilps_ymm_k1z_ymmm256b32_imm8
Mnemonic::Vpermilps,// EVEX_Vpermilps_zmm_k1z_zmmm512b32_imm8
Mnemonic::Vpermilpd,// VEX_Vpermilpd_xmm_xmmm128_imm8
Mnemonic::Vpermilpd,// VEX_Vpermilpd_ymm_ymmm256_imm8
Mnemonic::Vpermilpd,// EVEX_Vpermilpd_xmm_k1z_xmmm128b64_imm8
Mnemonic::Vpermilpd,// EVEX_Vpermilpd_ymm_k1z_ymmm256b64_imm8
Mnemonic::Vpermilpd,// EVEX_Vpermilpd_zmm_k1z_zmmm512b64_imm8
Mnemonic::Vperm2f128,// VEX_Vperm2f128_ymm_ymm_ymmm256_imm8
Mnemonic::Roundps,// Roundps_xmm_xmmm128_imm8
Mnemonic::Vroundps,// VEX_Vroundps_xmm_xmmm128_imm8
Mnemonic::Vroundps,// VEX_Vroundps_ymm_ymmm256_imm8
Mnemonic::Vrndscaleps,// EVEX_Vrndscaleps_xmm_k1z_xmmm128b32_imm8
Mnemonic::Vrndscaleps,// EVEX_Vrndscaleps_ymm_k1z_ymmm256b32_imm8
Mnemonic::Vrndscaleps,// EVEX_Vrndscaleps_zmm_k1z_zmmm512b32_imm8_sae
Mnemonic::Roundpd,// Roundpd_xmm_xmmm128_imm8
Mnemonic::Vroundpd,// VEX_Vroundpd_xmm_xmmm128_imm8
Mnemonic::Vroundpd,// VEX_Vroundpd_ymm_ymmm256_imm8
Mnemonic::Vrndscalepd,// EVEX_Vrndscalepd_xmm_k1z_xmmm128b64_imm8
Mnemonic::Vrndscalepd,// EVEX_Vrndscalepd_ymm_k1z_ymmm256b64_imm8
Mnemonic::Vrndscalepd,// EVEX_Vrndscalepd_zmm_k1z_zmmm512b64_imm8_sae
Mnemonic::Roundss,// Roundss_xmm_xmmm32_imm8
Mnemonic::Vroundss,// VEX_Vroundss_xmm_xmm_xmmm32_imm8
Mnemonic::Vrndscaless,// EVEX_Vrndscaless_xmm_k1z_xmm_xmmm32_imm8_sae
Mnemonic::Roundsd,// Roundsd_xmm_xmmm64_imm8
Mnemonic::Vroundsd,// VEX_Vroundsd_xmm_xmm_xmmm64_imm8
Mnemonic::Vrndscalesd,// EVEX_Vrndscalesd_xmm_k1z_xmm_xmmm64_imm8_sae
Mnemonic::Blendps,// Blendps_xmm_xmmm128_imm8
Mnemonic::Vblendps,// VEX_Vblendps_xmm_xmm_xmmm128_imm8
Mnemonic::Vblendps,// VEX_Vblendps_ymm_ymm_ymmm256_imm8
Mnemonic::Blendpd,// Blendpd_xmm_xmmm128_imm8
Mnemonic::Vblendpd,// VEX_Vblendpd_xmm_xmm_xmmm128_imm8
Mnemonic::Vblendpd,// VEX_Vblendpd_ymm_ymm_ymmm256_imm8
Mnemonic::Pblendw,// Pblendw_xmm_xmmm128_imm8
Mnemonic::Vpblendw,// VEX_Vpblendw_xmm_xmm_xmmm128_imm8
Mnemonic::Vpblendw,// VEX_Vpblendw_ymm_ymm_ymmm256_imm8
Mnemonic::Palignr,// Palignr_mm_mmm64_imm8
Mnemonic::Palignr,// Palignr_xmm_xmmm128_imm8
Mnemonic::Vpalignr,// VEX_Vpalignr_xmm_xmm_xmmm128_imm8
Mnemonic::Vpalignr,// VEX_Vpalignr_ymm_ymm_ymmm256_imm8
Mnemonic::Vpalignr,// EVEX_Vpalignr_xmm_k1z_xmm_xmmm128_imm8
Mnemonic::Vpalignr,// EVEX_Vpalignr_ymm_k1z_ymm_ymmm256_imm8
Mnemonic::Vpalignr,// EVEX_Vpalignr_zmm_k1z_zmm_zmmm512_imm8
Mnemonic::Pextrb,// Pextrb_r32m8_xmm_imm8
Mnemonic::Pextrb,// Pextrb_r64m8_xmm_imm8
Mnemonic::Vpextrb,// VEX_Vpextrb_r32m8_xmm_imm8
Mnemonic::Vpextrb,// VEX_Vpextrb_r64m8_xmm_imm8
Mnemonic::Vpextrb,// EVEX_Vpextrb_r32m8_xmm_imm8
Mnemonic::Vpextrb,// EVEX_Vpextrb_r64m8_xmm_imm8
Mnemonic::Pextrw,// Pextrw_r32m16_xmm_imm8
Mnemonic::Pextrw,// Pextrw_r64m16_xmm_imm8
Mnemonic::Vpextrw,// VEX_Vpextrw_r32m16_xmm_imm8
Mnemonic::Vpextrw,// VEX_Vpextrw_r64m16_xmm_imm8
Mnemonic::Vpextrw,// EVEX_Vpextrw_r32m16_xmm_imm8
Mnemonic::Vpextrw,// EVEX_Vpextrw_r64m16_xmm_imm8
Mnemonic::Pextrd,// Pextrd_rm32_xmm_imm8
Mnemonic::Pextrq,// Pextrq_rm64_xmm_imm8
Mnemonic::Vpextrd,// VEX_Vpextrd_rm32_xmm_imm8
Mnemonic::Vpextrq,// VEX_Vpextrq_rm64_xmm_imm8
Mnemonic::Vpextrd,// EVEX_Vpextrd_rm32_xmm_imm8
Mnemonic::Vpextrq,// EVEX_Vpextrq_rm64_xmm_imm8
Mnemonic::Extractps,// Extractps_rm32_xmm_imm8
Mnemonic::Extractps,// Extractps_r64m32_xmm_imm8
Mnemonic::Vextractps,// VEX_Vextractps_rm32_xmm_imm8
Mnemonic::Vextractps,// VEX_Vextractps_r64m32_xmm_imm8
Mnemonic::Vextractps,// EVEX_Vextractps_rm32_xmm_imm8
Mnemonic::Vextractps,// EVEX_Vextractps_r64m32_xmm_imm8
Mnemonic::Vinsertf128,// VEX_Vinsertf128_ymm_ymm_xmmm128_imm8
Mnemonic::Vinsertf32x4,// EVEX_Vinsertf32x4_ymm_k1z_ymm_xmmm128_imm8
Mnemonic::Vinsertf32x4,// EVEX_Vinsertf32x4_zmm_k1z_zmm_xmmm128_imm8
Mnemonic::Vinsertf64x2,// EVEX_Vinsertf64x2_ymm_k1z_ymm_xmmm128_imm8
Mnemonic::Vinsertf64x2,// EVEX_Vinsertf64x2_zmm_k1z_zmm_xmmm128_imm8
Mnemonic::Vextractf128,// VEX_Vextractf128_xmmm128_ymm_imm8
Mnemonic::Vextractf32x4,// EVEX_Vextractf32x4_xmmm128_k1z_ymm_imm8
Mnemonic::Vextractf32x4,// EVEX_Vextractf32x4_xmmm128_k1z_zmm_imm8
Mnemonic::Vextractf64x2,// EVEX_Vextractf64x2_xmmm128_k1z_ymm_imm8
Mnemonic::Vextractf64x2,// EVEX_Vextractf64x2_xmmm128_k1z_zmm_imm8
Mnemonic::Vinsertf32x8,// EVEX_Vinsertf32x8_zmm_k1z_zmm_ymmm256_imm8
Mnemonic::Vinsertf64x4,// EVEX_Vinsertf64x4_zmm_k1z_zmm_ymmm256_imm8
Mnemonic::Vextractf32x8,// EVEX_Vextractf32x8_ymmm256_k1z_zmm_imm8
Mnemonic::Vextractf64x4,// EVEX_Vextractf64x4_ymmm256_k1z_zmm_imm8
Mnemonic::Vcvtps2ph,// VEX_Vcvtps2ph_xmmm64_xmm_imm8
Mnemonic::Vcvtps2ph,// VEX_Vcvtps2ph_xmmm128_ymm_imm8
Mnemonic::Vcvtps2ph,// EVEX_Vcvtps2ph_xmmm64_k1z_xmm_imm8
Mnemonic::Vcvtps2ph,// EVEX_Vcvtps2ph_xmmm128_k1z_ymm_imm8
Mnemonic::Vcvtps2ph,// EVEX_Vcvtps2ph_ymmm256_k1z_zmm_imm8_sae
Mnemonic::Vpcmpud,// EVEX_Vpcmpud_kr_k1_xmm_xmmm128b32_imm8
Mnemonic::Vpcmpud,// EVEX_Vpcmpud_kr_k1_ymm_ymmm256b32_imm8
Mnemonic::Vpcmpud,// EVEX_Vpcmpud_kr_k1_zmm_zmmm512b32_imm8
Mnemonic::Vpcmpuq,// EVEX_Vpcmpuq_kr_k1_xmm_xmmm128b64_imm8
Mnemonic::Vpcmpuq,// EVEX_Vpcmpuq_kr_k1_ymm_ymmm256b64_imm8
Mnemonic::Vpcmpuq,// EVEX_Vpcmpuq_kr_k1_zmm_zmmm512b64_imm8
Mnemonic::Vpcmpd,// EVEX_Vpcmpd_kr_k1_xmm_xmmm128b32_imm8
Mnemonic::Vpcmpd,// EVEX_Vpcmpd_kr_k1_ymm_ymmm256b32_imm8
Mnemonic::Vpcmpd,// EVEX_Vpcmpd_kr_k1_zmm_zmmm512b32_imm8
Mnemonic::Vpcmpq,// EVEX_Vpcmpq_kr_k1_xmm_xmmm128b64_imm8
Mnemonic::Vpcmpq,// EVEX_Vpcmpq_kr_k1_ymm_ymmm256b64_imm8
Mnemonic::Vpcmpq,// EVEX_Vpcmpq_kr_k1_zmm_zmmm512b64_imm8
Mnemonic::Pinsrb,// Pinsrb_xmm_r32m8_imm8
Mnemonic::Pinsrb,// Pinsrb_xmm_r64m8_imm8
Mnemonic::Vpinsrb,// VEX_Vpinsrb_xmm_xmm_r32m8_imm8
Mnemonic::Vpinsrb,// VEX_Vpinsrb_xmm_xmm_r64m8_imm8
Mnemonic::Vpinsrb,// EVEX_Vpinsrb_xmm_xmm_r32m8_imm8
Mnemonic::Vpinsrb,// EVEX_Vpinsrb_xmm_xmm_r64m8_imm8
Mnemonic::Insertps,// Insertps_xmm_xmmm32_imm8
Mnemonic::Vinsertps,// VEX_Vinsertps_xmm_xmm_xmmm32_imm8
Mnemonic::Vinsertps,// EVEX_Vinsertps_xmm_xmm_xmmm32_imm8
Mnemonic::Pinsrd,// Pinsrd_xmm_rm32_imm8
Mnemonic::Pinsrq,// Pinsrq_xmm_rm64_imm8
Mnemonic::Vpinsrd,// VEX_Vpinsrd_xmm_xmm_rm32_imm8
Mnemonic::Vpinsrq,// VEX_Vpinsrq_xmm_xmm_rm64_imm8
Mnemonic::Vpinsrd,// EVEX_Vpinsrd_xmm_xmm_rm32_imm8
Mnemonic::Vpinsrq,// EVEX_Vpinsrq_xmm_xmm_rm64_imm8
Mnemonic::Vshuff32x4,// EVEX_Vshuff32x4_ymm_k1z_ymm_ymmm256b32_imm8
Mnemonic::Vshuff32x4,// EVEX_Vshuff32x4_zmm_k1z_zmm_zmmm512b32_imm8
Mnemonic::Vshuff64x2,// EVEX_Vshuff64x2_ymm_k1z_ymm_ymmm256b64_imm8
Mnemonic::Vshuff64x2,// EVEX_Vshuff64x2_zmm_k1z_zmm_zmmm512b64_imm8
Mnemonic::Vpternlogd,// EVEX_Vpternlogd_xmm_k1z_xmm_xmmm128b32_imm8
Mnemonic::Vpternlogd,// EVEX_Vpternlogd_ymm_k1z_ymm_ymmm256b32_imm8
Mnemonic::Vpternlogd,// EVEX_Vpternlogd_zmm_k1z_zmm_zmmm512b32_imm8
Mnemonic::Vpternlogq,// EVEX_Vpternlogq_xmm_k1z_xmm_xmmm128b64_imm8
Mnemonic::Vpternlogq,// EVEX_Vpternlogq_ymm_k1z_ymm_ymmm256b64_imm8
Mnemonic::Vpternlogq,// EVEX_Vpternlogq_zmm_k1z_zmm_zmmm512b64_imm8
Mnemonic::Vgetmantps,// EVEX_Vgetmantps_xmm_k1z_xmmm128b32_imm8
Mnemonic::Vgetmantps,// EVEX_Vgetmantps_ymm_k1z_ymmm256b32_imm8
Mnemonic::Vgetmantps,// EVEX_Vgetmantps_zmm_k1z_zmmm512b32_imm8_sae
Mnemonic::Vgetmantpd,// EVEX_Vgetmantpd_xmm_k1z_xmmm128b64_imm8
Mnemonic::Vgetmantpd,// EVEX_Vgetmantpd_ymm_k1z_ymmm256b64_imm8
Mnemonic::Vgetmantpd,// EVEX_Vgetmantpd_zmm_k1z_zmmm512b64_imm8_sae
Mnemonic::Vgetmantss,// EVEX_Vgetmantss_xmm_k1z_xmm_xmmm32_imm8_sae
Mnemonic::Vgetmantsd,// EVEX_Vgetmantsd_xmm_k1z_xmm_xmmm64_imm8_sae
Mnemonic::Kshiftrb,// VEX_Kshiftrb_kr_kr_imm8
Mnemonic::Kshiftrw,// VEX_Kshiftrw_kr_kr_imm8
Mnemonic::Kshiftrd,// VEX_Kshiftrd_kr_kr_imm8
Mnemonic::Kshiftrq,// VEX_Kshiftrq_kr_kr_imm8
Mnemonic::Kshiftlb,// VEX_Kshiftlb_kr_kr_imm8
Mnemonic::Kshiftlw,// VEX_Kshiftlw_kr_kr_imm8
Mnemonic::Kshiftld,// VEX_Kshiftld_kr_kr_imm8
Mnemonic::Kshiftlq,// VEX_Kshiftlq_kr_kr_imm8
Mnemonic::Vinserti128,// VEX_Vinserti128_ymm_ymm_xmmm128_imm8
Mnemonic::Vinserti32x4,// EVEX_Vinserti32x4_ymm_k1z_ymm_xmmm128_imm8
Mnemonic::Vinserti32x4,// EVEX_Vinserti32x4_zmm_k1z_zmm_xmmm128_imm8
Mnemonic::Vinserti64x2,// EVEX_Vinserti64x2_ymm_k1z_ymm_xmmm128_imm8
Mnemonic::Vinserti64x2,// EVEX_Vinserti64x2_zmm_k1z_zmm_xmmm128_imm8
Mnemonic::Vextracti128,// VEX_Vextracti128_xmmm128_ymm_imm8
Mnemonic::Vextracti32x4,// EVEX_Vextracti32x4_xmmm128_k1z_ymm_imm8
Mnemonic::Vextracti32x4,// EVEX_Vextracti32x4_xmmm128_k1z_zmm_imm8
Mnemonic::Vextracti64x2,// EVEX_Vextracti64x2_xmmm128_k1z_ymm_imm8
Mnemonic::Vextracti64x2,// EVEX_Vextracti64x2_xmmm128_k1z_zmm_imm8
Mnemonic::Vinserti32x8,// EVEX_Vinserti32x8_zmm_k1z_zmm_ymmm256_imm8
Mnemonic::Vinserti64x4,// EVEX_Vinserti64x4_zmm_k1z_zmm_ymmm256_imm8
Mnemonic::Vextracti32x8,// EVEX_Vextracti32x8_ymmm256_k1z_zmm_imm8
Mnemonic::Vextracti64x4,// EVEX_Vextracti64x4_ymmm256_k1z_zmm_imm8
Mnemonic::Vpcmpub,// EVEX_Vpcmpub_kr_k1_xmm_xmmm128_imm8
Mnemonic::Vpcmpub,// EVEX_Vpcmpub_kr_k1_ymm_ymmm256_imm8
Mnemonic::Vpcmpub,// EVEX_Vpcmpub_kr_k1_zmm_zmmm512_imm8
Mnemonic::Vpcmpuw,// EVEX_Vpcmpuw_kr_k1_xmm_xmmm128_imm8
Mnemonic::Vpcmpuw,// EVEX_Vpcmpuw_kr_k1_ymm_ymmm256_imm8
Mnemonic::Vpcmpuw,// EVEX_Vpcmpuw_kr_k1_zmm_zmmm512_imm8
Mnemonic::Vpcmpb,// EVEX_Vpcmpb_kr_k1_xmm_xmmm128_imm8
Mnemonic::Vpcmpb,// EVEX_Vpcmpb_kr_k1_ymm_ymmm256_imm8
Mnemonic::Vpcmpb,// EVEX_Vpcmpb_kr_k1_zmm_zmmm512_imm8
Mnemonic::Vpcmpw,// EVEX_Vpcmpw_kr_k1_xmm_xmmm128_imm8
Mnemonic::Vpcmpw,// EVEX_Vpcmpw_kr_k1_ymm_ymmm256_imm8
Mnemonic::Vpcmpw,// EVEX_Vpcmpw_kr_k1_zmm_zmmm512_imm8
Mnemonic::Dpps,// Dpps_xmm_xmmm128_imm8
Mnemonic::Vdpps,// VEX_Vdpps_xmm_xmm_xmmm128_imm8
Mnemonic::Vdpps,// VEX_Vdpps_ymm_ymm_ymmm256_imm8
Mnemonic::Dppd,// Dppd_xmm_xmmm128_imm8
Mnemonic::Vdppd,// VEX_Vdppd_xmm_xmm_xmmm128_imm8
Mnemonic::Mpsadbw,// Mpsadbw_xmm_xmmm128_imm8
Mnemonic::Vmpsadbw,// VEX_Vmpsadbw_xmm_xmm_xmmm128_imm8
Mnemonic::Vmpsadbw,// VEX_Vmpsadbw_ymm_ymm_ymmm256_imm8
Mnemonic::Vdbpsadbw,// EVEX_Vdbpsadbw_xmm_k1z_xmm_xmmm128_imm8
Mnemonic::Vdbpsadbw,// EVEX_Vdbpsadbw_ymm_k1z_ymm_ymmm256_imm8
Mnemonic::Vdbpsadbw,// EVEX_Vdbpsadbw_zmm_k1z_zmm_zmmm512_imm8
Mnemonic::Vshufi32x4,// EVEX_Vshufi32x4_ymm_k1z_ymm_ymmm256b32_imm8
Mnemonic::Vshufi32x4,// EVEX_Vshufi32x4_zmm_k1z_zmm_zmmm512b32_imm8
Mnemonic::Vshufi64x2,// EVEX_Vshufi64x2_ymm_k1z_ymm_ymmm256b64_imm8
Mnemonic::Vshufi64x2,// EVEX_Vshufi64x2_zmm_k1z_zmm_zmmm512b64_imm8
Mnemonic::Pclmulqdq,// Pclmulqdq_xmm_xmmm128_imm8
Mnemonic::Vpclmulqdq,// VEX_Vpclmulqdq_xmm_xmm_xmmm128_imm8
Mnemonic::Vpclmulqdq,// VEX_Vpclmulqdq_ymm_ymm_ymmm256_imm8
Mnemonic::Vpclmulqdq,// EVEX_Vpclmulqdq_xmm_xmm_xmmm128_imm8
Mnemonic::Vpclmulqdq,// EVEX_Vpclmulqdq_ymm_ymm_ymmm256_imm8
Mnemonic::Vpclmulqdq,// EVEX_Vpclmulqdq_zmm_zmm_zmmm512_imm8
Mnemonic::Vperm2i128,// VEX_Vperm2i128_ymm_ymm_ymmm256_imm8
Mnemonic::Vpermil2ps,// VEX_Vpermil2ps_xmm_xmm_xmmm128_xmm_imm4
Mnemonic::Vpermil2ps,// VEX_Vpermil2ps_ymm_ymm_ymmm256_ymm_imm4
Mnemonic::Vpermil2ps,// VEX_Vpermil2ps_xmm_xmm_xmm_xmmm128_imm4
Mnemonic::Vpermil2ps,// VEX_Vpermil2ps_ymm_ymm_ymm_ymmm256_imm4
Mnemonic::Vpermil2pd,// VEX_Vpermil2pd_xmm_xmm_xmmm128_xmm_imm4
Mnemonic::Vpermil2pd,// VEX_Vpermil2pd_ymm_ymm_ymmm256_ymm_imm4
Mnemonic::Vpermil2pd,// VEX_Vpermil2pd_xmm_xmm_xmm_xmmm128_imm4
Mnemonic::Vpermil2pd,// VEX_Vpermil2pd_ymm_ymm_ymm_ymmm256_imm4
Mnemonic::Vblendvps,// VEX_Vblendvps_xmm_xmm_xmmm128_xmm
Mnemonic::Vblendvps,// VEX_Vblendvps_ymm_ymm_ymmm256_ymm
Mnemonic::Vblendvpd,// VEX_Vblendvpd_xmm_xmm_xmmm128_xmm
Mnemonic::Vblendvpd,// VEX_Vblendvpd_ymm_ymm_ymmm256_ymm
Mnemonic::Vpblendvb,// VEX_Vpblendvb_xmm_xmm_xmmm128_xmm
Mnemonic::Vpblendvb,// VEX_Vpblendvb_ymm_ymm_ymmm256_ymm
Mnemonic::Vrangeps,// EVEX_Vrangeps_xmm_k1z_xmm_xmmm128b32_imm8
Mnemonic::Vrangeps,// EVEX_Vrangeps_ymm_k1z_ymm_ymmm256b32_imm8
Mnemonic::Vrangeps,// EVEX_Vrangeps_zmm_k1z_zmm_zmmm512b32_imm8_sae
Mnemonic::Vrangepd,// EVEX_Vrangepd_xmm_k1z_xmm_xmmm128b64_imm8
Mnemonic::Vrangepd,// EVEX_Vrangepd_ymm_k1z_ymm_ymmm256b64_imm8
Mnemonic::Vrangepd,// EVEX_Vrangepd_zmm_k1z_zmm_zmmm512b64_imm8_sae
Mnemonic::Vrangess,// EVEX_Vrangess_xmm_k1z_xmm_xmmm32_imm8_sae
Mnemonic::Vrangesd,// EVEX_Vrangesd_xmm_k1z_xmm_xmmm64_imm8_sae
Mnemonic::Vfixupimmps,// EVEX_Vfixupimmps_xmm_k1z_xmm_xmmm128b32_imm8
Mnemonic::Vfixupimmps,// EVEX_Vfixupimmps_ymm_k1z_ymm_ymmm256b32_imm8
Mnemonic::Vfixupimmps,// EVEX_Vfixupimmps_zmm_k1z_zmm_zmmm512b32_imm8_sae
Mnemonic::Vfixupimmpd,// EVEX_Vfixupimmpd_xmm_k1z_xmm_xmmm128b64_imm8
Mnemonic::Vfixupimmpd,// EVEX_Vfixupimmpd_ymm_k1z_ymm_ymmm256b64_imm8
Mnemonic::Vfixupimmpd,// EVEX_Vfixupimmpd_zmm_k1z_zmm_zmmm512b64_imm8_sae
Mnemonic::Vfixupimmss,// EVEX_Vfixupimmss_xmm_k1z_xmm_xmmm32_imm8_sae
Mnemonic::Vfixupimmsd,// EVEX_Vfixupimmsd_xmm_k1z_xmm_xmmm64_imm8_sae
Mnemonic::Vreduceps,// EVEX_Vreduceps_xmm_k1z_xmmm128b32_imm8
Mnemonic::Vreduceps,// EVEX_Vreduceps_ymm_k1z_ymmm256b32_imm8
Mnemonic::Vreduceps,// EVEX_Vreduceps_zmm_k1z_zmmm512b32_imm8_sae
Mnemonic::Vreducepd,// EVEX_Vreducepd_xmm_k1z_xmmm128b64_imm8
Mnemonic::Vreducepd,// EVEX_Vreducepd_ymm_k1z_ymmm256b64_imm8
Mnemonic::Vreducepd,// EVEX_Vreducepd_zmm_k1z_zmmm512b64_imm8_sae
Mnemonic::Vreducess,// EVEX_Vreducess_xmm_k1z_xmm_xmmm32_imm8_sae
Mnemonic::Vreducesd,// EVEX_Vreducesd_xmm_k1z_xmm_xmmm64_imm8_sae
Mnemonic::Vfmaddsubps,// VEX_Vfmaddsubps_xmm_xmm_xmmm128_xmm
Mnemonic::Vfmaddsubps,// VEX_Vfmaddsubps_ymm_ymm_ymmm256_ymm
Mnemonic::Vfmaddsubps,// VEX_Vfmaddsubps_xmm_xmm_xmm_xmmm128
Mnemonic::Vfmaddsubps,// VEX_Vfmaddsubps_ymm_ymm_ymm_ymmm256
Mnemonic::Vfmaddsubpd,// VEX_Vfmaddsubpd_xmm_xmm_xmmm128_xmm
Mnemonic::Vfmaddsubpd,// VEX_Vfmaddsubpd_ymm_ymm_ymmm256_ymm
Mnemonic::Vfmaddsubpd,// VEX_Vfmaddsubpd_xmm_xmm_xmm_xmmm128
Mnemonic::Vfmaddsubpd,// VEX_Vfmaddsubpd_ymm_ymm_ymm_ymmm256
Mnemonic::Vfmsubaddps,// VEX_Vfmsubaddps_xmm_xmm_xmmm128_xmm
Mnemonic::Vfmsubaddps,// VEX_Vfmsubaddps_ymm_ymm_ymmm256_ymm
Mnemonic::Vfmsubaddps,// VEX_Vfmsubaddps_xmm_xmm_xmm_xmmm128
Mnemonic::Vfmsubaddps,// VEX_Vfmsubaddps_ymm_ymm_ymm_ymmm256
Mnemonic::Vfmsubaddpd,// VEX_Vfmsubaddpd_xmm_xmm_xmmm128_xmm
Mnemonic::Vfmsubaddpd,// VEX_Vfmsubaddpd_ymm_ymm_ymmm256_ymm
Mnemonic::Vfmsubaddpd,// VEX_Vfmsubaddpd_xmm_xmm_xmm_xmmm128
Mnemonic::Vfmsubaddpd,// VEX_Vfmsubaddpd_ymm_ymm_ymm_ymmm256
Mnemonic::Pcmpestrm,// Pcmpestrm_xmm_xmmm128_imm8
Mnemonic::Pcmpestrm64,// Pcmpestrm64_xmm_xmmm128_imm8
Mnemonic::Vpcmpestrm,// VEX_Vpcmpestrm_xmm_xmmm128_imm8
Mnemonic::Vpcmpestrm64,// VEX_Vpcmpestrm64_xmm_xmmm128_imm8
Mnemonic::Pcmpestri,// Pcmpestri_xmm_xmmm128_imm8
Mnemonic::Pcmpestri64,// Pcmpestri64_xmm_xmmm128_imm8
Mnemonic::Vpcmpestri,// VEX_Vpcmpestri_xmm_xmmm128_imm8
Mnemonic::Vpcmpestri64,// VEX_Vpcmpestri64_xmm_xmmm128_imm8
Mnemonic::Pcmpistrm,// Pcmpistrm_xmm_xmmm128_imm8
Mnemonic::Vpcmpistrm,// VEX_Vpcmpistrm_xmm_xmmm128_imm8
Mnemonic::Pcmpistri,// Pcmpistri_xmm_xmmm128_imm8
Mnemonic::Vpcmpistri,// VEX_Vpcmpistri_xmm_xmmm128_imm8
Mnemonic::Vfpclassps,// EVEX_Vfpclassps_kr_k1_xmmm128b32_imm8
Mnemonic::Vfpclassps,// EVEX_Vfpclassps_kr_k1_ymmm256b32_imm8
Mnemonic::Vfpclassps,// EVEX_Vfpclassps_kr_k1_zmmm512b32_imm8
Mnemonic::Vfpclasspd,// EVEX_Vfpclasspd_kr_k1_xmmm128b64_imm8
Mnemonic::Vfpclasspd,// EVEX_Vfpclasspd_kr_k1_ymmm256b64_imm8
Mnemonic::Vfpclasspd,// EVEX_Vfpclasspd_kr_k1_zmmm512b64_imm8
Mnemonic::Vfpclassss,// EVEX_Vfpclassss_kr_k1_xmmm32_imm8
Mnemonic::Vfpclasssd,// EVEX_Vfpclasssd_kr_k1_xmmm64_imm8
Mnemonic::Vfmaddps,// VEX_Vfmaddps_xmm_xmm_xmmm128_xmm
Mnemonic::Vfmaddps,// VEX_Vfmaddps_ymm_ymm_ymmm256_ymm
Mnemonic::Vfmaddps,// VEX_Vfmaddps_xmm_xmm_xmm_xmmm128
Mnemonic::Vfmaddps,// VEX_Vfmaddps_ymm_ymm_ymm_ymmm256
Mnemonic::Vfmaddpd,// VEX_Vfmaddpd_xmm_xmm_xmmm128_xmm
Mnemonic::Vfmaddpd,// VEX_Vfmaddpd_ymm_ymm_ymmm256_ymm
Mnemonic::Vfmaddpd,// VEX_Vfmaddpd_xmm_xmm_xmm_xmmm128
Mnemonic::Vfmaddpd,// VEX_Vfmaddpd_ymm_ymm_ymm_ymmm256
Mnemonic::Vfmaddss,// VEX_Vfmaddss_xmm_xmm_xmmm32_xmm
Mnemonic::Vfmaddss,// VEX_Vfmaddss_xmm_xmm_xmm_xmmm32
Mnemonic::Vfmaddsd,// VEX_Vfmaddsd_xmm_xmm_xmmm64_xmm
Mnemonic::Vfmaddsd,// VEX_Vfmaddsd_xmm_xmm_xmm_xmmm64
Mnemonic::Vfmsubps,// VEX_Vfmsubps_xmm_xmm_xmmm128_xmm
Mnemonic::Vfmsubps,// VEX_Vfmsubps_ymm_ymm_ymmm256_ymm
Mnemonic::Vfmsubps,// VEX_Vfmsubps_xmm_xmm_xmm_xmmm128
Mnemonic::Vfmsubps,// VEX_Vfmsubps_ymm_ymm_ymm_ymmm256
Mnemonic::Vfmsubpd,// VEX_Vfmsubpd_xmm_xmm_xmmm128_xmm
Mnemonic::Vfmsubpd,// VEX_Vfmsubpd_ymm_ymm_ymmm256_ymm
Mnemonic::Vfmsubpd,// VEX_Vfmsubpd_xmm_xmm_xmm_xmmm128
Mnemonic::Vfmsubpd,// VEX_Vfmsubpd_ymm_ymm_ymm_ymmm256
Mnemonic::Vfmsubss,// VEX_Vfmsubss_xmm_xmm_xmmm32_xmm
Mnemonic::Vfmsubss,// VEX_Vfmsubss_xmm_xmm_xmm_xmmm32
Mnemonic::Vfmsubsd,// VEX_Vfmsubsd_xmm_xmm_xmmm64_xmm
Mnemonic::Vfmsubsd,// VEX_Vfmsubsd_xmm_xmm_xmm_xmmm64
Mnemonic::Vpshldw,// EVEX_Vpshldw_xmm_k1z_xmm_xmmm128_imm8
Mnemonic::Vpshldw,// EVEX_Vpshldw_ymm_k1z_ymm_ymmm256_imm8
Mnemonic::Vpshldw,// EVEX_Vpshldw_zmm_k1z_zmm_zmmm512_imm8
Mnemonic::Vpshldd,// EVEX_Vpshldd_xmm_k1z_xmm_xmmm128b32_imm8
Mnemonic::Vpshldd,// EVEX_Vpshldd_ymm_k1z_ymm_ymmm256b32_imm8
Mnemonic::Vpshldd,// EVEX_Vpshldd_zmm_k1z_zmm_zmmm512b32_imm8
Mnemonic::Vpshldq,// EVEX_Vpshldq_xmm_k1z_xmm_xmmm128b64_imm8
Mnemonic::Vpshldq,// EVEX_Vpshldq_ymm_k1z_ymm_ymmm256b64_imm8
Mnemonic::Vpshldq,// EVEX_Vpshldq_zmm_k1z_zmm_zmmm512b64_imm8
Mnemonic::Vpshrdw,// EVEX_Vpshrdw_xmm_k1z_xmm_xmmm128_imm8
Mnemonic::Vpshrdw,// EVEX_Vpshrdw_ymm_k1z_ymm_ymmm256_imm8
Mnemonic::Vpshrdw,// EVEX_Vpshrdw_zmm_k1z_zmm_zmmm512_imm8
Mnemonic::Vpshrdd,// EVEX_Vpshrdd_xmm_k1z_xmm_xmmm128b32_imm8
Mnemonic::Vpshrdd,// EVEX_Vpshrdd_ymm_k1z_ymm_ymmm256b32_imm8
Mnemonic::Vpshrdd,// EVEX_Vpshrdd_zmm_k1z_zmm_zmmm512b32_imm8
Mnemonic::Vpshrdq,// EVEX_Vpshrdq_xmm_k1z_xmm_xmmm128b64_imm8
Mnemonic::Vpshrdq,// EVEX_Vpshrdq_ymm_k1z_ymm_ymmm256b64_imm8
Mnemonic::Vpshrdq,// EVEX_Vpshrdq_zmm_k1z_zmm_zmmm512b64_imm8
Mnemonic::Vfnmaddps,// VEX_Vfnmaddps_xmm_xmm_xmmm128_xmm
Mnemonic::Vfnmaddps,// VEX_Vfnmaddps_ymm_ymm_ymmm256_ymm
Mnemonic::Vfnmaddps,// VEX_Vfnmaddps_xmm_xmm_xmm_xmmm128
Mnemonic::Vfnmaddps,// VEX_Vfnmaddps_ymm_ymm_ymm_ymmm256
Mnemonic::Vfnmaddpd,// VEX_Vfnmaddpd_xmm_xmm_xmmm128_xmm
Mnemonic::Vfnmaddpd,// VEX_Vfnmaddpd_ymm_ymm_ymmm256_ymm
Mnemonic::Vfnmaddpd,// VEX_Vfnmaddpd_xmm_xmm_xmm_xmmm128
Mnemonic::Vfnmaddpd,// VEX_Vfnmaddpd_ymm_ymm_ymm_ymmm256
Mnemonic::Vfnmaddss,// VEX_Vfnmaddss_xmm_xmm_xmmm32_xmm
Mnemonic::Vfnmaddss,// VEX_Vfnmaddss_xmm_xmm_xmm_xmmm32
Mnemonic::Vfnmaddsd,// VEX_Vfnmaddsd_xmm_xmm_xmmm64_xmm
Mnemonic::Vfnmaddsd,// VEX_Vfnmaddsd_xmm_xmm_xmm_xmmm64
Mnemonic::Vfnmsubps,// VEX_Vfnmsubps_xmm_xmm_xmmm128_xmm
Mnemonic::Vfnmsubps,// VEX_Vfnmsubps_ymm_ymm_ymmm256_ymm
Mnemonic::Vfnmsubps,// VEX_Vfnmsubps_xmm_xmm_xmm_xmmm128
Mnemonic::Vfnmsubps,// VEX_Vfnmsubps_ymm_ymm_ymm_ymmm256
Mnemonic::Vfnmsubpd,// VEX_Vfnmsubpd_xmm_xmm_xmmm128_xmm
Mnemonic::Vfnmsubpd,// VEX_Vfnmsubpd_ymm_ymm_ymmm256_ymm
Mnemonic::Vfnmsubpd,// VEX_Vfnmsubpd_xmm_xmm_xmm_xmmm128
Mnemonic::Vfnmsubpd,// VEX_Vfnmsubpd_ymm_ymm_ymm_ymmm256
Mnemonic::Vfnmsubss,// VEX_Vfnmsubss_xmm_xmm_xmmm32_xmm
Mnemonic::Vfnmsubss,// VEX_Vfnmsubss_xmm_xmm_xmm_xmmm32
Mnemonic::Vfnmsubsd,// VEX_Vfnmsubsd_xmm_xmm_xmmm64_xmm
Mnemonic::Vfnmsubsd,// VEX_Vfnmsubsd_xmm_xmm_xmm_xmmm64
Mnemonic::Sha1rnds4,// Sha1rnds4_xmm_xmmm128_imm8
Mnemonic::Gf2p8affineqb,// Gf2p8affineqb_xmm_xmmm128_imm8
Mnemonic::Vgf2p8affineqb,// VEX_Vgf2p8affineqb_xmm_xmm_xmmm128_imm8
Mnemonic::Vgf2p8affineqb,// VEX_Vgf2p8affineqb_ymm_ymm_ymmm256_imm8
Mnemonic::Vgf2p8affineqb,// EVEX_Vgf2p8affineqb_xmm_k1z_xmm_xmmm128b64_imm8
Mnemonic::Vgf2p8affineqb,// EVEX_Vgf2p8affineqb_ymm_k1z_ymm_ymmm256b64_imm8
Mnemonic::Vgf2p8affineqb,// EVEX_Vgf2p8affineqb_zmm_k1z_zmm_zmmm512b64_imm8
Mnemonic::Gf2p8affineinvqb,// Gf2p8affineinvqb_xmm_xmmm128_imm8
Mnemonic::Vgf2p8affineinvqb,// VEX_Vgf2p8affineinvqb_xmm_xmm_xmmm128_imm8
Mnemonic::Vgf2p8affineinvqb,// VEX_Vgf2p8affineinvqb_ymm_ymm_ymmm256_imm8
Mnemonic::Vgf2p8affineinvqb,// EVEX_Vgf2p8affineinvqb_xmm_k1z_xmm_xmmm128b64_imm8
Mnemonic::Vgf2p8affineinvqb,// EVEX_Vgf2p8affineinvqb_ymm_k1z_ymm_ymmm256b64_imm8
Mnemonic::Vgf2p8affineinvqb,// EVEX_Vgf2p8affineinvqb_zmm_k1z_zmm_zmmm512b64_imm8
Mnemonic::Aeskeygenassist,// Aeskeygenassist_xmm_xmmm128_imm8
Mnemonic::Vaeskeygenassist,// VEX_Vaeskeygenassist_xmm_xmmm128_imm8
Mnemonic::Rorx,// VEX_Rorx_r32_rm32_imm8
Mnemonic::Rorx,// VEX_Rorx_r64_rm64_imm8
Mnemonic::Vpmacssww,// XOP_Vpmacssww_xmm_xmm_xmmm128_xmm
Mnemonic::Vpmacsswd,// XOP_Vpmacsswd_xmm_xmm_xmmm128_xmm
Mnemonic::Vpmacssdql,// XOP_Vpmacssdql_xmm_xmm_xmmm128_xmm
Mnemonic::Vpmacssdd,// XOP_Vpmacssdd_xmm_xmm_xmmm128_xmm
Mnemonic::Vpmacssdqh,// XOP_Vpmacssdqh_xmm_xmm_xmmm128_xmm
Mnemonic::Vpmacsww,// XOP_Vpmacsww_xmm_xmm_xmmm128_xmm
Mnemonic::Vpmacswd,// XOP_Vpmacswd_xmm_xmm_xmmm128_xmm
Mnemonic::Vpmacsdql,// XOP_Vpmacsdql_xmm_xmm_xmmm128_xmm
Mnemonic::Vpmacsdd,// XOP_Vpmacsdd_xmm_xmm_xmmm128_xmm
Mnemonic::Vpmacsdqh,// XOP_Vpmacsdqh_xmm_xmm_xmmm128_xmm
Mnemonic::Vpcmov,// XOP_Vpcmov_xmm_xmm_xmmm128_xmm
Mnemonic::Vpcmov,// XOP_Vpcmov_ymm_ymm_ymmm256_ymm
Mnemonic::Vpcmov,// XOP_Vpcmov_xmm_xmm_xmm_xmmm128
Mnemonic::Vpcmov,// XOP_Vpcmov_ymm_ymm_ymm_ymmm256
Mnemonic::Vpperm,// XOP_Vpperm_xmm_xmm_xmmm128_xmm
Mnemonic::Vpperm,// XOP_Vpperm_xmm_xmm_xmm_xmmm128
Mnemonic::Vpmadcsswd,// XOP_Vpmadcsswd_xmm_xmm_xmmm128_xmm
Mnemonic::Vpmadcswd,// XOP_Vpmadcswd_xmm_xmm_xmmm128_xmm
Mnemonic::Vprotb,// XOP_Vprotb_xmm_xmmm128_imm8
Mnemonic::Vprotw,// XOP_Vprotw_xmm_xmmm128_imm8
Mnemonic::Vprotd,// XOP_Vprotd_xmm_xmmm128_imm8
Mnemonic::Vprotq,// XOP_Vprotq_xmm_xmmm128_imm8
Mnemonic::Vpcomb,// XOP_Vpcomb_xmm_xmm_xmmm128_imm8
Mnemonic::Vpcomw,// XOP_Vpcomw_xmm_xmm_xmmm128_imm8
Mnemonic::Vpcomd,// XOP_Vpcomd_xmm_xmm_xmmm128_imm8
Mnemonic::Vpcomq,// XOP_Vpcomq_xmm_xmm_xmmm128_imm8
Mnemonic::Vpcomub,// XOP_Vpcomub_xmm_xmm_xmmm128_imm8
Mnemonic::Vpcomuw,// XOP_Vpcomuw_xmm_xmm_xmmm128_imm8
Mnemonic::Vpcomud,// XOP_Vpcomud_xmm_xmm_xmmm128_imm8
Mnemonic::Vpcomuq,// XOP_Vpcomuq_xmm_xmm_xmmm128_imm8
Mnemonic::Blcfill,// XOP_Blcfill_r32_rm32
Mnemonic::Blcfill,// XOP_Blcfill_r64_rm64
Mnemonic::Blsfill,// XOP_Blsfill_r32_rm32
Mnemonic::Blsfill,// XOP_Blsfill_r64_rm64
Mnemonic::Blcs,// XOP_Blcs_r32_rm32
Mnemonic::Blcs,// XOP_Blcs_r64_rm64
Mnemonic::Tzmsk,// XOP_Tzmsk_r32_rm32
Mnemonic::Tzmsk,// XOP_Tzmsk_r64_rm64
Mnemonic::Blcic,// XOP_Blcic_r32_rm32
Mnemonic::Blcic,// XOP_Blcic_r64_rm64
Mnemonic::Blsic,// XOP_Blsic_r32_rm32
Mnemonic::Blsic,// XOP_Blsic_r64_rm64
Mnemonic::T1mskc,// XOP_T1mskc_r32_rm32
Mnemonic::T1mskc,// XOP_T1mskc_r64_rm64
Mnemonic::Blcmsk,// XOP_Blcmsk_r32_rm32
Mnemonic::Blcmsk,// XOP_Blcmsk_r64_rm64
Mnemonic::Blci,// XOP_Blci_r32_rm32
Mnemonic::Blci,// XOP_Blci_r64_rm64
Mnemonic::Llwpcb,// XOP_Llwpcb_r32
Mnemonic::Llwpcb,// XOP_Llwpcb_r64
Mnemonic::Slwpcb,// XOP_Slwpcb_r32
Mnemonic::Slwpcb,// XOP_Slwpcb_r64
Mnemonic::Vfrczps,// XOP_Vfrczps_xmm_xmmm128
Mnemonic::Vfrczps,// XOP_Vfrczps_ymm_ymmm256
Mnemonic::Vfrczpd,// XOP_Vfrczpd_xmm_xmmm128
Mnemonic::Vfrczpd,// XOP_Vfrczpd_ymm_ymmm256
Mnemonic::Vfrczss,// XOP_Vfrczss_xmm_xmmm32
Mnemonic::Vfrczsd,// XOP_Vfrczsd_xmm_xmmm64
Mnemonic::Vprotb,// XOP_Vprotb_xmm_xmmm128_xmm
Mnemonic::Vprotb,// XOP_Vprotb_xmm_xmm_xmmm128
Mnemonic::Vprotw,// XOP_Vprotw_xmm_xmmm128_xmm
Mnemonic::Vprotw,// XOP_Vprotw_xmm_xmm_xmmm128
Mnemonic::Vprotd,// XOP_Vprotd_xmm_xmmm128_xmm
Mnemonic::Vprotd,// XOP_Vprotd_xmm_xmm_xmmm128
Mnemonic::Vprotq,// XOP_Vprotq_xmm_xmmm128_xmm
Mnemonic::Vprotq,// XOP_Vprotq_xmm_xmm_xmmm128
Mnemonic::Vpshlb,// XOP_Vpshlb_xmm_xmmm128_xmm
Mnemonic::Vpshlb,// XOP_Vpshlb_xmm_xmm_xmmm128
Mnemonic::Vpshlw,// XOP_Vpshlw_xmm_xmmm128_xmm
Mnemonic::Vpshlw,// XOP_Vpshlw_xmm_xmm_xmmm128
Mnemonic::Vpshld,// XOP_Vpshld_xmm_xmmm128_xmm
Mnemonic::Vpshld,// XOP_Vpshld_xmm_xmm_xmmm128
Mnemonic::Vpshlq,// XOP_Vpshlq_xmm_xmmm128_xmm
Mnemonic::Vpshlq,// XOP_Vpshlq_xmm_xmm_xmmm128
Mnemonic::Vpshab,// XOP_Vpshab_xmm_xmmm128_xmm
Mnemonic::Vpshab,// XOP_Vpshab_xmm_xmm_xmmm128
Mnemonic::Vpshaw,// XOP_Vpshaw_xmm_xmmm128_xmm
Mnemonic::Vpshaw,// XOP_Vpshaw_xmm_xmm_xmmm128
Mnemonic::Vpshad,// XOP_Vpshad_xmm_xmmm128_xmm
Mnemonic::Vpshad,// XOP_Vpshad_xmm_xmm_xmmm128
Mnemonic::Vpshaq,// XOP_Vpshaq_xmm_xmmm128_xmm
Mnemonic::Vpshaq,// XOP_Vpshaq_xmm_xmm_xmmm128
Mnemonic::Vphaddbw,// XOP_Vphaddbw_xmm_xmmm128
Mnemonic::Vphaddbd,// XOP_Vphaddbd_xmm_xmmm128
Mnemonic::Vphaddbq,// XOP_Vphaddbq_xmm_xmmm128
Mnemonic::Vphaddwd,// XOP_Vphaddwd_xmm_xmmm128
Mnemonic::Vphaddwq,// XOP_Vphaddwq_xmm_xmmm128
Mnemonic::Vphadddq,// XOP_Vphadddq_xmm_xmmm128
Mnemonic::Vphaddubw,// XOP_Vphaddubw_xmm_xmmm128
Mnemonic::Vphaddubd,// XOP_Vphaddubd_xmm_xmmm128
Mnemonic::Vphaddubq,// XOP_Vphaddubq_xmm_xmmm128
Mnemonic::Vphadduwd,// XOP_Vphadduwd_xmm_xmmm128
Mnemonic::Vphadduwq,// XOP_Vphadduwq_xmm_xmmm128
Mnemonic::Vphaddudq,// XOP_Vphaddudq_xmm_xmmm128
Mnemonic::Vphsubbw,// XOP_Vphsubbw_xmm_xmmm128
Mnemonic::Vphsubwd,// XOP_Vphsubwd_xmm_xmmm128
Mnemonic::Vphsubdq,// XOP_Vphsubdq_xmm_xmmm128
Mnemonic::Bextr,// XOP_Bextr_r32_rm32_imm32
Mnemonic::Bextr,// XOP_Bextr_r64_rm64_imm32
Mnemonic::Lwpins,// XOP_Lwpins_r32_rm32_imm32
Mnemonic::Lwpins,// XOP_Lwpins_r64_rm32_imm32
Mnemonic::Lwpval,// XOP_Lwpval_r32_rm32_imm32
Mnemonic::Lwpval,// XOP_Lwpval_r64_rm32_imm32
Mnemonic::Pi2fw,// D3NOW_Pi2fw_mm_mmm64
Mnemonic::Pi2fd,// D3NOW_Pi2fd_mm_mmm64
Mnemonic::Pf2iw,// D3NOW_Pf2iw_mm_mmm64
Mnemonic::Pf2id,// D3NOW_Pf2id_mm_mmm64
Mnemonic::Pfrcpv,// D3NOW_Pfrcpv_mm_mmm64
Mnemonic::Pfrsqrtv,// D3NOW_Pfrsqrtv_mm_mmm64
Mnemonic::Pfnacc,// D3NOW_Pfnacc_mm_mmm64
Mnemonic::Pfpnacc,// D3NOW_Pfpnacc_mm_mmm64
Mnemonic::Pfcmpge,// D3NOW_Pfcmpge_mm_mmm64
Mnemonic::Pfmin,// D3NOW_Pfmin_mm_mmm64
Mnemonic::Pfrcp,// D3NOW_Pfrcp_mm_mmm64
Mnemonic::Pfrsqrt,// D3NOW_Pfrsqrt_mm_mmm64
Mnemonic::Pfsub,// D3NOW_Pfsub_mm_mmm64
Mnemonic::Pfadd,// D3NOW_Pfadd_mm_mmm64
Mnemonic::Pfcmpgt,// D3NOW_Pfcmpgt_mm_mmm64
Mnemonic::Pfmax,// D3NOW_Pfmax_mm_mmm64
Mnemonic::Pfrcpit1,// D3NOW_Pfrcpit1_mm_mmm64
Mnemonic::Pfrsqit1,// D3NOW_Pfrsqit1_mm_mmm64
Mnemonic::Pfsubr,// D3NOW_Pfsubr_mm_mmm64
Mnemonic::Pfacc,// D3NOW_Pfacc_mm_mmm64
Mnemonic::Pfcmpeq,// D3NOW_Pfcmpeq_mm_mmm64
Mnemonic::Pfmul,// D3NOW_Pfmul_mm_mmm64
Mnemonic::Pfrcpit2,// D3NOW_Pfrcpit2_mm_mmm64
Mnemonic::Pmulhrw,// D3NOW_Pmulhrw_mm_mmm64
Mnemonic::Pswapd,// D3NOW_Pswapd_mm_mmm64
Mnemonic::Pavgusb,// D3NOW_Pavgusb_mm_mmm64
Mnemonic::Rmpadjust,// Rmpadjust
Mnemonic::Rmpupdate,// Rmpupdate
Mnemonic::Psmash,// Psmash
Mnemonic::Pvalidate,// Pvalidatew
Mnemonic::Pvalidate,// Pvalidated
Mnemonic::Pvalidate,// Pvalidateq
Mnemonic::Serialize,// Serialize
Mnemonic::Xsusldtrk,// Xsusldtrk
Mnemonic::Xresldtrk,// Xresldtrk
Mnemonic::Invlpgb,// Invlpgbw
Mnemonic::Invlpgb,// Invlpgbd
Mnemonic::Invlpgb,// Invlpgbq
Mnemonic::Tlbsync,// Tlbsync
Mnemonic::Prefetchw,// Prefetchreserved3_m8
Mnemonic::Prefetch,// Prefetchreserved4_m8
Mnemonic::Prefetch,// Prefetchreserved5_m8
Mnemonic::Prefetch,// Prefetchreserved6_m8
Mnemonic::Prefetch,// Prefetchreserved7_m8
Mnemonic::Ud0,// Ud0
Mnemonic::Vmgexit,// Vmgexit
Mnemonic::Getsecq,// Getsecq
Mnemonic::Ldtilecfg,// VEX_Ldtilecfg_m512
Mnemonic::Tilerelease,// VEX_Tilerelease
Mnemonic::Sttilecfg,// VEX_Sttilecfg_m512
Mnemonic::Tilezero,// VEX_Tilezero_tmm
Mnemonic::Tileloaddt1,// VEX_Tileloaddt1_tmm_sibmem
Mnemonic::Tilestored,// VEX_Tilestored_sibmem_tmm
Mnemonic::Tileloadd,// VEX_Tileloadd_tmm_sibmem
Mnemonic::Tdpbf16ps,// VEX_Tdpbf16ps_tmm_tmm_tmm
Mnemonic::Tdpbuud,// VEX_Tdpbuud_tmm_tmm_tmm
Mnemonic::Tdpbusd,// VEX_Tdpbusd_tmm_tmm_tmm
Mnemonic::Tdpbsud,// VEX_Tdpbsud_tmm_tmm_tmm
Mnemonic::Tdpbssd,// VEX_Tdpbssd_tmm_tmm_tmm
Mnemonic::Fnstdw,// Fnstdw_AX
Mnemonic::Fnstsg,// Fnstsg_AX
Mnemonic::Rdshr,// Rdshr_rm32
Mnemonic::Wrshr,// Wrshr_rm32
Mnemonic::Smint,// Smint
Mnemonic::Dmint,// Dmint
Mnemonic::Rdm,// Rdm
Mnemonic::Svdc,// Svdc_m80_Sreg
Mnemonic::Rsdc,// Rsdc_Sreg_m80
Mnemonic::Svldt,// Svldt_m80
Mnemonic::Rsldt,// Rsldt_m80
Mnemonic::Svts,// Svts_m80
Mnemonic::Rsts,// Rsts_m80
Mnemonic::Smint,// Smint_0F7E
Mnemonic::Bb0_reset,// Bb0_reset
Mnemonic::Bb1_reset,// Bb1_reset
Mnemonic::Cpu_write,// Cpu_write
Mnemonic::Cpu_read,// Cpu_read
Mnemonic::Altinst,// Altinst
Mnemonic::Paveb,// Paveb_mm_mmm64
Mnemonic::Paddsiw,// Paddsiw_mm_mmm64
Mnemonic::Pmagw,// Pmagw_mm_mmm64
Mnemonic::Pdistib,// Pdistib_mm_m64
Mnemonic::Psubsiw,// Psubsiw_mm_mmm64
Mnemonic::Pmvzb,// Pmvzb_mm_m64
Mnemonic::Pmulhrw,// Pmulhrw_mm_mmm64
Mnemonic::Pmvnzb,// Pmvnzb_mm_m64
Mnemonic::Pmvlzb,// Pmvlzb_mm_m64
Mnemonic::Pmvgezb,// Pmvgezb_mm_m64
Mnemonic::Pmulhriw,// Pmulhriw_mm_mmm64
Mnemonic::Pmachriw,// Pmachriw_mm_m64
Mnemonic::Undoc,// Cyrix_D9D7
Mnemonic::Undoc,// Cyrix_D9E2
Mnemonic::Ftstp,// Ftstp
Mnemonic::Undoc,// Cyrix_D9E7
Mnemonic::Frint2,// Frint2
Mnemonic::Frichop,// Frichop
Mnemonic::Undoc,// Cyrix_DED8
Mnemonic::Undoc,// Cyrix_DEDA
Mnemonic::Undoc,// Cyrix_DEDC
Mnemonic::Undoc,// Cyrix_DEDD
Mnemonic::Undoc,// Cyrix_DEDE
Mnemonic::Frinear,// Frinear
Mnemonic::Tdcall,// Tdcall
Mnemonic::Seamret,// Seamret
Mnemonic::Seamops,// Seamops
Mnemonic::Seamcall,// Seamcall
Mnemonic::Aesencwide128kl,// Aesencwide128kl_m384
Mnemonic::Aesdecwide128kl,// Aesdecwide128kl_m384
Mnemonic::Aesencwide256kl,// Aesencwide256kl_m512
Mnemonic::Aesdecwide256kl,// Aesdecwide256kl_m512
Mnemonic::Loadiwkey,// Loadiwkey_xmm_xmm
Mnemonic::Aesenc128kl,// Aesenc128kl_xmm_m384
Mnemonic::Aesdec128kl,// Aesdec128kl_xmm_m384
Mnemonic::Aesenc256kl,// Aesenc256kl_xmm_m512
Mnemonic::Aesdec256kl,// Aesdec256kl_xmm_m512
Mnemonic::Encodekey128,// Encodekey128_r32_r32
Mnemonic::Encodekey256,// Encodekey256_r32_r32
Mnemonic::Vbroadcastss,// VEX_Vbroadcastss_xmm_xmm
Mnemonic::Vbroadcastss,// VEX_Vbroadcastss_ymm_xmm
Mnemonic::Vbroadcastsd,// VEX_Vbroadcastsd_ymm_xmm
Mnemonic::Vmgexit,// Vmgexit_F2
Mnemonic::Uiret,// Uiret
Mnemonic::Testui,// Testui
Mnemonic::Clui,// Clui
Mnemonic::Stui,// Stui
Mnemonic::Senduipi,// Senduipi_r64
Mnemonic::Hreset,// Hreset_imm8
Mnemonic::Vpdpbusd,// VEX_Vpdpbusd_xmm_xmm_xmmm128
Mnemonic::Vpdpbusd,// VEX_Vpdpbusd_ymm_ymm_ymmm256
Mnemonic::Vpdpbusds,// VEX_Vpdpbusds_xmm_xmm_xmmm128
Mnemonic::Vpdpbusds,// VEX_Vpdpbusds_ymm_ymm_ymmm256
Mnemonic::Vpdpwssd,// VEX_Vpdpwssd_xmm_xmm_xmmm128
Mnemonic::Vpdpwssd,// VEX_Vpdpwssd_ymm_ymm_ymmm256
Mnemonic::Vpdpwssds,// VEX_Vpdpwssds_xmm_xmm_xmmm128
Mnemonic::Vpdpwssds,// VEX_Vpdpwssds_ymm_ymm_ymmm256
Mnemonic::Ccs_hash,// Ccs_hash_16
Mnemonic::Ccs_hash,// Ccs_hash_32
Mnemonic::Ccs_hash,// Ccs_hash_64
Mnemonic::Ccs_encrypt,// Ccs_encrypt_16
Mnemonic::Ccs_encrypt,// Ccs_encrypt_32
Mnemonic::Ccs_encrypt,// Ccs_encrypt_64
];
| 46.148276 | 82 | 0.826576 |
8f6b8bdff863920dd34f5f8338b1d1083d499d1f
| 754 |
extern crate libbpf;
use std::mem;
use libbpf::{Map, MapType};
fn main() {
    // Build a BPF hash map with u32-sized keys and values and 32 slots.
    let bpf_map = Map::create(
        MapType::Hash,
        mem::size_of::<u32>(),
        mem::size_of::<u32>(),
        32,
    )
    .unwrap();

    let k = [1, 2, 3, 4];
    // No key in the map for now
    assert!(bpf_map.lookup(&k).is_err());

    // After inserting, we can look it up
    let v = [42, 42, 42, 42];
    bpf_map.insert(&k, &v).unwrap();
    assert_eq!(bpf_map.lookup(&k).unwrap(), &v[..]);

    // We can iterate all key/value pairs
    for (key, val) in &bpf_map {
        println!("{:?} => {:?}", key, val);
    }

    // ...and delete stuff again
    bpf_map.delete(&k).unwrap();
    assert!(bpf_map.lookup(&k).is_err());
}
| 23.5625 | 54 | 0.501326 |
ffcff28812804f0e790250a4df55a627c5fa9dcc
| 130 |
// Submodules split by concern; each is re-exported flat below so callers can
// reach every item without knowing the internal file layout.
mod flags;
mod serverkeystate;
mod upgrades;
// Flatten the public API of each submodule into this module's namespace.
pub use self::flags::*;
pub use self::serverkeystate::*;
pub use self::upgrades::*;
| 16.25 | 32 | 0.715385 |
16b1d5ea9a0b02bfe1c981f10faf3a59e3f77884
| 9,515 |
//! Tests for `LoggingRustIrDatabase` which tests its functionality to record
//! types and stubs.
//!
//! Each tests records the trait solver solving something, and then runs the
//! solver on the output `LoggingRustIrDatabase` writes. These tests _don't_ test
//! that the output program is identical to the input, only that the resulting
//! program allows solving the same goals.
//!
//! Note that this does not, and should not, test the majority of the rendering
//! code. The code to render specific items and syntax details is rigorously
//! tested in `tests/display/`.
#[macro_use]
mod util;

// The simplest round trip: one struct, one trait, and one impl must all be
// recorded so the goal still solves against the logged output.
#[test]
fn records_struct_trait_and_impl() {
    logging_db_output_sufficient! {
        program {
            struct S {}
            trait Trait {}
            impl Trait for S {}
        }
        goal {
            S: Trait
        } yields {
            "Unique"
        }
    }
}

// Opaque type declarations (including their hidden type) must be recorded.
#[test]
fn records_opaque_type() {
    logging_db_output_sufficient! {
        program {
            struct S {}
            trait Trait {}
            impl Trait for S {}
            opaque type Foo: Trait = S;
        }
        goal {
            Foo: Trait
        } yields {
            "Unique"
        }
    }
}

// Function definitions must be recorded; `foo: Sized` exercises the fn-def
// path through the logger.
#[test]
fn records_fn_def() {
    logging_db_output_sufficient! {
        program {
            #[lang(sized)]
            trait Sized { }

            fn foo();
        }
        goal {
            foo: Sized
        } yields {
            "Unique"
        }
    }
}

// Generic parameters on structs and concrete impls must survive recording:
// the logged program must still distinguish Foo<()> from Foo<i32>.
#[test]
fn records_generics() {
    logging_db_output_sufficient! {
        program {
            struct Foo<T> {}
            trait Bar {}
            impl Bar for Foo<()> {}
        }
        goal {
            Foo<()>: Bar
        } yields {
            "Unique"
        }
        goal {
            Foo<i32>: Bar
        } yields {
            "No possible solution"
        }
    }
}
// The whole supertrait chain (Child -> Parent -> Grandparent) must be
// recorded, not just the trait named in the goal.
#[test]
fn records_parents_parent() {
    logging_db_output_sufficient! {
        program {
            struct S {}
            trait Grandparent {}
            trait Parent where Self: Grandparent {}
            trait Child where Self: Parent {}
            impl Grandparent for S {}
            impl Parent for S {}
            impl Child for S {}
        }
        goal {
            S: Child
        } yields {
            "Unique"
        }
    }
}

// Bounds on associated types (`type Assoc: Bar`) and the impls that satisfy
// them must both be recorded.
#[test]
fn records_associated_type_bounds() {
    logging_db_output_sufficient! {
        program {
            trait Foo {
                type Assoc: Bar;
            }
            trait Bar {
            }
            struct S {}
            impl Foo for S {
                type Assoc = S;
            }
            impl Bar for S {}
        }
        goal {
            S: Foo
        } yields {
            "Unique"
        }
    }
}

// Blanket impls with a where-clause must be recorded; the second run checks
// that a type NOT satisfying the bound still fails against the logged output.
#[test]
fn records_generic_impls() {
    logging_db_output_sufficient! {
        program {
            struct S {}
            struct V {}
            trait Foo {}
            trait Bar {}
            impl Foo for S {}
            impl<T> Bar for T where T: Foo {
            }
        }
        goal {
            S: Bar
        } yields {
            "Unique"
        }
    }

    logging_db_output_sufficient! {
        program {
            struct S {}
            struct V {}
            trait Foo {}
            trait Bar {}
            impl Foo for S {}
            impl<T> Bar for T where T: Foo {
            }
        }
        goal {
            V: Bar
        } yields {
            "No possible solution"
        }
    }
}
// Traits that appear only as associated-type bounds must be stubbed so the
// logged program still type-checks.
#[test]
fn stubs_types_from_assoc_type_bounds() {
    logging_db_output_sufficient! {
        program {
            trait Foo {
                type Assoc: Bar;
            }
            trait Bar {}
            impl Foo for () {
                type Assoc = ();
            }
        }
        goal {
            (): Foo
        } yields {
            "Unique"
        }
    }
}

// Types that appear only as associated-type *values* (and are never named in
// the goal) must still be stubbed.
#[test]
fn stubs_types_from_assoc_type_values_not_mentioned() {
    logging_db_output_sufficient! {
        program {
            trait Foo {
                type Assoc;
            }
            struct Baz {}
            impl Foo for () {
                type Assoc = Baz;
            }
        }
        goal {
            (): Foo
        } yields {
            "Unique"
        }
    }
}

// Both the hidden type and every bound of an opaque type must be stubbed.
#[test]
fn stubs_types_from_opaque_ty_bounds() {
    logging_db_output_sufficient! {
        program {
            trait Foo {}
            trait Fuu {}
            struct Baz {}
            opaque type Bar: Foo + Fuu = Baz;
        }
        goal {
            Bar: Foo
        } yields {
            "Unique"
        }
    }
}

// An opaque type whose hidden type is itself another opaque type.
#[test]
fn opaque_ty_in_opaque_ty() {
    logging_db_output_sufficient! {
        program {
            trait Foo {}
            trait Fuu {}
            struct Baz {}
            opaque type Baq: Foo + Fuu = Baz;
            opaque type Bar: Foo + Fuu = Baq;
        }
        goal {
            Bar: Foo
        } yields {
            "Unique"
        }
    }
}
// An opaque type referenced from inside a projection equality bound
// (`Fuut<Assoc=Baq>`) must be recorded/stubbed.
#[test]
fn opaque_ty_in_projection() {
    logging_db_output_sufficient! {
        program {
            struct Baz {}
            trait Foo {}
            trait Fuu {}
            trait Fuut {
                type Assoc;
            }
            impl Fuut for Baz {
                type Assoc = Baq;
            }
            impl Foo for Baz
            where
                Baz: Fuut<Assoc=Baq>
            { }
            opaque type Baq: Foo + Fuu = Baz;
        }
        goal {
            Baz: Foo
        } yields {
            "Unique"
        }
    }
}

// Traits referenced only inside a `dyn Trait` associated-type value must be
// stubbed; also exercises a generic (lifetime-parameterized) associated type.
#[test]
fn stubs_types_in_dyn_ty() {
    logging_db_output_sufficient! {
        program {
            trait Foo {
                type Assoc<'a>;
            }
            trait Other {}
            impl Foo for () {
                type Assoc<'a> = dyn Other + 'a;
            }
        }
        goal {
            (): Foo
        } yields {
            "Unique"
        }
    }
}
#[test]
fn can_stub_traits_with_unreferenced_assoc_ty() {
    // None of our code will bring in `SuperNotReferenced`'s definition, so if
    // we fail to remove the bounds on `NotReferenced::Assoc`, the test will fail.
    // Two variants follow where the associated type is never referenced:
    // first with a bound on the associated type itself, then with a
    // where-clause on it.
    logging_db_output_sufficient! {
        program {
            trait SuperNotReferenced {}
            trait NotReferenced {
                type Assoc: SuperNotReferenced;
            }
            trait Referenced where Self: NotReferenced {}
            impl Referenced for () {}
        }
        goal {
            (): Referenced
        } yields {
            "Unique"
        }
    }

    logging_db_output_sufficient! {
        program {
            trait SuperNotReferenced {}
            trait NotReferenced {
                type Assoc where Self: SuperNotReferenced;
            }
            trait Referenced where Self: NotReferenced {}
            impl Referenced for () {}
        }
        goal {
            (): Referenced
        } yields {
            "Unique"
        }
    }
}

// Same shape as above, but the associated type IS referenced through a
// projection equality (`NotReferenced<Assoc=()>`), so the stub must keep it.
#[test]
fn can_stub_traits_with_referenced_assoc_ty() {
    // two tests where we do reference the assoc ty
    logging_db_output_sufficient! {
        program {
            trait SuperNotReferenced {}
            trait NotReferenced {
                type Assoc: SuperNotReferenced;
            }
            trait Referenced where Self: NotReferenced<Assoc=()> {}
            impl Referenced for () {}
        }
        goal {
            (): Referenced
        } yields {
            "Unique"
        }
    }

    logging_db_output_sufficient! {
        program {
            trait SuperNotReferenced {}
            trait NotReferenced {
                type Assoc where (): SuperNotReferenced;
            }
            trait Referenced where Self: NotReferenced<Assoc=()> {}
            impl Referenced for () {}
        }
        goal {
            (): Referenced
        } yields {
            "Unique"
        }
    }
}
// A type that appears only as a *generic argument* of an associated-type
// binding must be stubbed.
#[test]
fn can_stub_types_referenced_in_alias_ty_generics() {
    logging_db_output_sufficient! {
        program {
            struct ThisTypeShouldBeStubbed {}
            trait HasGenericAssoc {
                type Assoc<T>;
            }
            trait Referenced where Self: HasGenericAssoc<Assoc<ThisTypeShouldBeStubbed>=()> {}
            impl Referenced for () {}
        }
        goal {
            (): Referenced
        } yields {
            "Unique"
        }
    }
}

// A type that appears only as the *value* of an associated-type binding must
// be stubbed.
#[test]
fn can_stub_types_referenced_in_alias_ty_bounds() {
    logging_db_output_sufficient! {
        program {
            struct ThisTypeShouldBeStubbed {}
            trait HasAssoc {
                type Assoc;
            }
            trait Referenced where Self: HasAssoc<Assoc=ThisTypeShouldBeStubbed> {}
            impl Referenced for () {}
        }
        goal {
            (): Referenced
        } yields {
            "Unique"
        }
    }
}

#[test]
fn does_not_need_necessary_separate_impl() {
    // this should leave out "impl Bar for Fox" and the result should pass the
    // test (it won't be well-formed, but that's OK.)
    logging_db_output_sufficient! {
        program {
            trait Box {
                type Assoc: Bar;
            }
            trait Bar {}
            struct Foo {}
            impl Box for Foo {
                type Assoc = Fox;
            }
            struct Fox {}
            impl Bar for Fox {}
        }
        goal {
            Foo: Box
        } yields {
            "Unique"
        }
    }
}
| 20.684783 | 94 | 0.456332 |
623fb46cd1971002aaa7020d0888ab85e0f32b29
| 1,766 |
use crossterm::event::{read, Event, KeyCode};
use crossterm::terminal::{disable_raw_mode, enable_raw_mode};
use std::io::Read;
use std::io::Write;
use std::io::{BufRead, BufReader};
use std::path::Path;
use std::process::exit;
use structopt::StructOpt;
/// Command-line arguments: the single positional argument is the file to edit.
#[derive(StructOpt)]
struct Cli {
    /// Path of the file to open (created on first write if it does not exist).
    #[structopt(parse(from_os_str))]
    path: std::path::PathBuf,
}
/// Reads the entire contents of `filename` into a `String`.
///
/// On any read failure (missing file, permission error, invalid UTF-8) a
/// message is printed and the process exits — this function never panics.
pub fn lines_from_file<T: AsRef<Path>>(filename: T) -> String {
    // Read in a single call. The original opened the file once just to probe
    // for errors, dropped that handle, then re-read it with `unwrap()` —
    // which panicked on any late I/O or encoding error instead of taking the
    // friendly error path below.
    match std::fs::read_to_string(&filename) {
        Ok(contents) => contents,
        Err(_) => {
            println!("Error! File not found!");
            exit(0);
        }
    }
}
/// Opens `filename` for writing, creating it when it does not already exist.
/// Any open failure is reported to the user and ends the program.
fn get_file<T: AsRef<Path>>(filename: T) -> std::fs::File {
    match std::fs::OpenOptions::new()
        .write(true)
        .create(true)
        .open(filename)
    {
        Ok(handle) => handle,
        Err(_) => {
            println!("Error! File not found!");
            exit(0);
        }
    }
}
/// Entry point: a minimal raw-mode editor. Printable keys are appended to the
/// file, Backspace deletes the last character, Esc exits.
#[allow(unused_must_use)]
fn main() -> std::io::Result<()> {
    let args = Cli::from_args();
    let mut file = get_file(&args.path);
    enable_raw_mode().unwrap();
    loop {
        let event = read().unwrap();
        if event == Event::Key(KeyCode::Esc.into()) {
            break;
        } else if event == Event::Key(KeyCode::Backspace.into()) {
            // `pop` removes the last char (char-boundary safe) and is a no-op
            // on an empty file. The original byte slice
            // `&contents[0..contents.len() - 1]` panicked on an empty file
            // (index underflow) and could split a multi-byte character.
            let mut contents = lines_from_file(&args.path);
            if contents.pop().is_some() {
                std::fs::write(&args.path, &contents);
            }
        } else {
            // `..=` makes the range inclusive so '~' is typable too; the
            // original exclusive range `b' '..b'~'` silently dropped it.
            for i in b' '..=b'~' {
                if event == Event::Key(KeyCode::Char(i as char).into()) {
                    // The original wrote `&file.write(&[i]);` — a reference
                    // to an ignored Result. `write_all` also guarantees the
                    // whole byte is written.
                    file.write_all(&[i]);
                }
            }
        }
        // NOTE(review): `file` keeps its own cursor, so a Backspace (which
        // rewrites the file via `fs::write`) does not move it — subsequent
        // typing still lands at the pre-Backspace offset. Confirm intended.
    }
    disable_raw_mode().unwrap();
    Ok(())
}
| 28.483871 | 73 | 0.526048 |
75b274b64129d6c884d4f4f20f6e168c9dc7c14f
| 6,867 |
//! Encoder for Code93 barcodes.
//!
//! Code93 is intended to improve upon Code39 barcodes by offering a wider array of encodable
//! ASCII characters. It also produces denser barcodes than Code39.
//!
//! Code93 is a continuous, variable-length symbology.
//!
//! NOTE: This encoder currently only supports the basic Code93 implementation and not full-ASCII
//! mode.
use sym::{Parse, helpers};
use error::Result;
use std::ops::Range;
// Character -> Binary mappings for each of the 47 allowable characters.
// The special "full-ASCII" characters are represented with (, ), [, ].
const CHARS: [(char, [u8; 9]); 47] = [
    ('0', [1,0,0,0,1,0,1,0,0]), ('1', [1,0,1,0,0,1,0,0,0]), ('2', [1,0,1,0,0,0,1,0,0]),
    ('3', [1,0,1,0,0,0,0,1,0]), ('4', [1,0,0,1,0,1,0,0,0]), ('5', [1,0,0,1,0,0,1,0,0]),
    ('6', [1,0,0,1,0,0,0,1,0]), ('7', [1,0,1,0,1,0,0,0,0]), ('8', [1,0,0,0,1,0,0,1,0]),
    ('9', [1,0,0,0,0,1,0,1,0]), ('A', [1,1,0,1,0,1,0,0,0]), ('B', [1,1,0,1,0,0,1,0,0]),
    ('C', [1,1,0,1,0,0,0,1,0]), ('D', [1,1,0,0,1,0,1,0,0]), ('E', [1,1,0,0,1,0,0,1,0]),
    ('F', [1,1,0,0,0,1,0,1,0]), ('G', [1,0,1,1,0,1,0,0,0]), ('H', [1,0,1,1,0,0,1,0,0]),
    ('I', [1,0,1,1,0,0,0,1,0]), ('J', [1,0,0,1,1,0,1,0,0]), ('K', [1,0,0,0,1,1,0,1,0]),
    ('L', [1,0,1,0,1,1,0,0,0]), ('M', [1,0,1,0,0,1,1,0,0]), ('N', [1,0,1,0,0,0,1,1,0]),
    ('O', [1,0,0,1,0,1,1,0,0]), ('P', [1,0,0,0,1,0,1,1,0]), ('Q', [1,1,0,1,1,0,1,0,0]),
    ('R', [1,1,0,1,1,0,0,1,0]), ('S', [1,1,0,1,0,1,1,0,0]), ('T', [1,1,0,1,0,0,1,1,0]),
    ('U', [1,1,0,0,1,0,1,1,0]), ('V', [1,1,0,0,1,1,0,1,0]), ('W', [1,0,1,1,0,1,1,0,0]),
    ('X', [1,0,1,1,0,0,1,1,0]), ('Y', [1,0,0,1,1,0,1,1,0]), ('Z', [1,0,0,1,1,1,0,1,0]),
    ('-', [1,0,0,1,0,1,1,1,0]), ('.', [1,1,1,0,1,0,1,0,0]), (' ', [1,1,1,0,1,0,0,1,0]),
    ('$', [1,1,1,0,0,1,0,1,0]), ('/', [1,0,1,1,0,1,1,1,0]), ('+', [1,0,1,1,1,0,1,1,0]),
    ('%', [1,1,0,1,0,1,1,1,0]), ('(', [1,0,0,1,0,0,1,1,0]), (')', [1,1,1,0,1,1,0,1,0]),
    // BUGFIX: the final entry was a duplicate '[' — ']' was unencodable and
    // `valid_chars` listed '[' twice (the comment above names all four of
    // (, ), [, ] as the full-ASCII placeholders).
    ('[', [1,1,1,0,1,0,1,1,0]), (']', [1,0,0,1,1,0,0,1,0]),
];

// Code93 barcodes must start and end with the '*' special character.
const GUARD: [u8; 9] = [1,0,1,0,1,1,1,1,0];
const TERMINATOR: [u8; 1] = [1];
/// The Code93 barcode type.
#[derive(Debug)]
pub struct Code93(Vec<char>);

impl Code93 {
    /// Creates a new barcode.
    /// Returns Result<Code93, Error> indicating parse success.
    pub fn new<T: AsRef<str>>(data: T) -> Result<Code93> {
        // `map` replaces the original `and_then(|d| Ok(...))`: the closure
        // was infallible, so re-wrapping in `Ok` was redundant.
        Code93::parse(data.as_ref()).map(|d| Code93(d.chars().collect()))
    }

    /// Returns the 9-module encoding for a single Code93 character.
    ///
    /// Panics if `c` is outside the Code93 character set; input is validated
    /// in `new`, so reaching the panic indicates an internal bug.
    fn char_encoding(&self, c: char) -> [u8; 9] {
        match CHARS.iter().find(|&ch| ch.0 == c) {
            Some(&(_, enc)) => enc,
            // `panic!` takes format arguments directly; `panic!(format!(..))`
            // is non-idiomatic and rejected in the 2021 edition.
            None => panic!("Unknown char: {}", c),
        }
    }

    /// Calculates a checksum character using a weighted modulo-47 algorithm.
    ///
    /// Characters are weighted right-to-left starting at 1; weights wrap
    /// around after reaching `weight_threshold`.
    fn checksum_char(&self, data: &[char], weight_threshold: usize) -> Option<char> {
        let get_char_pos = |&c| CHARS.iter().position(|t| t.0 == c).unwrap();
        let weight = |i| {
            match (data.len() - i) % weight_threshold {
                0 => weight_threshold,
                n => n,
            }
        };

        // Sum of weight * character-position, identical to the original fold.
        let index: usize = data.iter()
                               .map(&get_char_pos)
                               .enumerate()
                               .map(|(i, pos)| weight(i) * pos)
                               .sum();

        // `Option::map` replaces a `match` that only unwrapped the tuple.
        CHARS.get(index % CHARS.len()).map(|&(c, _)| c)
    }

    /// Calculates the C checksum character using a weighted modulo-47 algorithm.
    fn c_checksum_char(&self) -> Option<char> {
        self.checksum_char(&self.0, 20)
    }

    /// Calculates the K checksum character using a weighted modulo-47 algorithm.
    /// The previously computed C checksum participates in the calculation.
    fn k_checksum_char(&self, c_checksum: char) -> Option<char> {
        let mut data: Vec<char> = self.0.clone();
        data.push(c_checksum);

        self.checksum_char(&data, 15)
    }

    /// Appends one 9-module character encoding to the output buffer.
    fn push_encoding(&self, into: &mut Vec<u8>, from: [u8; 9]) {
        into.extend_from_slice(&from);
    }

    /// Encodes the data characters followed by the C and K checksum characters.
    fn payload(&self) -> Vec<u8> {
        let mut enc = vec![];
        let c_checksum = self.c_checksum_char().expect("Cannot compute checksum C");
        let k_checksum = self.k_checksum_char(c_checksum).expect("Cannot compute checksum K");

        for &c in &self.0 {
            self.push_encoding(&mut enc, self.char_encoding(c));
        }

        // Checksums.
        self.push_encoding(&mut enc, self.char_encoding(c_checksum));
        self.push_encoding(&mut enc, self.char_encoding(k_checksum));

        enc
    }

    /// Encodes the barcode.
    /// Returns a Vec<u8> of encoded binary digits.
    pub fn encode(&self) -> Vec<u8> {
        let guard = &GUARD[..];
        let terminator = &TERMINATOR[..];

        helpers::join_slices(&[guard, &self.payload()[..],
                               guard, terminator][..])
    }
}
impl Parse for Code93 {
    /// Returns the valid length of data acceptable in this type of barcode.
    /// Code93 barcodes are variable-length.
    fn valid_len() -> Range<u32> {
        1..256
    }

    /// Returns the set of valid characters allowed in this type of barcode,
    /// in the order they appear in the `CHARS` table.
    fn valid_chars() -> Vec<char> {
        CHARS.iter().map(|&(c, _)| c).collect()
    }
}
#[cfg(test)]
mod tests {
    use sym::code93::*;
    use error::Error;
    use std::char;

    // Collapses a vector of binary digits into a printable string so that
    // encodings can be compared against string literals.
    fn collapse_vec(v: Vec<u8>) -> String {
        let chars = v.iter().map(|d| char::from_digit(*d as u32, 10).unwrap());
        chars.collect()
    }

    // Empty input must be rejected: Code93 requires at least one character.
    #[test]
    fn invalid_length_code93() {
        let code93 = Code93::new("");
        assert_eq!(code93.err().unwrap(), Error::Length);
    }

    // Lowercase letters are not part of the Code93 character set.
    #[test]
    fn invalid_data_code93() {
        let code93 = Code93::new("lowerCASE");
        assert_eq!(code93.err().unwrap(), Error::Character);
    }

    // End-to-end encodings including guard patterns, checksums, and the
    // terminator bar.
    #[test]
    fn code93_encode() {
        // Tests for data longer than 15, data longer than 20
        let code931 = Code93::new("TEST93").unwrap();
        let code932 = Code93::new("FLAM").unwrap();
        let code933 = Code93::new("99").unwrap();
        let code934 = Code93::new("1111111111111111111111").unwrap();

        assert_eq!(collapse_vec(code931.encode()), "1010111101101001101100100101101011001101001101000010101010000101011101101001000101010111101");
        assert_eq!(collapse_vec(code932.encode()), "1010111101100010101010110001101010001010011001001011001010011001010111101");
        assert_eq!(collapse_vec(code933.encode()), "1010111101000010101000010101101100101000101101010111101");
        assert_eq!(collapse_vec(code934.encode()), "1010111101010010001010010001010010001010010001010010001010010001010010001010010001010010001010010001010010001010010001010010001010010001010010001010010001010010001010010001010010001010010001010010001010010001000101101110010101010111101");
    }
}
| 38.578652 | 290 | 0.555119 |
d955eae7e0057b781d432210152dc819bd0c63e5
| 10,132 |
extern crate systemstat;
use std::io::stdout;
use std::error::Error;
use std::process;
use std::thread;
use std::time::Duration;
use std::sync::{Arc, Mutex};
use ctrlc;
use futures::executor::block_on;
use systemstat::{saturating_sub_bytes, Platform, System};
use crossterm::style::{Color, ResetColor, SetBackgroundColor, SetForegroundColor};
use crossterm::terminal::{
Clear,
ClearType::{All, CurrentLine},
};
use crossterm::{
cursor::{MoveTo},
execute,
event::{Event, read, KeyCode},
};
use datafetcher::SystemData;
const REFRESH: Duration = Duration::from_secs(1);
mod ui;
mod datafetcher;
mod utils;
/// The main function of the program
fn main() {
ui::init();
// CTRL-C handler
ctrlc::set_handler(move || {
ui::exit();
process::exit(0);
})
.expect("Error setting Ctrl + C handler");
// Block main thread until process finishes
match block_on(async_main()) {
Ok(_) => {
ui::exit();
process::exit(0);
},
Err(e) => {
ui::exit();
eprintln!("{}", e);
process::exit(1);
}
};
}
async fn async_main() -> Result<String, Box<dyn Error>> {
let mut term_size = crossterm::terminal::size()?;
let system_data: SystemData = datafetcher::start_data_fetcher()?;
let system_data_arc = Arc::new(Mutex::new(system_data));
let thr_data = system_data_arc.clone();
let thr_data_2 = system_data_arc.clone();
datafetcher::start_fetch(thr_data, REFRESH)?;
// thread::spawn(move || {
// loop {
// let mut data = thr_data.lock().unwrap();
// *data += "Bababooey ";
// drop(data);
// thread::sleep(Duration::from_millis(100));
// }
// });
// Create thread for keyboard events
thread::spawn(move || -> crossterm::Result<()> {
let thr_data2 = system_data_arc.clone();
let term_size = crossterm::terminal::size()?;
let mut selection: (usize, usize) = (0, 0);
// Loop for keyboard events
loop {
// `read()` blocks until an `Event` is available
match read()? {
Event::Key(event) => {
println!("{:?}", event);
match event.code {
// Close the program gracefully
KeyCode::Char('q') => {
ui::exit();
process::exit(0);
},
KeyCode::Char('c') => {
ui::reset();
ui::update_menu_header(&mut selection, term_size);
ui::print_system_data(&thr_data2, &mut selection, term_size);
},
KeyCode::Up => {
if selection.1 != 0 {
if selection.1 != 1 {
ui::update_menu_header(&mut selection, term_size);
}
ui::print_system_data(&thr_data2, &mut selection, term_size);
}
},
KeyCode::Down => {
if selection.1 == 0 {
ui::update_menu_header(&mut selection, term_size);
}
ui::print_system_data(&thr_data2, &mut selection, term_size);
},
KeyCode::Left => {
if selection.1 == 0 {
ui::update_menu_header(&mut selection, term_size);
ui::print_system_data(&thr_data2, &mut selection, term_size);
}
},
KeyCode::Right => {
if selection.1 == 0 {
ui::update_menu_header(&mut selection, term_size);
ui::print_system_data(&thr_data2, &mut selection, term_size);
}
},
_ => {
}
}
},
Event::Mouse(event) => println!("{:?}", event),
Event::Resize(_width, _height) => {
ui::reset();
},
}
}
Ok(())
});
for _i in 0..term_size.1 {
print!("\n");
}
loop {
thread::sleep(REFRESH);
let sys = thr_data_2.lock().unwrap();
term_size = crossterm::terminal::size()?;
print!(" ");
execute!(stdout(), ResetColor, MoveTo(0, 2))?;
// Total CPU usage is 0 at first in case of error
let mut total_cpu: f32 = 0_f32;
// Fetches the CPU usage for each core and prints it
let cpu_usages = &sys.cpu;
let cpu_count_string_length: usize = cpu_usages.count.to_string().len();
for i in 0..cpu_usages.count {
execute!(stdout(), Clear(CurrentLine))?;
print!("CPU {}:", i);
for _j in i.to_string().len()..cpu_count_string_length + 1 {
print!(" ");
}
print_bar(
term_size.0 - 8,
100_f32 - &cpu_usages.load[i].idle,
Color::DarkGreen,
)?;
println!("");
execute!(stdout(), Clear(CurrentLine))?;
//println!("Load: {:.2}%", 100_f32 - &cpu_usages[i][4]);
// Sum up the cpu usages
total_cpu += 100_f32 - &cpu_usages.load[i].idle;
}
// Get total cpu usage by dividing with the core count
total_cpu = 100_f32 - total_cpu / cpu_usages.count as f32;
println!(" ");
execute!(stdout(), Clear(CurrentLine))?;
print!("Memory: ");
print_bar(
term_size.0 - 8,
sys.ram.used as f32 / sys.ram.total as f32 * 100_f32,
Color::DarkYellow,
)?;
println!("");
// execute!(stdout(), Clear(CurrentLine))?;
// print!("Swap: ");
// print_bar(
// term_size.0 - 5,
// memory[3] as f32 / memory[2] as f32 * 100_f32,
// Color::DarkYellow,
// );
// println!("");
//print_graph_stats(&cpu_vec, term_size.0 / 2, term_size.1 - 3, term_size.0, term_size.1);
execute!(
stdout(),
MoveTo(0, term_size.1),
Clear(CurrentLine),
SetBackgroundColor(Color::DarkCyan)
)
?;
for _i in 0..term_size.0 {
print!(" ");
}
let mut bottom_left_str: String = String::new();
let mut bottom_right_str: String = String::new();
bottom_left_str += &format!("CPU: {:.2}% ", total_cpu);
bottom_left_str += &format!("RAM: {} / {} ", utils::parse_size(&sys.ram.used), utils::parse_size(&sys.ram.total));
// let battery = sys.battery_life()?;
// bottom_right_str += &format!(
// "Battery: {:.2}%, {}",
// battery.remaining_capacity * 100.0,
// utils::parse_time(&battery.remaining_time)
// );
execute!(
stdout(),
MoveTo(0, term_size.1),
Clear(CurrentLine),
SetBackgroundColor(Color::DarkCyan)
)?;
print!(" ");
if term_size.0 > bottom_left_str.len() as u16 + bottom_right_str.len() as u16 + 2 {
print!("{}", bottom_left_str);
for _i in 0..(term_size.0 as usize - bottom_left_str.len() - bottom_right_str.len() - 2) {
print!(" ");
}
print!("{} ", bottom_right_str);
}
else if term_size.0 > bottom_left_str.len() as u16 + 1 {
print!("{}", bottom_left_str);
for _i in 0..(term_size.0 as usize - bottom_left_str.len() - 1) {
print!(" ");
}
} else {
bottom_left_str.truncate(term_size.0 as usize - 5);
bottom_left_str += "...";
print!("{} ", bottom_left_str);
}
execute!(stdout(), ResetColor)?;
}
}
// fn print_graph_stats(
// cpu_vec: &std::vec::Vec<f32>,
// max_width: u16,
// max_height: u16,
// x_offset: u16,
// y_offset: u16,
// ) {
// let mut index: usize = 0;
// let length = cpu_vec.len();
// for i in y_offset - max_height..y_offset {
// execute!(stdout(), MoveTo(0, i))?;
// execute!(stdout(), Clear(CurrentLine))?;
// }
// while index < max_width.into() && index < length {
// let height = max_height as f32 / 100_f32 * cpu_vec[&length - 1 - &index];
// let floored: u16 = height as u16;
// execute!(
// stdout(),
// MoveTo(x_offset - index as u16, y_offset - max_height + floored)
// )
// ?;
// if (height - floored as f32) <= 0.33 {
// print!("_");
// } else if (height - floored as f32) <= 0.66 {
// print!("-");
// } else {
// print!("¯");
// }
// index += 1;
// }
// }
/// Prints a bar that is as long as the percentage of the given terminal width
/// ### Parameters
/// * `max_width` - The max width of the bar
/// * `percentage` - The percentage of the max width the bar is going to be
fn print_bar(max_width: u16, percentage: f32, color: Color) -> Result<(), Box<dyn Error>> {
execute!(stdout(), SetForegroundColor(color))?;
let block_count = max_width as f32 / 100_f32 * percentage;
let mut index: u16 = 0;
let floored = block_count as u16;
// Print the full bars
while index < floored {
print!("⧛");
index = index + 1;
}
// Determine the last bar from decimal
if floored != 100 {
if (block_count - floored as f32) <= 0.5 {
print!("⧙");
} else {
print!(" ");
}
}
execute!(stdout(), ResetColor)?;
Ok(())
}
| 33.773333 | 122 | 0.474043 |
61e7220f33058a33c7fdc009b94eb324d95441a7
| 9,233 |
use crate::types::{Bytes, Index, Log, H160, H2048, H256, U256, U64};
use serde::{Deserialize, Serialize};
/// Description of a Transaction, pending or in the chain.
#[derive(Debug, Default, Clone, PartialEq, Deserialize, Serialize)]
// One container-level rule replaces the per-field `#[serde(rename = "...")]`
// attributes: every multi-word field was renamed to exactly its camelCase
// form, and camelCase of a single-word field is the field name itself.
#[serde(rename_all = "camelCase")]
pub struct Transaction {
    /// Hash
    pub hash: H256,
    /// Nonce
    pub nonce: U256,
    /// Block hash. None when pending.
    pub block_hash: Option<H256>,
    /// Block number. None when pending.
    pub block_number: Option<U64>,
    /// Transaction Index. None when pending.
    pub transaction_index: Option<Index>,
    /// Sender
    pub from: H160,
    /// Recipient (None when contract creation)
    pub to: Option<H160>,
    /// Transferred value
    pub value: U256,
    /// Gas Price
    pub gas_price: U256,
    /// Gas amount
    pub gas: U256,
    /// Input data
    pub input: Bytes,
    /// Raw transaction data; absent fields deserialize as `None`.
    #[serde(default)]
    pub raw: Option<Bytes>,
}
/// "Receipt" of an executed transaction: details of its execution.
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
pub struct Receipt {
/// Transaction hash.
#[serde(rename = "transactionHash")]
pub transaction_hash: H256,
/// Index within the block.
#[serde(rename = "transactionIndex")]
pub transaction_index: Index,
/// Hash of the block this transaction was included within.
#[serde(rename = "blockHash")]
pub block_hash: Option<H256>,
/// Number of the block this transaction was included within.
#[serde(rename = "blockNumber")]
pub block_number: Option<U64>,
/// Cumulative gas used within the block after this was executed.
#[serde(rename = "cumulativeGasUsed")]
pub cumulative_gas_used: U256,
/// Gas used by this transaction alone.
///
/// Gas used is `None` if the the client is running in light client mode.
#[serde(rename = "gasUsed")]
pub gas_used: Option<U256>,
/// Contract address created, or `None` if not a deployment.
#[serde(rename = "contractAddress")]
pub contract_address: Option<H160>,
/// Logs generated within this transaction.
pub logs: Vec<Log>,
/// Status: either 1 (success) or 0 (failure).
pub status: Option<U64>,
/// State root.
pub root: Option<H256>,
/// Logs bloom
#[serde(rename = "logsBloom")]
pub logs_bloom: H2048,
}
/// Raw bytes of a signed, but not yet sent, transaction together with the
/// decoded details of that transaction.
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
pub struct RawTransaction {
    /// Signed transaction as raw bytes (presumably RLP-encoded by the node —
    /// TODO confirm against the RPC that produces this value)
    pub raw: Bytes,
    /// Transaction details
    pub tx: RawTransactionDetails,
}
/// Details of a signed transaction
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
// Container-level camelCase replaces the per-field `#[serde(rename)]`
// attributes; every explicit rename was exactly the camelCase form.
#[serde(rename_all = "camelCase")]
pub struct RawTransactionDetails {
    /// Hash
    pub hash: H256,
    /// Nonce
    pub nonce: U256,
    /// Block hash. None when pending.
    pub block_hash: Option<H256>,
    /// Block number. None when pending.
    pub block_number: Option<U64>,
    /// Transaction Index. None when pending.
    pub transaction_index: Option<Index>,
    /// Sender
    pub from: Option<H160>,
    /// Recipient (None when contract creation)
    pub to: Option<H160>,
    /// Transferred value
    pub value: U256,
    /// Gas Price
    pub gas_price: U256,
    /// Gas amount
    pub gas: U256,
    /// Input data
    pub input: Bytes,
    /// ECDSA recovery id, set by Geth
    pub v: Option<U64>,
    /// ECDSA signature r, 32 bytes, set by Geth
    pub r: Option<U256>,
    /// ECDSA signature s, 32 bytes, set by Geth
    pub s: Option<U256>,
}
#[cfg(test)]
mod tests {
    use super::RawTransaction;
    use super::Receipt;
    use serde_json;
    /// Pre-EIP-658 receipt: carries `root` (null here) and no `status` field.
    #[test]
    fn test_deserialize_receipt() {
        let receipt_str = "{\"blockHash\":\"0x83eaba432089a0bfe99e9fc9022d1cfcb78f95f407821be81737c84ae0b439c5\",\"blockNumber\":\"0x38\",\"contractAddress\":\"0x03d8c4566478a6e1bf75650248accce16a98509f\",\"cumulativeGasUsed\":\"0x927c0\",\"gasUsed\":\"0x927c0\",\"logs\":[],\"logsBloom\":\"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"root\":null,\"transactionHash\":\"0x422fb0d5953c0c48cbb42fb58e1c30f5e150441c68374d70ca7d4f191fd56f26\",\"transactionIndex\":\"0x0\"}";
        let _receipt: Receipt = serde_json::from_str(receipt_str).unwrap();
    }
    /// Post-EIP-658 receipt: includes the `status` field.
    #[test]
    fn should_deserialize_receipt_with_status() {
        let receipt_str = r#"{
        "blockHash": "0x83eaba432089a0bfe99e9fc9022d1cfcb78f95f407821be81737c84ae0b439c5",
        "blockNumber": "0x38",
        "contractAddress": "0x03d8c4566478a6e1bf75650248accce16a98509f",
        "cumulativeGasUsed": "0x927c0",
        "gasUsed": "0x927c0",
        "logs": [],
        "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "root": null,
        "transactionHash": "0x422fb0d5953c0c48cbb42fb58e1c30f5e150441c68374d70ca7d4f191fd56f26",
        "transactionIndex": "0x0",
        "status": "0x1"
    }"#;
        let _receipt: Receipt = serde_json::from_str(receipt_str).unwrap();
    }
    /// Light-client receipt: `gasUsed` is null, exercising `Option<U256>`.
    #[test]
    fn should_deserialize_receipt_without_gas() {
        let receipt_str = r#"{
        "blockHash": "0x83eaba432089a0bfe99e9fc9022d1cfcb78f95f407821be81737c84ae0b439c5",
        "blockNumber": "0x38",
        "contractAddress": "0x03d8c4566478a6e1bf75650248accce16a98509f",
        "cumulativeGasUsed": "0x927c0",
        "gasUsed": null,
        "logs": [],
        "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "root": null,
        "transactionHash": "0x422fb0d5953c0c48cbb42fb58e1c30f5e150441c68374d70ca7d4f191fd56f26",
        "transactionIndex": "0x0",
        "status": "0x1"
    }"#;
        let _receipt: Receipt = serde_json::from_str(receipt_str).unwrap();
    }
    /// Parity-style signed tx: `v`/`r` may be absent (all three are Options).
    #[test]
    fn test_deserialize_signed_tx_parity() {
        // taken from RPC docs.
        let tx_str = r#"{
        "raw": "0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675",
        "tx": {
            "hash": "0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b",
            "nonce": "0x0",
            "blockHash": "0xbeab0aa2411b7ab17f30a99d3cb9c6ef2fc5426d6ad6fd9e2a26a6aed1d1055b",
            "blockNumber": "0x15df",
            "transactionIndex": "0x1",
            "from": "0x407d73d8a49eeb85d32cf465507dd71d507100c1",
            "to": "0x853f43d8a49eeb85d32cf465507dd71d507100c1",
            "value": "0x7f110",
            "gas": "0x7f110",
            "gasPrice": "0x09184e72a000",
            "input": "0x603880600c6000396000f300603880600c6000396000f3603880600c6000396000f360",
            "s": "0x777"
        }
    }"#;
        let _tx: RawTransaction = serde_json::from_str(tx_str).unwrap();
    }
    /// Geth-style signed tx: pending (no block fields), full v/r/s present.
    #[test]
    fn test_deserialize_signed_tx_geth() {
        let tx_str = r#"{
        "raw": "0xf85d01018094f3b3138e5eb1c75b43994d1bb760e2f9f735789680801ca06484d00575e961a7db35ebe5badaaca5cb7ee65d1f2f22f22da87c238b99d30da07a85d65797e4b555c1d3f64beebb2cb6f16a6fbd40c43cc48451eaf85305f66e",
        "tx": {
            "gas": "0x0",
            "gasPrice": "0x1",
            "hash": "0x0a32fb4e18bc6f7266a164579237b1b5c74271d453c04eab70444ca367d38418",
            "input": "0x",
            "nonce": "0x1",
            "to": "0xf3b3138e5eb1c75b43994d1bb760e2f9f7357896",
            "r": "0x6484d00575e961a7db35ebe5badaaca5cb7ee65d1f2f22f22da87c238b99d30d",
            "s": "0x7a85d65797e4b555c1d3f64beebb2cb6f16a6fbd40c43cc48451eaf85305f66e",
            "v": "0x1c",
            "value": "0x0"
        }
    }"#;
        let _tx: RawTransaction = serde_json::from_str(tx_str).unwrap();
    }
}
| 42.548387 | 944 | 0.708004 |
f7a33d1bac5b9aacd439f255ca67735e5760833f
| 686 |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// kani-verify-fail
// Cast a concrete ref to a trait raw pointer.
/// Trait used as the target of the `&T as *const dyn Trait` cast under test.
pub trait Subscriber {
    /// Produces a value derived from the subscriber's state.
    fn process(&self) -> u32;
}
/// Concrete `Subscriber` implementation holding a single counter.
struct DummySubscriber {
    val: u32,
}
impl DummySubscriber {
fn new() -> Self {
DummySubscriber { val: 0 }
}
}
impl Subscriber for DummySubscriber {
    /// Returns the stored counter plus one.
    fn process(&self) -> u32 {
        self.val + 1
    }
}
fn main() {
    let _d = DummySubscriber::new();
    // Cast a concrete reference to a trait-object raw pointer.
    let _s = &_d as *const dyn Subscriber;
    // `process()` returns val + 1 == 1, so this assertion fails on purpose:
    // the file is marked `kani-verify-fail` and exists to check that the
    // verifier reports the failure. Do not "fix" the expected value.
    assert!(unsafe { _s.as_ref().unwrap().process() } == 3); // Should be == 1
}
| 20.787879 | 78 | 0.609329 |
e83f43f43dfe7199a493f219719d7463e6914724
| 1,115 |
use {
super::TranslateError,
crate::{ast::DataType, result::Result},
sqlparser::ast::DataType as SqlDataType,
};
/// Translates a `sqlparser` [`SqlDataType`] into the engine's [`DataType`].
///
/// Returns a [`TranslateError::UnsupportedDataType`] error for any SQL type
/// that has no counterpart here, including unrecognized custom type names.
pub fn translate_data_type(sql_data_type: &SqlDataType) -> Result<DataType> {
    match sql_data_type {
        SqlDataType::Boolean => Ok(DataType::Boolean),
        SqlDataType::Int(_) => Ok(DataType::Int),
        SqlDataType::Float(_) => Ok(DataType::Float),
        SqlDataType::Text => Ok(DataType::Text),
        SqlDataType::Date => Ok(DataType::Date),
        SqlDataType::Timestamp => Ok(DataType::Timestamp),
        SqlDataType::Time => Ok(DataType::Time),
        SqlDataType::Interval => Ok(DataType::Interval),
        SqlDataType::Uuid => Ok(DataType::UUID),
        SqlDataType::Custom(name) => {
            // A custom type name is a (possibly multi-part) identifier; only
            // the first segment is inspected, case-insensitively.
            let name = name.0.first().map(|v| v.value.to_uppercase());
            match name.as_deref() {
                Some("MAP") => Ok(DataType::Map),
                _ => Err(TranslateError::UnsupportedDataType(sql_data_type.to_string()).into()),
            }
        }
        _ => Err(TranslateError::UnsupportedDataType(sql_data_type.to_string()).into()),
    }
}
| 38.448276 | 96 | 0.602691 |
62da823cdc246df47717e00be9469120897851b4
| 11,670 |
use crate::activations::Activations;
use crate::connection::Connection;
use crate::history::History;
use crate::hyper_tensor::HyperTensor;
use crate::node::Node;
use crate::settings::HyperSettings;
use crate::settings::Settings;
use rand::prelude::*;
use rand::seq::SliceRandom;
use rand::thread_rng;
use std::clone::Clone;
use std::collections::HashMap;
use std::fmt;
use std::vec::Vec;
// Main Genome Class
// Main Genome Class: a NEAT genome holding the network topology and fitness.
pub struct Genome {
    inputs: u32,                // Number of Inputs (excluding the bias node)
    outputs: u32,               // Number of Outputs
    pub nodes: Vec<Node>,       // Vector of Nodes, kept sorted by x position
    pub conns: Vec<Connection>, // Vector of Connections, sorted by innovation number
    pub fitness: f64,           // Fitness of this Genome (never negative)
}
impl fmt::Debug for Genome {
    // Renders the genome as a nested "Genome { Nodes { .. } Conn { .. } }"
    // listing, one node/connection per line.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut res = String::from("Genome {\n  Nodes {");
        for node in &self.nodes {
            res += &format!("\n    {:?},", node);
        }
        res += "\n  }\n  Conn {";
        for conn in &self.conns {
            res += &format!("\n    {:?},", conn);
        }
        res += "\n  }\n}";
        write!(f, "{}", res)
    }
}
impl Genome {
    /// Builds a genome with `inputs` input nodes plus one bias node (x = 0)
    /// and `outputs` output nodes (x = 1), fully connected input-to-output
    /// with random weights.
    ///
    /// When `crossover` is true, returns an empty shell (no nodes or
    /// connections) for [`Genome::crossover`] to fill in.
    pub fn new(inputs: u32, outputs: u32, crossover: bool) -> Self {
        let mut genome = Self {
            inputs,
            outputs,
            nodes: Vec::with_capacity((inputs + outputs + 1) as usize),
            conns: Vec::with_capacity(((inputs + 1) * outputs) as usize),
            fitness: 0.,
        };
        if crossover {
            return genome;
        }
        let mut rng = thread_rng();
        // Spread the input + bias nodes vertically; innovation ids 1..=inputs+1.
        // NOTE(review): dy_curr reaches 1.0 on the last node — confirm the
        // intended spacing includes the boundary.
        let mut dy = 1. / (inputs + 1) as f64;
        let mut dy_curr = dy;
        for i in 1..=(inputs + 1) {
            genome
                .nodes
                .push(Node::new(i, 0., dy_curr, rng.gen::<Activations>()));
            dy_curr += dy;
        }
        // Output nodes get innovation ids inputs+2..inputs+outputs+1.
        dy = 1. / (outputs + 1) as f64;
        dy_curr = dy;
        for i in (inputs + 2)..(inputs + outputs + 2) {
            genome
                .nodes
                .push(Node::new(i, 1., dy_curr, rng.gen::<Activations>()));
            dy_curr += dy;
        }
        // Fully connect every input/bias node to every output node; connection
        // innovation numbers are assigned sequentially from 1.
        let mut ctr = 1;
        for i in 0..(inputs + 1) as usize {
            let from = genome.nodes[i].innov;
            for o in (inputs + 1) as usize..genome.nodes.len() {
                let to = genome.nodes[o].innov;
                genome
                    .conns
                    .push(Connection::new(ctr, from, to, rng.gen::<f64>(), true));
                ctr += 1;
            }
        }
        genome
    }
pub fn add_fitness(&mut self, fit: f64) {
let fitness = self.fitness + fit;
self.fitness = if fitness < 0. { 0. } else { fitness };
}
    /// Propagates `input` through the network and returns a single output in
    /// (-1, 1).
    ///
    /// Relies on `self.nodes` being sorted by x position so each node's value
    /// is complete before it is propagated. Node `inputs + 1` is the bias
    /// (fixed at 1.0); only the first output node (`inputs + 2`) is read.
    ///
    /// Errors if `input.len()` differs from the genome's input count, or if a
    /// node is reached before any value flowed into it.
    pub fn feed_forward(&self, input: &Vec<f64>) -> Result<f64, &'static str> {
        if input.len() != self.inputs as usize {
            return Err("Provided input size doesn't match Genome input size");
        }
        // Accumulated pre-activation value per node, keyed by innovation id.
        let mut node_vals = HashMap::<u32, f64>::new();
        let mut i = 1;
        for val in input {
            node_vals.insert(i, *val);
            i += 1;
        }
        node_vals.insert(self.inputs + 1, 1.);
        for node in self.nodes.iter() {
            let from_val = match node_vals.get(&node.innov) {
                Some(v) => *v,
                None => return Err("No val"),
            };
            let feed_forward_val = node.activate(from_val);
            for conn in self.conns.iter().filter(|&c| c.from == node.innov) {
                // Ensure the target has an entry even for disabled connections.
                let to_val = node_vals.entry(conn.to).or_insert(0.);
                if !conn.enabled {
                    continue;
                }
                *to_val += feed_forward_val * conn.weight;
            }
        }
        // Steepened sigmoid (slope 4.9) rescaled from (0, 1) to (-1, 1).
        Ok(((1. / (1. + (*node_vals.get(&(self.inputs + 2)).unwrap() * -4.9).exp())) - 0.5) * 2.)
    }
pub fn hyper_feed_forward<T>(
&self,
input: HyperTensor,
sets: &HyperSettings,
third_param_fn: Option<T>,
) -> Result<Vec<Vec<f64>>, &'static str>
where
T: Fn(f64, f64) -> f64,
{
if self.inputs == 4 && third_param_fn.is_some() {
return Err("Didn't expect a Third Parameter Function");
} else if self.inputs == 6 && third_param_fn.is_none() {
return Err("Expected a Third Parameter Function. Got none");
}
let dy = (input.m - 1.) / 2.;
let dx = (input.n - 1.) / 2.;
let x_dir = (0..input.n as u64)
.map(|v| -1.0 + v as f64 * dx)
.collect::<Vec<f64>>();
let y_dir = (0..input.m as u64)
.map(|v| -1.0 + v as f64 * dy)
.collect::<Vec<f64>>();
let mut res = HyperTensor::zeros(input.m as usize, input.n as usize).unwrap();
if self.inputs == 6 {
let tpfn = third_param_fn.unwrap();
for (&y1, row_inp) in y_dir.iter().zip(input.values.iter()) {
for (&x1, val_inp) in x_dir.iter().zip(row_inp.iter()) {
for (&y2, row_out) in y_dir.iter().zip(res.values.iter_mut()) {
for (&x2, val_out) in x_dir.iter().zip(row_out.iter_mut()) {
let inp = vec![x1, y1, tpfn(x1, y1), x2, y2, tpfn(x2, y2)];
let cppn_out = self.feed_forward(&inp).unwrap();
*val_out += sets.scaled_weight(cppn_out) * val_inp;
}
}
}
}
} else {
for (&y1, row_inp) in y_dir.iter().zip(input.values.iter()) {
for (&x1, val_inp) in x_dir.iter().zip(row_inp.iter()) {
for (&y2, row_out) in y_dir.iter().zip(res.values.iter_mut()) {
for (&x2, val_out) in x_dir.iter().zip(row_out.iter_mut()) {
let inp = vec![x1, y1, x2, y2];
let cppn_out = self.feed_forward(&inp).unwrap();
*val_out += sets.scaled_weight(cppn_out) * val_inp;
}
}
}
}
}
Ok(res.values)
}
pub fn mutate(&mut self, hist: &mut History, sets: &Settings) {
let mut rng = thread_rng();
self.conns.iter_mut().for_each(|c| {
if rng.gen::<f64>() < sets.wt_mut_rate {
c.mutate_weight(sets);
}
});
if rng.gen::<f64>() < sets.conn_mut_rate {
self.add_conn(hist);
}
if rng.gen::<f64>() < sets.node_mut_rate {
self.add_node(hist);
}
self.conns.sort_unstable_by(|a, b| a.innov.cmp(&b.innov));
}
fn add_conn(&mut self, hist: &mut History) {
let mut rng = thread_rng();
let from_node_pool = self
.nodes
.iter()
.filter(|node| {
if node.x == 1. {
return false;
}
let to_nodes = self
.nodes
.iter()
.filter(|n| {
n.x > node.x
&& self
.conns
.iter()
.find(|c| c.from == node.innov && c.to == n.innov)
.is_none()
})
.collect::<Vec<&Node>>();
to_nodes.len() > 0
})
.collect::<Vec<&Node>>();
if from_node_pool.len() == 0 {
return;
}
let from_node = from_node_pool.choose(&mut rng).unwrap();
let to_node_pool = self
.nodes
.iter()
.filter(|n| {
if n.x <= from_node.x {
return false;
}
self.conns
.iter()
.find(|c| c.from == from_node.innov && c.to == n.innov)
.is_none()
})
.collect::<Vec<&Node>>();
let to_node = to_node_pool.choose(&mut rng).unwrap();
let innov = hist.mutate_conn(from_node, to_node);
let new_conn = Connection::new(
innov,
from_node.innov,
to_node.innov,
rng.gen::<f64>(),
true,
);
self.conns.push(new_conn);
}
    /// Mutation: splits a random connection by inserting a new node halfway
    /// between its endpoints. The old connection is disabled; the incoming
    /// replacement gets weight 1 and the outgoing one inherits the old weight,
    /// so the network's initial behavior is preserved.
    fn add_node(&mut self, hist: &mut History) {
        let mut rng = thread_rng();
        let conn_to_mutate = self.conns.iter_mut().choose(&mut rng).unwrap();
        // History assigns consistent innovation ids for the node and both
        // replacement connections across the population.
        let details = hist.mutate_node(&conn_to_mutate);
        let from_node = self
            .nodes
            .iter()
            .find(|n| n.innov == conn_to_mutate.from)
            .unwrap();
        let to_node = self
            .nodes
            .iter()
            .find(|n| n.innov == conn_to_mutate.to)
            .unwrap();
        // Place the new node at the midpoint of the split connection.
        let x = (from_node.x + to_node.x) / 2.;
        let y = (from_node.y + to_node.y) / 2.;
        let new_node = Node::new(details.node, x, y, rand::random::<Activations>());
        let in_conn = Connection::new(details.in_conn, from_node.innov, new_node.innov, 1., true);
        let out_conn = Connection::new(
            details.out_conn,
            new_node.innov,
            to_node.innov,
            conn_to_mutate.weight,
            true,
        );
        conn_to_mutate.disable();
        self.nodes.push(new_node);
        self.conns.push(in_conn);
        self.conns.push(out_conn);
        // Restore the x-ordering that feed_forward relies on.
        self.nodes
            .sort_unstable_by(|a, b| a.x.partial_cmp(&b.x).unwrap());
    }
    /// NEAT crossover: produces an offspring from two parents.
    ///
    /// The fitter parent ("male") supplies the topology; matching genes (same
    /// innovation number) are inherited from either parent with equal
    /// probability, and their enabled state is re-rolled via `sets` when it
    /// differs between parents or is disabled in both. Disjoint/excess genes
    /// come from the fitter parent only, as do the nodes.
    pub fn crossover(parent1: &Self, parent2: &Self, sets: &Settings) -> Self {
        let (male, female) = if parent1.fitness >= parent2.fitness {
            (parent1, parent2)
        } else {
            (parent2, parent1)
        };
        let mut offspring_genes = Vec::<Connection>::with_capacity(male.conns.len());
        let mut rng = thread_rng();
        // Index the less-fit parent's genes by innovation number for matching.
        let mut f_genes = HashMap::<u32, &Connection>::new();
        female.conns.iter().for_each(|c| {
            f_genes.insert(c.innov, c);
        });
        for conn in &male.conns {
            if f_genes.contains_key(&conn.innov) {
                let f_gene = *f_genes.get(&conn.innov).unwrap();
                // Matching gene: pick either parent's copy at random.
                let mut gene = if rng.gen::<f64>() < 0.5 {
                    f_gene.clone()
                } else {
                    conn.clone()
                };
                let m_e = conn.enabled;
                let f_e = f_gene.enabled;
                if (!f_e && m_e) || (!m_e && f_e) {
                    // Enabled in exactly one parent: re-enable with this rate.
                    if rng.gen::<f64>() < sets.off_gene_on_rate {
                        gene.enable();
                    } else {
                        gene.disable();
                    }
                } else if !f_e && !m_e {
                    // Disabled in both parents: small chance of reactivation.
                    if rng.gen::<f64>() < sets.off_in_both_on_rate {
                        gene.enable();
                    } else {
                        gene.disable();
                    }
                }
                offspring_genes.push(gene);
            } else {
                // Disjoint/excess gene: inherited from the fitter parent as-is.
                offspring_genes.push(conn.clone());
            }
        }
        // `crossover = true` returns an empty shell to fill in.
        let mut offspring = Self::new(male.inputs, male.outputs, true);
        offspring.conns = offspring_genes;
        offspring.nodes = male.nodes.clone();
        offspring
    }
impl Clone for Genome {
    /// Deep-copies the genome, duplicating its node and connection lists.
    fn clone(&self) -> Self {
        Self {
            nodes: self.nodes.to_vec(),
            conns: self.conns.to_vec(),
            inputs: self.inputs,
            outputs: self.outputs,
            fitness: self.fitness,
        }
    }
}
| 29.469697 | 98 | 0.453642 |
56c4baadf22b4bb3d9643fd146fd587cb48a9a22
| 48 |
// ignore-test
#![cfg_attr(all(), cfg(FALSE))]
| 12 | 31 | 0.604167 |
878574fc4fa8732bd91ea37f7440c673fbaf2fae
| 1,590 |
//Base
use bevy::prelude::*;
use bevy_config_cam::*;
fn main() {
    App::new()
        .insert_resource(Msaa { samples: 4 })
        .add_plugins(DefaultPlugins)
        // ConfigCam supplies the camera/controller systems for this demo.
        .add_plugin(ConfigCam)
        .insert_resource(MovementSettings {
            sensitivity: 0.00015, // default: 0.00012
            speed: 12.0,          // default: 12.0
            ..Default::default()
        })
        .insert_resource(PlayerSettings {
            pos: Vec3::new(2., 0., 0.),
            player_asset: "models/craft_speederA.glb#Scene0",
            ..Default::default()
        })
        .add_startup_system(setup)
        .run();
}
/// Set up a simple 3D scene: a ground plane, a cube registered as the camera
/// target in `CamLogic`, and a point light.
fn setup(
    mut commands: Commands,
    mut meshes: ResMut<Assets<Mesh>>,
    mut materials: ResMut<Assets<StandardMaterial>>,
    mut cl: ResMut<CamLogic>,
) {
    // plane
    commands.spawn_bundle(PbrBundle {
        mesh: meshes.add(Mesh::from(shape::Plane { size: 5.0 })),
        material: materials.add(Color::rgb(0.3, 0.5, 0.3).into()),
        ..Default::default()
    });
    // cube, set as target: CamLogic stores the entity id so the camera can
    // track it.
    cl.target = Some(
        commands
            .spawn_bundle(PbrBundle {
                mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
                material: materials.add(Color::rgb(0.8, 0.7, 0.6).into()),
                transform: Transform::from_xyz(0.0, 0.5, 0.0),
                ..Default::default()
            })
            .id(),
    );
    // light
    commands.spawn_bundle(PointLightBundle {
        transform: Transform::from_xyz(4.0, 8.0, 4.0),
        ..Default::default()
    });
}
| 28.392857 | 74 | 0.535849 |
6a3f6e23fc9a90b423fab6ada7407735756313e4
| 106 |
/// Routes accessible without user login (public endpoints).
pub mod primary;
/// Routes requiring user login (authenticated endpoints).
pub mod users;
| 26.5 | 41 | 0.754717 |
bfa0fd4e525b92604a201432a5716c35c95d7432
| 710 |
#![no_main]
#[macro_use]
extern crate libfuzzer_sys;
extern crate biscuit;
extern crate serde_json;
use biscuit::{Empty, JWE};
use biscuit::jwk::JWK;
use biscuit::jwa::{KeyManagementAlgorithm, ContentEncryptionAlgorithm};
fuzz_target!(|data: &[u8]| {
let key: JWK<Empty> = JWK::new_octect_key(&vec![0; 256 / 8], Default::default());
let token = std::str::from_utf8(data);
if token.is_err() {
return;
}
let token = token.unwrap();
let token: JWE<serde_json::Value, biscuit::Empty, biscuit::Empty> = JWE::new_encrypted(&token);
let _ = token.into_decrypted(
&key,
KeyManagementAlgorithm::A256GCMKW,
ContentEncryptionAlgorithm::A256GCM,
);
});
| 25.357143 | 99 | 0.661972 |
6753c0c7a59020af619ecf5a3874062e15d7c2ae
| 10,526 |
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Serializes an `AddTagsToResourceInput` into `object` as the JSON request
/// body; `None` fields are omitted. (Generated code — annotations only.)
pub fn serialize_structure_crate_input_add_tags_to_resource_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::AddTagsToResourceInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_1) = &input.resource_arn {
        object.key("ResourceArn").string(var_1.as_str());
    }
    if let Some(var_2) = &input.tag_list {
        // Each tag is serialized as a nested JSON object.
        let mut array_3 = object.key("TagList").start_array();
        for item_4 in var_2 {
            {
                let mut object_5 = array_3.value().start_object();
                crate::json_ser::serialize_structure_crate_model_tag(&mut object_5, item_4)?;
                object_5.finish();
            }
        }
        array_3.finish();
    }
    Ok(())
}
/// Serializes a `CreateHapgInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_create_hapg_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::CreateHapgInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_6) = &input.label {
        object.key("Label").string(var_6.as_str());
    }
    Ok(())
}
/// Serializes a `CreateHsmInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_create_hsm_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::CreateHsmInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_7) = &input.subnet_id {
        object.key("SubnetId").string(var_7.as_str());
    }
    if let Some(var_8) = &input.ssh_key {
        object.key("SshKey").string(var_8.as_str());
    }
    if let Some(var_9) = &input.eni_ip {
        object.key("EniIp").string(var_9.as_str());
    }
    if let Some(var_10) = &input.iam_role_arn {
        object.key("IamRoleArn").string(var_10.as_str());
    }
    if let Some(var_11) = &input.external_id {
        object.key("ExternalId").string(var_11.as_str());
    }
    if let Some(var_12) = &input.subscription_type {
        object.key("SubscriptionType").string(var_12.as_str());
    }
    if let Some(var_13) = &input.client_token {
        object.key("ClientToken").string(var_13.as_str());
    }
    if let Some(var_14) = &input.syslog_ip {
        object.key("SyslogIp").string(var_14.as_str());
    }
    Ok(())
}
/// Serializes a `CreateLunaClientInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_create_luna_client_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::CreateLunaClientInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_15) = &input.label {
        object.key("Label").string(var_15.as_str());
    }
    if let Some(var_16) = &input.certificate {
        object.key("Certificate").string(var_16.as_str());
    }
    Ok(())
}
/// Serializes a `DeleteHapgInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_delete_hapg_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::DeleteHapgInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_17) = &input.hapg_arn {
        object.key("HapgArn").string(var_17.as_str());
    }
    Ok(())
}
/// Serializes a `DeleteHsmInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_delete_hsm_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::DeleteHsmInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_18) = &input.hsm_arn {
        object.key("HsmArn").string(var_18.as_str());
    }
    Ok(())
}
/// Serializes a `DeleteLunaClientInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_delete_luna_client_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::DeleteLunaClientInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_19) = &input.client_arn {
        object.key("ClientArn").string(var_19.as_str());
    }
    Ok(())
}
/// Serializes a `DescribeHapgInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_describe_hapg_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::DescribeHapgInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_20) = &input.hapg_arn {
        object.key("HapgArn").string(var_20.as_str());
    }
    Ok(())
}
/// Serializes a `DescribeHsmInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_describe_hsm_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::DescribeHsmInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_21) = &input.hsm_arn {
        object.key("HsmArn").string(var_21.as_str());
    }
    if let Some(var_22) = &input.hsm_serial_number {
        object.key("HsmSerialNumber").string(var_22.as_str());
    }
    Ok(())
}
/// Serializes a `DescribeLunaClientInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_describe_luna_client_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::DescribeLunaClientInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_23) = &input.client_arn {
        object.key("ClientArn").string(var_23.as_str());
    }
    if let Some(var_24) = &input.certificate_fingerprint {
        object.key("CertificateFingerprint").string(var_24.as_str());
    }
    Ok(())
}
/// Serializes a `GetConfigInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_get_config_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::GetConfigInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_25) = &input.client_arn {
        object.key("ClientArn").string(var_25.as_str());
    }
    if let Some(var_26) = &input.client_version {
        object.key("ClientVersion").string(var_26.as_str());
    }
    if let Some(var_27) = &input.hapg_list {
        // HAPG ARNs are emitted as a plain JSON string array.
        let mut array_28 = object.key("HapgList").start_array();
        for item_29 in var_27 {
            {
                array_28.value().string(item_29.as_str());
            }
        }
        array_28.finish();
    }
    Ok(())
}
/// Serializes a `ListHapgsInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_list_hapgs_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::ListHapgsInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_30) = &input.next_token {
        object.key("NextToken").string(var_30.as_str());
    }
    Ok(())
}
/// Serializes a `ListHsmsInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_list_hsms_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::ListHsmsInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_31) = &input.next_token {
        object.key("NextToken").string(var_31.as_str());
    }
    Ok(())
}
/// Serializes a `ListLunaClientsInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_list_luna_clients_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::ListLunaClientsInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_32) = &input.next_token {
        object.key("NextToken").string(var_32.as_str());
    }
    Ok(())
}
/// Serializes a `ListTagsForResourceInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_list_tags_for_resource_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::ListTagsForResourceInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_33) = &input.resource_arn {
        object.key("ResourceArn").string(var_33.as_str());
    }
    Ok(())
}
/// Serializes a `ModifyHapgInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_modify_hapg_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::ModifyHapgInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_34) = &input.hapg_arn {
        object.key("HapgArn").string(var_34.as_str());
    }
    if let Some(var_35) = &input.label {
        object.key("Label").string(var_35.as_str());
    }
    if let Some(var_36) = &input.partition_serial_list {
        // Partition serials are emitted as a plain JSON string array.
        let mut array_37 = object.key("PartitionSerialList").start_array();
        for item_38 in var_36 {
            {
                array_37.value().string(item_38.as_str());
            }
        }
        array_37.finish();
    }
    Ok(())
}
/// Serializes a `ModifyHsmInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_modify_hsm_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::ModifyHsmInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_39) = &input.hsm_arn {
        object.key("HsmArn").string(var_39.as_str());
    }
    if let Some(var_40) = &input.subnet_id {
        object.key("SubnetId").string(var_40.as_str());
    }
    if let Some(var_41) = &input.eni_ip {
        object.key("EniIp").string(var_41.as_str());
    }
    if let Some(var_42) = &input.iam_role_arn {
        object.key("IamRoleArn").string(var_42.as_str());
    }
    if let Some(var_43) = &input.external_id {
        object.key("ExternalId").string(var_43.as_str());
    }
    if let Some(var_44) = &input.syslog_ip {
        object.key("SyslogIp").string(var_44.as_str());
    }
    Ok(())
}
/// Serializes a `ModifyLunaClientInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_modify_luna_client_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::ModifyLunaClientInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_45) = &input.client_arn {
        object.key("ClientArn").string(var_45.as_str());
    }
    if let Some(var_46) = &input.certificate {
        object.key("Certificate").string(var_46.as_str());
    }
    Ok(())
}
/// Serializes a `RemoveTagsFromResourceInput` into `object`; `None` fields are omitted.
pub fn serialize_structure_crate_input_remove_tags_from_resource_input(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::input::RemoveTagsFromResourceInput,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_47) = &input.resource_arn {
        object.key("ResourceArn").string(var_47.as_str());
    }
    if let Some(var_48) = &input.tag_key_list {
        // Tag keys are emitted as a plain JSON string array.
        let mut array_49 = object.key("TagKeyList").start_array();
        for item_50 in var_48 {
            {
                array_49.value().string(item_50.as_str());
            }
        }
        array_49.finish();
    }
    Ok(())
}
/// Serializes a `Tag` model as a `{"Key": ..., "Value": ...}` JSON object;
/// `None` fields are omitted.
pub fn serialize_structure_crate_model_tag(
    object: &mut aws_smithy_json::serialize::JsonObjectWriter,
    input: &crate::model::Tag,
) -> Result<(), aws_smithy_http::operation::SerializationError> {
    if let Some(var_51) = &input.key {
        object.key("Key").string(var_51.as_str());
    }
    if let Some(var_52) = &input.value {
        object.key("Value").string(var_52.as_str());
    }
    Ok(())
}
| 35.560811 | 93 | 0.66616 |
eb2f78416ad0546bf9898ee87a29392baad6ee00
| 16,749 |
//! Traits and structs for implementing circuit components.
use std::{convert::TryInto, fmt, marker::PhantomData};
use ff::Field;
use crate::{
arithmetic::FieldExt,
plonk::{Advice, Any, Assigned, Column, Error, Fixed, Instance, Selector, TableColumn},
};
pub mod floor_planner;
pub use floor_planner::single_pass::SimpleFloorPlanner;
pub mod layouter;
/// A chip implements a set of instructions that can be used by gadgets.
///
/// The chip stores state that is required at circuit synthesis time in
/// [`Chip::Config`], which can be fetched via [`Chip::config`].
///
/// The chip also loads any fixed configuration needed at synthesis time
/// using its own implementation of `load`, and stores it in [`Chip::Loaded`].
/// This can be accessed via [`Chip::loaded`].
pub trait Chip<F: FieldExt>: Sized {
    /// A type that holds the configuration for this chip, and any other state it may need
    /// during circuit synthesis, that can be derived during [`Circuit::configure`].
    ///
    /// [`Circuit::configure`]: crate::plonk::Circuit::configure
    type Config: fmt::Debug + Clone;
    /// A type that holds any general chip state that needs to be loaded at the start of
    /// [`Circuit::synthesize`]. This might simply be `()` for some chips.
    ///
    /// [`Circuit::synthesize`]: crate::plonk::Circuit::synthesize
    type Loaded: fmt::Debug + Clone;
    /// The chip holds its own configuration; this returns a reference to it.
    fn config(&self) -> &Self::Config;
    /// Provides access to general chip state loaded at the beginning of circuit
    /// synthesis.
    ///
    /// Panics if called before `Chip::load`.
    fn loaded(&self) -> &Self::Loaded;
}
/// Index of a region in a layouter.
///
/// Newtype over `usize`; dereferences to the raw index via `Deref`.
#[derive(Clone, Copy, Debug)]
pub struct RegionIndex(usize);
impl From<usize> for RegionIndex {
fn from(idx: usize) -> RegionIndex {
RegionIndex(idx)
}
}
// Lets a `RegionIndex` be used directly wherever a `usize` index is expected.
impl std::ops::Deref for RegionIndex {
    type Target = usize;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
/// Starting row of a region in a layouter.
///
/// Newtype over `usize`; dereferences to the raw row via `Deref`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct RegionStart(usize);
impl From<usize> for RegionStart {
fn from(idx: usize) -> RegionStart {
RegionStart(idx)
}
}
// Lets a `RegionStart` be used directly wherever a `usize` row is expected.
impl std::ops::Deref for RegionStart {
    type Target = usize;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
/// A pointer to a cell within a circuit, addressed by region, row offset
/// within that region, and column.
#[derive(Clone, Copy, Debug)]
pub struct Cell {
    /// Identifies the region in which this cell resides.
    region_index: RegionIndex,
    /// The relative offset of this cell within its region.
    row_offset: usize,
    /// The column of this cell.
    column: Column<Any>,
}
/// An assigned cell: a [`Cell`] location paired with the value assigned
/// there, if known.
#[derive(Clone, Debug)]
pub struct AssignedCell<V, F: Field> {
    // The assigned value, when known at this stage (e.g. during witnessing).
    value: Option<V>,
    // The location of the assignment in the circuit.
    cell: Cell,
    // Ties the unused field-element type parameter `F` to the struct.
    _marker: PhantomData<F>,
}
impl<V, F: Field> AssignedCell<V, F> {
    /// Returns the value of the [`AssignedCell`], if known.
    pub fn value(&self) -> Option<&V> {
        self.value.as_ref()
    }
    /// Returns the cell (the location of this assignment).
    pub fn cell(&self) -> Cell {
        self.cell
    }
}
impl<V, F: Field> AssignedCell<V, F>
where
    for<'v> Assigned<F>: From<&'v V>,
{
    /// Returns the field element value of the [`AssignedCell`], converted
    /// through the `From<&V>` impl on [`Assigned`].
    pub fn value_field(&self) -> Option<Assigned<F>> {
        self.value().map(|v| v.into())
    }
}
impl<F: Field> AssignedCell<Assigned<F>, F> {
/// Evaluates this assigned cell's value directly, performing an unbatched inversion
/// if necessary.
///
/// If the denominator is zero, the returned cell's value is zero.
pub fn evaluate(self) -> AssignedCell<F, F> {
AssignedCell {
value: self.value.map(|v| v.evaluate()),
cell: self.cell,
_marker: Default::default(),
}
}
}
impl<V: Clone, F: Field> AssignedCell<V, F>
where
    for<'v> Assigned<F>: From<&'v V>,
{
    /// Copies the value to a given advice cell and constrains them to be equal.
    ///
    /// Returns an error if either this cell or the given cell are in columns
    /// where equality has not been enabled.
    pub fn copy_advice<A, AR>(
        &self,
        annotation: A,
        region: &mut Region<'_, F>,
        column: Column<Advice>,
        offset: usize,
    ) -> Result<Self, Error>
    where
        A: Fn() -> AR,
        AR: Into<String>,
    {
        // Assign our value into the target cell; a missing value surfaces as
        // a synthesis error.
        let assigned_cell = region.assign_advice(annotation, column, offset, || {
            self.value.clone().ok_or(Error::Synthesis)
        })?;
        // Tie the new cell to this one with an equality constraint.
        region.constrain_equal(assigned_cell.cell(), self.cell())?;
        Ok(assigned_cell)
    }
}
/// A region of the circuit in which a [`Chip`] can assign cells.
///
/// Inside a region, the chip may freely use relative offsets; the [`Layouter`] will
/// treat these assignments as a single "region" within the circuit.
///
/// The [`Layouter`] is allowed to optimise between regions as it sees fit. Chips must use
/// [`Region::constrain_equal`] to copy in variables assigned in other regions.
///
/// TODO: It would be great if we could constrain the columns in these types to be
/// "logical" columns that are guaranteed to correspond to the chip (and have come from
/// `Chip::Config`).
#[derive(Debug)]
pub struct Region<'r, F: Field> {
    // Backing layouter object that records the actual assignments.
    region: &'r mut dyn layouter::RegionLayouter<F>,
}
impl<'r, F: Field> From<&'r mut dyn layouter::RegionLayouter<F>> for Region<'r, F> {
    /// Wraps a raw region layouter in the user-facing `Region` API.
    fn from(region: &'r mut dyn layouter::RegionLayouter<F>) -> Self {
        Self { region }
    }
}
impl<'r, F: Field> Region<'r, F> {
    /// Enables a selector at the given offset.
    pub(crate) fn enable_selector<A, AR>(
        &mut self,
        annotation: A,
        selector: &Selector,
        offset: usize,
    ) -> Result<(), Error>
    where
        A: Fn() -> AR,
        AR: Into<String>,
    {
        // The annotation is forwarded as a lazy closure so the string is only
        // built when the backend actually needs it.
        self.region
            .enable_selector(&|| annotation().into(), selector, offset)
    }
    /// Assign an advice column value (witness).
    ///
    /// Even though `to` has `FnMut` bounds, it is guaranteed to be called at most once.
    pub fn assign_advice<'v, V, VR, A, AR>(
        &'v mut self,
        annotation: A,
        column: Column<Advice>,
        offset: usize,
        mut to: V,
    ) -> Result<AssignedCell<VR, F>, Error>
    where
        V: FnMut() -> Result<VR, Error> + 'v,
        for<'vr> Assigned<F>: From<&'vr VR>,
        A: Fn() -> AR,
        AR: Into<String>,
    {
        // The backend only receives `Assigned<F>`; capture the original `VR`
        // value out of the closure so it can be returned to the caller.
        let mut value = None;
        let cell =
            self.region
                .assign_advice(&|| annotation().into(), column, offset, &mut || {
                    let v = to()?;
                    let value_f = (&v).into();
                    value = Some(v);
                    Ok(value_f)
                })?;
        Ok(AssignedCell {
            value,
            cell,
            _marker: PhantomData,
        })
    }
    /// Assigns a constant value to the column `advice` at `offset` within this region.
    ///
    /// The constant value will be assigned to a cell within one of the fixed columns
    /// configured via `ConstraintSystem::enable_constant`.
    ///
    /// Returns the advice cell.
    pub fn assign_advice_from_constant<VR, A, AR>(
        &mut self,
        annotation: A,
        column: Column<Advice>,
        offset: usize,
        constant: VR,
    ) -> Result<AssignedCell<VR, F>, Error>
    where
        for<'vr> Assigned<F>: From<&'vr VR>,
        A: Fn() -> AR,
        AR: Into<String>,
    {
        let cell = self.region.assign_advice_from_constant(
            &|| annotation().into(),
            column,
            offset,
            (&constant).into(),
        )?;
        // The constant is known here, so the returned cell always carries a value.
        Ok(AssignedCell {
            value: Some(constant),
            cell,
            _marker: PhantomData,
        })
    }
    /// Assign the value of the instance column's cell at absolute location
    /// `row` to the column `advice` at `offset` within this region.
    ///
    /// Returns the advice cell, and its value if known.
    pub fn assign_advice_from_instance<A, AR>(
        &mut self,
        annotation: A,
        instance: Column<Instance>,
        row: usize,
        advice: Column<Advice>,
        offset: usize,
    ) -> Result<AssignedCell<F, F>, Error>
    where
        A: Fn() -> AR,
        AR: Into<String>,
    {
        let (cell, value) = self.region.assign_advice_from_instance(
            &|| annotation().into(),
            instance,
            row,
            advice,
            offset,
        )?;
        Ok(AssignedCell {
            value,
            cell,
            _marker: PhantomData,
        })
    }
    /// Assign a fixed value.
    ///
    /// Even though `to` has `FnMut` bounds, it is guaranteed to be called at most once.
    pub fn assign_fixed<'v, V, VR, A, AR>(
        &'v mut self,
        annotation: A,
        column: Column<Fixed>,
        offset: usize,
        mut to: V,
    ) -> Result<AssignedCell<VR, F>, Error>
    where
        V: FnMut() -> Result<VR, Error> + 'v,
        for<'vr> Assigned<F>: From<&'vr VR>,
        A: Fn() -> AR,
        AR: Into<String>,
    {
        // Same capture trick as `assign_advice`: keep the witnessed `VR` so the
        // returned `AssignedCell` can expose it.
        let mut value = None;
        let cell =
            self.region
                .assign_fixed(&|| annotation().into(), column, offset, &mut || {
                    let v = to()?;
                    let value_f = (&v).into();
                    value = Some(v);
                    Ok(value_f)
                })?;
        Ok(AssignedCell {
            value,
            cell,
            _marker: PhantomData,
        })
    }
    /// Constrains a cell to have a constant value.
    ///
    /// Returns an error if the cell is in a column where equality has not been enabled.
    pub fn constrain_constant<VR>(&mut self, cell: Cell, constant: VR) -> Result<(), Error>
    where
        VR: Into<Assigned<F>>,
    {
        self.region.constrain_constant(cell, constant.into())
    }
    /// Constrains two cells to have the same value.
    ///
    /// Returns an error if either of the cells are in columns where equality
    /// has not been enabled.
    pub fn constrain_equal(&mut self, left: Cell, right: Cell) -> Result<(), Error> {
        self.region.constrain_equal(left, right)
    }
}
/// A lookup table in the circuit.
#[derive(Debug)]
pub struct Table<'r, F: Field> {
    // Type-erased layouter backend responsible for table cell assignments.
    table: &'r mut dyn layouter::TableLayouter<F>,
}
impl<'r, F: Field> From<&'r mut dyn layouter::TableLayouter<F>> for Table<'r, F> {
    /// Wraps a raw table layouter in the user-facing `Table` handle.
    fn from(inner: &'r mut dyn layouter::TableLayouter<F>) -> Self {
        Self { table: inner }
    }
}
impl<'r, F: Field> Table<'r, F> {
/// Assigns a fixed value to a table cell.
///
/// Returns an error if the table cell has already been assigned to.
///
/// Even though `to` has `FnMut` bounds, it is guaranteed to be called at most once.
pub fn assign_cell<'v, V, VR, A, AR>(
&'v mut self,
annotation: A,
column: TableColumn,
offset: usize,
mut to: V,
) -> Result<(), Error>
where
V: FnMut() -> Result<VR, Error> + 'v,
VR: Into<Assigned<F>>,
A: Fn() -> AR,
AR: Into<String>,
{
self.table
.assign_cell(&|| annotation().into(), column, offset, &mut || {
to().map(|v| v.into())
})
}
}
/// A layout strategy within a circuit. The layouter is chip-agnostic and applies its
/// strategy to the context and config it is given.
///
/// This abstracts over the circuit assignments, handling row indices etc.
pub trait Layouter<F: Field> {
    /// Represents the type of the "root" of this layouter, so that nested namespaces
    /// can minimize indirection.
    type Root: Layouter<F>;
    /// Assign a region of gates to an absolute row number.
    ///
    /// Inside the closure, the chip may freely use relative offsets; the `Layouter` will
    /// treat these assignments as a single "region" within the circuit. Outside this
    /// closure, the `Layouter` is allowed to optimise as it sees fit.
    ///
    /// ```ignore
    /// fn assign_region(&mut self, || "region name", |region| {
    ///     let config = chip.config();
    ///     region.assign_advice(config.a, offset, || { Some(value)});
    /// });
    /// ```
    fn assign_region<A, AR, N, NR>(&mut self, name: N, assignment: A) -> Result<AR, Error>
    where
        A: FnMut(Region<'_, F>) -> Result<AR, Error>,
        N: Fn() -> NR,
        NR: Into<String>;
    /// Assign a table region to an absolute row number.
    ///
    /// ```ignore
    /// fn assign_table(&mut self, || "table name", |table| {
    ///     let config = chip.config();
    ///     table.assign_fixed(config.a, offset, || { Some(value)});
    /// });
    /// ```
    fn assign_table<A, N, NR>(&mut self, name: N, assignment: A) -> Result<(), Error>
    where
        A: FnMut(Table<'_, F>) -> Result<(), Error>,
        N: Fn() -> NR,
        NR: Into<String>;
    /// Constrains a [`Cell`] to equal an instance column's row value at an
    /// absolute position.
    fn constrain_instance(
        &mut self,
        cell: Cell,
        column: Column<Instance>,
        row: usize,
    ) -> Result<(), Error>;
    /// Gets the "root" of this assignment, bypassing the namespacing.
    ///
    /// Not intended for downstream consumption; use [`Layouter::namespace`] instead.
    fn get_root(&mut self) -> &mut Self::Root;
    /// Creates a new (sub)namespace and enters into it.
    ///
    /// Not intended for downstream consumption; use [`Layouter::namespace`] instead.
    fn push_namespace<NR, N>(&mut self, name_fn: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR;
    /// Exits out of the existing namespace.
    ///
    /// Not intended for downstream consumption; use [`Layouter::namespace`] instead.
    fn pop_namespace(&mut self, gadget_name: Option<String>);
    /// Enters into a namespace.
    fn namespace<NR, N>(&mut self, name_fn: N) -> NamespacedLayouter<'_, F, Self::Root>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        // The matching `pop_namespace` is issued when the returned
        // `NamespacedLayouter` is dropped.
        self.get_root().push_namespace(name_fn);
        NamespacedLayouter(self.get_root(), PhantomData)
    }
}
/// This is a "namespaced" layouter which borrows a `Layouter` (pushing a namespace
/// context) and, when dropped, pops out of the namespace context.
#[derive(Debug)]
pub struct NamespacedLayouter<'a, F: Field, L: Layouter<F> + 'a>(&'a mut L, PhantomData<F>);
impl<'a, F: Field, L: Layouter<F> + 'a> Layouter<F> for NamespacedLayouter<'a, F, L> {
    type Root = L::Root;
    // All layout operations delegate straight to the wrapped layouter; only
    // namespace bookkeeping differs (see `push_namespace`/`pop_namespace`).
    fn assign_region<A, AR, N, NR>(&mut self, name: N, assignment: A) -> Result<AR, Error>
    where
        A: FnMut(Region<'_, F>) -> Result<AR, Error>,
        N: Fn() -> NR,
        NR: Into<String>,
    {
        self.0.assign_region(name, assignment)
    }
    fn assign_table<A, N, NR>(&mut self, name: N, assignment: A) -> Result<(), Error>
    where
        A: FnMut(Table<'_, F>) -> Result<(), Error>,
        N: Fn() -> NR,
        NR: Into<String>,
    {
        self.0.assign_table(name, assignment)
    }
    fn constrain_instance(
        &mut self,
        cell: Cell,
        column: Column<Instance>,
        row: usize,
    ) -> Result<(), Error> {
        self.0.constrain_instance(cell, column, row)
    }
    fn get_root(&mut self) -> &mut Self::Root {
        self.0.get_root()
    }
    // Namespace push/pop must go through the root layouter (the default
    // `Layouter::namespace` does this); calling them on a namespaced wrapper
    // indicates misuse, so fail loudly.
    fn push_namespace<NR, N>(&mut self, _name_fn: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        panic!("Only the root's push_namespace should be called");
    }
    fn pop_namespace(&mut self, _gadget_name: Option<String>) {
        panic!("Only the root's pop_namespace should be called");
    }
}
impl<'a, F: Field, L: Layouter<F> + 'a> Drop for NamespacedLayouter<'a, F, L> {
    // Dropping the namespaced layouter exits the namespace on the root,
    // optionally naming the gadget responsible via a backtrace lookup.
    fn drop(&mut self) {
        let gadget_name = {
            #[cfg(feature = "gadget-traces")]
            {
                // Walk the backtrace and resolve the symbol of the second
                // frame (presumably the caller that owned this namespace —
                // NOTE(review): confirm the frame layout holds across builds).
                let mut gadget_name = None;
                let mut is_second_frame = false;
                backtrace::trace(|frame| {
                    if is_second_frame {
                        // Resolve this instruction pointer to a symbol name.
                        backtrace::resolve_frame(frame, |symbol| {
                            gadget_name = symbol.name().map(|name| format!("{:#}", name));
                        });
                        // We are done!
                        false
                    } else {
                        // We want the next frame.
                        is_second_frame = true;
                        true
                    }
                });
                gadget_name
            }
            // Without the feature there is nothing to resolve.
            #[cfg(not(feature = "gadget-traces"))]
            None
        };
        self.get_root().pop_namespace(gadget_name);
    }
}
| 30.124101 | 92 | 0.561944 |
f7ae47d2e5e56efac5e5707f4cc1c47ef43b2d2e
| 1,860 |
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Some stuff used by rustc that doesn't have many dependencies
//!
//! Originally extracted from rustc::back, which was nominally the
//! compiler 'backend', though LLVM is rustc's backend, so rustc_back
//! is really just odds-and-ends relating to code gen and linking.
//! This crate mostly exists to make rustc smaller, so we might put
//! more 'stuff' here in the future. It does not have a dependency on
//! rustc_llvm.
//!
//! FIXME: Split this into two crates: one that has deps on syntax, and
//! one that doesn't; the one that doesn't might get decent parallel
//! build speedups.
#![crate_name = "rustc_back"]
#![unstable(feature = "rustc_private", issue = "27812")]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/nightly/")]
#![cfg_attr(not(stage0), deny(warnings))]
#![feature(box_syntax)]
#![feature(const_fn)]
#![feature(libc)]
#![feature(rand)]
#![feature(rustc_private)]
#![feature(staged_api)]
#![feature(step_by)]
#![feature(question_mark)]
#![cfg_attr(test, feature(test, rand))]
extern crate syntax;
extern crate libc;
extern crate serialize;
#[macro_use] extern crate log;
pub mod tempdir;
pub mod sha2;
pub mod target;
pub mod slice;
pub mod dynamic_lib;
| 35.09434 | 86 | 0.71828 |
69e3b9ed8bd9cc154f92906bf166b9bb7e175c0c
| 2,781 |
#[doc = r" Value read from the register"]
pub struct R {
    // Raw 32-bit snapshot of the hardware register.
    bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
    // Raw 32-bit value that will be committed to the hardware register.
    bits: u32,
}
impl super::RATCH4VAL {
    #[doc = r" Modifies the contents of the register"]
    #[inline]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        // Read-modify-write: seed `W` with the current bits so any fields the
        // closure does not touch are preserved.
        let bits = self.register.get();
        let r = R { bits: bits };
        let mut w = W { bits: bits };
        f(&r, &mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Reads the contents of the register"]
    #[inline]
    pub fn read(&self) -> R {
        R {
            bits: self.register.get(),
        }
    }
    #[doc = r" Writes to the register"]
    #[inline]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Unlike `modify`, `write` starts from the reset value, not from the
        // register's current contents.
        let mut w = W::reset_value();
        f(&mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Writes the reset value to the register"]
    #[inline]
    pub fn reset(&self) {
        self.write(|w| w)
    }
}
#[doc = r" Value of the field"]
pub struct VALR {
    // Extracted field value (full 32-bit field for this register).
    bits: u32,
}
impl VALR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u32 {
        self.bits
    }
}
#[doc = r" Proxy"]
pub struct _VALW<'a> {
    // Borrow of the write proxy being built up.
    w: &'a mut W,
}
impl<'a> _VALW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub unsafe fn bits(self, value: u32) -> &'a mut W {
        // The field spans the whole register: mask is all ones, offset zero.
        const MASK: u32 = 4294967295;
        const OFFSET: u8 = 0;
        // Clear the field bits, then OR in the new value.
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
impl R {
    #[doc = r" Value of the register as raw bits"]
    #[inline]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    #[doc = "Bits 0:31 - 31:0\\] Capture/compare value. The system CPU can safely read this register, but it is recommended to use the CPE API commands to configure it for compare mode."]
    #[inline]
    pub fn val(&self) -> VALR {
        // Shift-and-mask extraction; trivial here since the field covers the
        // entire register.
        let bits = {
            const MASK: u32 = 4294967295;
            const OFFSET: u8 = 0;
            ((self.bits >> OFFSET) & MASK as u32) as u32
        };
        VALR { bits }
    }
}
impl W {
    #[doc = r" Reset value of the register"]
    #[inline]
    pub fn reset_value() -> W {
        // Hardware reset value for this register is all zeros.
        W { bits: 0 }
    }
    #[doc = r" Writes raw bits to the register"]
    #[inline]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
    #[doc = "Bits 0:31 - 31:0\\] Capture/compare value. The system CPU can safely read this register, but it is recommended to use the CPE API commands to configure it for compare mode."]
    #[inline]
    pub fn val(&mut self) -> _VALW {
        _VALW { w: self }
    }
}
| 26.235849 | 187 | 0.522114 |
e2bb4408b07d4531e1fd529b9a8066f29636ac02
| 1,695 |
use std::ops::{Add, Mul, Sub};
// Asserts that two vectors have equal length before an element-wise operation,
// with a diagnostic naming the operation that failed.
macro_rules! assert_equal_len {
    // The `tt` (token tree) fragment specifier matches operators and tokens,
    // which lets callers pass `+=`, `*=`, `-=` and the like.
    ($a:ident, $b: ident, $func:ident, $op:tt) => (
        assert!($a.len() == $b.len(),
                "{:?}: dimension mismatch: {:?} {:?} {:?}",
                stringify!($func),
                ($a.len(),),
                stringify!($op),
                ($b.len(),));
    )
}
// Generates an element-wise in-place function `$func(xs, ys)` that applies the
// `$bound` trait's `$method` to each pair, storing the result back into `xs`.
macro_rules! op {
    ($func:ident, $bound:ident, $op:tt, $method:ident) => (
        fn $func<T: $bound<T, Output=T> + Copy>(xs: &mut Vec<T>, ys: &Vec<T>) {
            // Lengths must match; panics with a descriptive message otherwise.
            assert_equal_len!(xs, ys, $func, $op);
            for (x, y) in xs.iter_mut().zip(ys.iter()) {
                *x = $bound::$method(*x, *y);
                // Equivalent method-call form:
                // *x = x.$method(*y);
            }
        }
    )
}
// Implement the `add_assign`, `mul_assign` and `sub_assign` functions.
op!(add_assign, Add, +=, add);
op!(mul_assign, Mul, *=, mul);
op!(sub_assign, Sub, -=, sub);
#[cfg(test)]
mod test {
    // NOTE: the previously present `use std::mem::zeroed;` was unused in this
    // module and has been removed.
    use std::iter;

    /// Generates a `#[test]` that applies `super::$func` to vectors of sizes
    /// 0..10 filled with `$x` and `$y`, and checks the result equals `$z`
    /// repeated (the empty case exercises the zero-length path).
    macro_rules! test {
        ($func: ident, $x:expr, $y:expr, $z:expr) => {
            #[test]
            fn $func() {
                for size in 0usize..10 {
                    let mut x: Vec<_> = iter::repeat($x).take(size).collect();
                    let y: Vec<_> = iter::repeat($y).take(size).collect();
                    let z: Vec<_> = iter::repeat($z).take(size).collect();
                    super::$func(&mut x, &y);
                    assert_eq!(x, z);
                }
            }
        }
    }

    // Test `add_assign`, `mul_assign` and `sub_assign`.
    test!(add_assign, 1u32, 2u32, 3u32);
    test!(mul_assign, 2u32, 3u32, 6u32);
    test!(sub_assign, 3u32, 2u32, 1u32);
}
| 28.728814 | 79 | 0.445428 |
11f9db775a7c3b6cbe80938b32844894c77e60ae
| 556 |
use docchi_core::structs::{ParamType, RustParam, Qv};
pub(crate) fn get_null(pt : ParamType) -> RustParam{
match pt {
ParamType::Bool => { RustParam::Bool(Qv::Null) }
ParamType::Int => { RustParam::Int(Qv::Null) }
ParamType::Float => { RustParam::Float(Qv::Null) }
ParamType::String => { RustParam::String(Qv::Null) }
ParamType::IntArray => { RustParam::IntArray(Qv::Null) }
ParamType::FloatArray => { RustParam::FloatArray(Qv::Null) }
ParamType::Binary => { RustParam::Binary(Qv::Null)}
}
}
| 42.769231 | 68 | 0.607914 |
f86af4f488509176dba9f8a8ecf1252ba1ca6fb1
| 8,773 |
use alloc::sync::Arc;
use core::marker::PhantomData;
use drone_cortexm::reg::prelude::*;
use drone_stm32_map::periph::gpio::{
head::GpioHeadMap,
pin::{GpioPinMap, GpioPinPeriph},
};
// Marker traits encoding a pin's configuration (mode/type/pull/alternate
// function) in the type system; the `*OrDontCare` variants additionally admit
// the undefined `DontCare` state.
pub trait PinModeOrDontCare: Send + Sync + 'static {}
pub trait PinModeMap: PinModeOrDontCare {}
pub trait PinTypeOrDontCare: Send + Sync + 'static {}
pub trait PinTypeMap: PinTypeOrDontCare {}
pub trait PinPullMap: Send + Sync + 'static {}
pub trait PinAf: Send + Sync + 'static {
    // Alternate-function number written into the AFR register field.
    const NUM: u32;
}
/// Pin configuration.
pub struct GpioPin<
    Pin: GpioPinMap,
    Mode: PinModeOrDontCare,
    Type: PinTypeOrDontCare,
    Pull: PinPullMap,
> {
    pub(crate) pin: Arc<GpioPinPeriph<Pin>>,
    // Zero-sized markers recording the configured mode/type/pull at the type
    // level; no runtime state is stored for them.
    mode: PhantomData<Mode>,
    type_: PhantomData<Type>,
    pull: PhantomData<Pull>,
}
// Constructs a typed pin wrapper around a shared pin peripheral, without
// touching any hardware registers.
impl<
        Pin: GpioPinMap,
        Mode: PinModeOrDontCare,
        Type: PinTypeOrDontCare,
        Pull: PinPullMap,
    >
    From<Arc<GpioPinPeriph<Pin>>> for GpioPin<Pin, Mode, Type, Pull>
{
    fn from(pin: Arc<GpioPinPeriph<Pin>>) -> Self {
        Self {
            pin,
            mode: PhantomData,
            type_: PhantomData,
            pull: PhantomData,
        }
    }
}
/// Generic dont-care mode for undefined state.
pub struct DontCare;
impl PinModeOrDontCare for DontCare {}
impl PinTypeOrDontCare for DontCare {}
/// General purpose input mode (MODER=0b00).
pub struct InputMode;
impl PinModeMap for InputMode {}
impl PinModeOrDontCare for InputMode {}
/// General purpose output mode (MODER=0b01).
pub struct OutputMode;
impl PinModeMap for OutputMode {}
impl PinModeOrDontCare for OutputMode {}
/// Alternate function mode (MODER=0b10).
pub struct AlternateMode<Af: PinAf> {
    // Marker only; the AF number lives in `Af::NUM`.
    af: PhantomData<Af>,
}
impl<Af: PinAf> PinModeMap for AlternateMode<Af> {}
impl<Af: PinAf> PinModeOrDontCare for AlternateMode<Af> {}
// TODO: Analog mode
/// Push/pull type (OTYPER=0).
/// This is only applicable for OutputMode and AlternateMode.
pub struct PushPullType;
impl PinTypeMap for PushPullType {}
impl PinTypeOrDontCare for PushPullType {}
/// Output open-drain type (OTYPER=1).
/// This is only applicable for OutputMode and AlternateMode.
pub struct OpenDrainType;
impl PinTypeMap for OpenDrainType {}
impl PinTypeOrDontCare for OpenDrainType {}
/// No pull-up nor pull-down. For inputs this means floating.
pub struct NoPull;
impl PinPullMap for NoPull {}
/// Pull up.
pub struct PullUp;
impl PinPullMap for PullUp {}
/// Pull down.
pub struct PullDown;
impl PinPullMap for PullDown {}
// One marker type per alternate function number AF0..AF15; the number each
// carries is assigned by the `af_token!` invocations below.
pub struct PinAf0;
pub struct PinAf1;
pub struct PinAf2;
pub struct PinAf3;
pub struct PinAf4;
pub struct PinAf5;
pub struct PinAf6;
pub struct PinAf7;
pub struct PinAf8;
pub struct PinAf9;
pub struct PinAf10;
pub struct PinAf11;
pub struct PinAf12;
pub struct PinAf13;
pub struct PinAf14;
pub struct PinAf15;
// Implements `PinAf` for a marker type, binding it to its AF number.
macro_rules! af_token {
    ($af:ident, $num:expr) => {
        impl PinAf for $af {
            const NUM: u32 = $num;
        }
    };
}
af_token!(PinAf0, 0);
af_token!(PinAf1, 1);
af_token!(PinAf2, 2);
af_token!(PinAf3, 3);
af_token!(PinAf4, 4);
af_token!(PinAf5, 5);
af_token!(PinAf6, 6);
af_token!(PinAf7, 7);
af_token!(PinAf8, 8);
af_token!(PinAf9, 9);
af_token!(PinAf10, 10);
af_token!(PinAf11, 11);
af_token!(PinAf12, 12);
af_token!(PinAf13, 13);
af_token!(PinAf14, 14);
// `DontCare` also implements `PinAf` (as AF0) so it can stand in where an
// alternate function type parameter is required but unused.
af_token!(DontCare, 0);
/// Gpio pin speed.
// Variants map to the 0..=3 OSPEEDR field values in `with_speed`.
pub enum GpioPinSpeed {
    LowSpeed,
    MediumSpeed,
    HighSpeed,
    VeryHighSpeed,
}
impl<Pin: GpioPinMap> GpioPin<Pin, DontCare, DontCare, NoPull> {
    /// Set pin into general purpose input mode.
    pub fn into_input(self) -> GpioPin<Pin, InputMode, DontCare, NoPull> {
        self.pin.gpio_moder_moder.write_bits(0b00);
        self.pin.into()
    }
    /// Set pin into general purpose output mode.
    pub fn into_output(self) -> GpioPin<Pin, OutputMode, DontCare, NoPull> {
        self.pin.gpio_moder_moder.write_bits(0b01);
        self.pin.into()
    }
    /// Set the pin into alternate function mode.
    pub fn into_alternate<Af: PinAf>(self) -> GpioPin<Pin, AlternateMode<Af>, DontCare, NoPull> {
        // The AF number is programmed into AFR before MODER switches the pin
        // into alternate mode.
        self.pin.gpio_afr_afr.write_bits(Af::NUM);
        self.pin.gpio_moder_moder.write_bits(0b10);
        self.pin.into()
    }
}
// Modes for which the output type (push/pull vs open-drain) may be configured.
pub trait TypeModes: PinModeMap {}
impl TypeModes for InputMode {}
impl TypeModes for OutputMode {}
impl<Af: PinAf> TypeModes for AlternateMode<Af> {}
impl<Pin: GpioPinMap, Mode: TypeModes> GpioPin<Pin, Mode, DontCare, NoPull> {
    /// Let pin type be push/pull.
    pub fn into_pushpull(self) -> GpioPin<Pin, Mode, PushPullType, NoPull> {
        self.pin.gpio_otyper_ot.clear_bit();
        self.pin.gpio_pupdr_pupdr.write_bits(0b00); // No pull-up nor pull-down.
        self.pin.into()
    }
    /// Let pin type be open-drain.
    pub fn into_opendrain(self) -> GpioPin<Pin, Mode, OpenDrainType, NoPull> {
        // OTYPER=1 selects open-drain; PUPDR is left untouched here.
        self.pin.gpio_otyper_ot.set_bit();
        self.pin.into()
    }
}
// Modes for which the pull-up/pull-down resistors may be configured.
pub trait PullModes: PinModeMap {}
impl PullModes for InputMode {}
impl PullModes for OutputMode {}
impl<Af: PinAf> PullModes for AlternateMode<Af> {}
// Pull configuration is only offered for push/pull pins.
impl<Pin: GpioPinMap, Mode: PullModes>
    GpioPin<Pin, Mode, PushPullType, NoPull>
{
    /// No pull-up nor pull-down (this is the default).
    pub fn into_nopull(self) -> GpioPin<Pin, Mode, PushPullType, NoPull> {
        self.pin.gpio_pupdr_pupdr.write_bits(0b00);
        self.pin.into()
    }
    /// Let pin be pulled-up.
    pub fn into_pullup(self) -> GpioPin<Pin, Mode, PushPullType, PullUp> {
        self.pin.gpio_pupdr_pupdr.write_bits(0b01);
        self.pin.into()
    }
    /// Let pin be pulled-down.
    pub fn into_pulldown(self) -> GpioPin<Pin, Mode, PushPullType, PullDown> {
        self.pin.gpio_pupdr_pupdr.write_bits(0b10);
        self.pin.into()
    }
}
// Modes for which the output speed (OSPEEDR) may be configured; inputs have
// no drive speed.
pub trait WithSpeedModes: PinModeMap {}
impl WithSpeedModes for OutputMode {}
impl<Af: PinAf> WithSpeedModes for AlternateMode<Af> {}
impl<
        Pin: GpioPinMap,
        Mode: WithSpeedModes,
        Type: PinTypeOrDontCare,
        Pull: PinPullMap,
    > GpioPin<Pin, Mode, Type, Pull>
{
    /// Set pin speed.
    pub fn with_speed(self, speed: GpioPinSpeed) -> Self {
        // Encode the requested speed as the two-bit OSPEEDR field value.
        let bits = match speed {
            GpioPinSpeed::LowSpeed => 0,
            GpioPinSpeed::MediumSpeed => 1,
            GpioPinSpeed::HighSpeed => 2,
            GpioPinSpeed::VeryHighSpeed => 3,
        };
        self.pin.gpio_ospeedr_ospeedr.write_bits(bits);
        self
    }
}
// Modes for which the input data register (IDR) may be read.
pub trait PinGetMode: PinModeMap {}
impl PinGetMode for InputMode {}
impl PinGetMode for OutputMode {}
impl<Af: PinAf> PinGetMode for AlternateMode<Af> {}
impl<
        Pin: GpioPinMap,
        Mode: PinGetMode,
        Type: PinTypeMap,
        Pull: PinPullMap,
    >
    GpioPin<Pin, Mode, Type, Pull>
{
    /// Get the current pin state.
    pub fn get(&self) -> bool {
        // Reads the pin's bit in the input data register.
        self.pin.gpio_idr_idr.read_bit()
    }
}
impl<
        Pin: GpioPinMap,
        Type: PinTypeMap,
        Pull: PinPullMap,
    > GpioPin<Pin, OutputMode, Type, Pull> {
    /// Set output pin high.
    #[inline]
    pub fn set(&self) {
        // Set output pin to high by writing BS (bit set) to the bit set/reset register.
        self.pin.gpio_bsrr_bs.set_bit();
    }
    /// Set output pin low.
    #[inline]
    pub fn clear(&self) {
        // Clear output pin to low by writing BR (bit reset) to the bit set/reset register.
        self.pin.gpio_bsrr_br.set_bit();
    }
}
impl<
        Pin: GpioPinMap,
        Mode: PinModeMap,
        Type: PinTypeMap,
        Pull: PinPullMap,
    >
    GpioPin<Pin, Mode, Type, Pull>
{
    /// Clone the pin
    ///
    /// # Safety
    /// The function is unsafe as there are no guarantees that the two configuration can co-exist.
    pub unsafe fn clone(&self) -> Self {
        // Shares the underlying `Arc`-wrapped peripheral; only the zero-sized
        // markers are copied.
        Self {
            pin: self.pin.clone(),
            mode: self.mode,
            type_: self.type_,
            pull: self.pull,
        }
    }
}
// Factory trait implemented per (head, pin) pair by the `pin_init!` macro.
pub trait NewPin<Head: GpioHeadMap, Pin: GpioPinMap> {
    /// Create a new pin configuration from a pin peripheral.
    fn pin(&self, pin: GpioPinPeriph<Pin>) -> GpioPin<Pin, DontCare, DontCare, NoPull>;
}
// Generates a `NewPin` impl for each listed (head, pin) pair, returning the
// pin in the undefined `DontCare` state with no pull configured.
#[macro_export]
macro_rules! pin_init {
    ($($head:ident, $pin:ident;)+) => {
        $(
            impl
                crate::pin::NewPin<
                    $head,
                    $pin,
                > for crate::head::GpioHead<$head>
            {
                fn pin(
                    &self,
                    pin: ::drone_stm32_map::periph::gpio::pin::GpioPinPeriph<
                        $pin,
                    >,
                ) -> crate::pin::GpioPin<
                    $pin,
                    crate::pin::DontCare,
                    crate::pin::DontCare,
                    crate::NoPull,
                > {
                    crate::pin::GpioPin::from(alloc::sync::Arc::new(pin))
                }
            }
        )+
    };
}
7a28154ad2fa2af08ceee49408e7dd343e2f5c46
| 2,366 |
use dominator::{clone, html, with_node, Dom};
use futures_signals::signal::SignalExt;
use std::rc::Rc;
use utils::{
math::{bounds, mat4::Matrix4},
prelude::*,
resize::resize_info_signal,
};
use wasm_bindgen::JsCast;
use super::super::helpers::*;
use super::state::*;
impl ImagePlayer {
    /// Builds the DOM for this image player: an absolutely positioned `<img>`
    /// whose size, position, visibility and transform track the state signals.
    pub fn render(self: Rc<Self>) -> Dom {
        let state = self;
        let transform_matrix = Matrix4::new_direct(state.raw.transform_matrix);
        // Re-denormalize the transform whenever the resize info changes.
        let transform_signal = resize_info_signal().map(move |resize_info| {
            let mut m = transform_matrix.clone();
            m.denormalize(&resize_info);
            m.as_matrix_string()
        });
        log::info!("Loading {}!", state.raw.filename);
        html!("img" => web_sys:: HtmlImageElement, {
            .attribute("src", &state.base.design_media_url(&state.raw.filename))
            // Hidden images stay in the DOM; only their opacity is toggled.
            .style_signal("opacity", state.controller.hidden.signal().map(|hidden| {
                if hidden {
                    "0"
                } else {
                    "1"
                }
            }))
            .style("cursor", if state.controller.interactive {"pointer"} else {"initial"})
            .style("display", "block")
            .style("position", "absolute")
            .style_signal("width", width_signal(state.size.signal_cloned()))
            .style_signal("height", height_signal(state.size.signal_cloned()))
            .style_signal("top", bounds::size_height_center_rem_signal(state.size.signal()))
            .style_signal("left", bounds::size_width_center_rem_signal(state.size.signal()))
            .style_signal("transform", transform_signal)
            .with_node!(elem => {
                // On load: record the natural size (first load only), stash the
                // element for the controller, and register the stage click handler.
                .event(clone!(state => move |_evt:events::Load| {
                    if state.size.get_cloned().is_none() {
                        let width = elem.natural_width() as f64;
                        let height = elem.natural_height() as f64;
                        state.size.set(Some((width, height)));
                    }
                    *state.controller.elem.borrow_mut() = Some(elem.clone().unchecked_into());
                    state.base.insert_stage_click_listener(clone!(state => move |stage_click| {
                        state.controller.handle_click(stage_click);
                    }));
                }))
            })
        })
    }
}
ccee0e5ad2bc3e52d8493510d79ff54a54f70e82
| 9,880 |
//! Trait solving using Chalk.
use std::env::var;
use base_db::CrateId;
use chalk_ir::cast::Cast;
use chalk_solve::{logging_db::LoggingRustIrDatabase, Solver};
use hir_def::{lang_item::LangItemTarget, TraitId};
use stdx::panic_context;
use crate::{
db::HirDatabase, AliasTy, Canonical, DebruijnIndex, HirDisplay, Substitution, Ty, TyKind,
TypeWalk, WhereClause,
};
use self::chalk::{from_chalk, Interner, ToChalk};
pub(crate) mod chalk;
/// This controls how much 'time' we give the Chalk solver before giving up.
const CHALK_SOLVER_FUEL: i32 = 100;
// Bundles the database handle with the crate whose trait environment is being
// queried; passed to Chalk as its `RustIrDatabase`.
#[derive(Debug, Copy, Clone)]
struct ChalkContext<'a> {
    db: &'a dyn HirDatabase,
    krate: CrateId,
}
/// Constructs the recursive Chalk solver, honoring the `CHALK_OVERFLOW_DEPTH`
/// and `CHALK_SOLVER_MAX_SIZE` environment overrides (defaults: 100 and 30).
fn create_chalk_solver() -> chalk_recursive::RecursiveSolver<Interner> {
    // Unset or unparsable environment values fall back to the defaults.
    let overflow_depth = var("CHALK_OVERFLOW_DEPTH")
        .ok()
        .and_then(|raw| raw.parse().ok())
        .unwrap_or(100);
    let max_size = var("CHALK_SOLVER_MAX_SIZE")
        .ok()
        .and_then(|raw| raw.parse().ok())
        .unwrap_or(30);
    // Caching is always enabled for the recursive solver.
    chalk_recursive::RecursiveSolver::new(overflow_depth, max_size, true)
}
/// A set of clauses that we assume to be true. E.g. if we are inside this function:
/// ```rust
/// fn foo<T: Default>(t: T) {}
/// ```
/// we assume that `T: Default`.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct TraitEnvironment {
    // When we're using Chalk's Ty we can make this a BTreeMap since it's Ord,
    // but for now it's too annoying...
    pub(crate) traits_from_clauses: Vec<(Ty, TraitId)>,
    pub env: chalk_ir::Environment<Interner>,
}
impl TraitEnvironment {
    /// Returns the traits that `ty` is known to implement, judging from the
    /// clauses recorded in this environment.
    pub(crate) fn traits_in_scope_from_clauses<'a>(
        &'a self,
        ty: &'a Ty,
    ) -> impl Iterator<Item = TraitId> + 'a {
        self.traits_from_clauses
            .iter()
            .filter(move |(self_ty, _)| self_ty == ty)
            .map(|(_, trait_id)| *trait_id)
    }
}
impl Default for TraitEnvironment {
    /// An empty environment: no clauses and a fresh Chalk environment.
    fn default() -> Self {
        Self {
            env: chalk_ir::Environment::new(&Interner),
            traits_from_clauses: Vec::new(),
        }
    }
}
/// Something (usually a goal), along with an environment.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct InEnvironment<T> {
    pub environment: chalk_ir::Environment<Interner>,
    pub goal: T,
}
impl<T> InEnvironment<T> {
    /// Pairs `value` with the environment it should be interpreted in.
    pub fn new(environment: chalk_ir::Environment<Interner>, value: T) -> InEnvironment<T> {
        InEnvironment { environment, goal: value }
    }
}
/// Something that needs to be proven (by Chalk) during type checking, e.g. that
/// a certain type implements a certain trait. Proving the Obligation might
/// result in additional information about inference variables.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum DomainGoal {
    Holds(WhereClause),
}
// An `alias = ty` equality obligation (projection or opaque type).
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct AliasEq {
    pub alias: AliasTy,
    pub ty: Ty,
}
impl TypeWalk for AliasEq {
    // Visits the `ty` side first, then the alias; `walk_mut_binders` mirrors
    // this order so the two traversals stay consistent.
    fn walk(&self, f: &mut impl FnMut(&Ty)) {
        self.ty.walk(f);
        match &self.alias {
            AliasTy::Projection(projection_ty) => projection_ty.walk(f),
            AliasTy::Opaque(opaque) => opaque.walk(f),
        }
    }
    fn walk_mut_binders(
        &mut self,
        f: &mut impl FnMut(&mut Ty, DebruijnIndex),
        binders: DebruijnIndex,
    ) {
        self.ty.walk_mut_binders(f, binders);
        match &mut self.alias {
            AliasTy::Projection(projection_ty) => projection_ty.walk_mut_binders(f, binders),
            AliasTy::Opaque(opaque) => opaque.walk_mut_binders(f, binders),
        }
    }
}
/// Solve a trait goal using Chalk.
pub(crate) fn trait_solve_query(
    db: &dyn HirDatabase,
    krate: CrateId,
    goal: Canonical<InEnvironment<DomainGoal>>,
) -> Option<Solution> {
    // Profiling span labeled with the trait (or "alias_eq") being solved.
    let _p = profile::span("trait_solve_query").detail(|| match &goal.value.goal {
        DomainGoal::Holds(WhereClause::Implemented(it)) => {
            db.trait_data(it.hir_trait_id()).name.to_string()
        }
        DomainGoal::Holds(WhereClause::AliasEq(_)) => "alias_eq".to_string(),
    });
    log::info!("trait_solve_query({})", goal.value.goal.display(db));
    if let DomainGoal::Holds(WhereClause::AliasEq(AliasEq {
        alias: AliasTy::Projection(projection_ty),
        ..
    })) = &goal.value.goal
    {
        if let TyKind::BoundVar(_) = &projection_ty.substitution[0].interned(&Interner) {
            // Hack: don't ask Chalk to normalize with an unknown self type, it'll say that's impossible
            return Some(Solution::Ambig(Guidance::Unknown));
        }
    }
    let canonical = goal.to_chalk(db).cast(&Interner);
    // We currently don't deal with universes (I think / hope they're not yet
    // relevant for our use cases?)
    let u_canonical = chalk_ir::UCanonical { canonical, universes: 1 };
    let solution = solve(db, krate, &u_canonical);
    // Translate Chalk's solution back into our own representation.
    solution.map(|solution| solution_from_chalk(db, solution))
}
// Runs the Chalk solver on a canonicalized goal, with a fuel limit and
// optional debug logging/TLS hooks.
fn solve(
    db: &dyn HirDatabase,
    krate: CrateId,
    goal: &chalk_ir::UCanonical<chalk_ir::InEnvironment<chalk_ir::Goal<Interner>>>,
) -> Option<chalk_solve::Solution<Interner>> {
    let context = ChalkContext { db, krate };
    log::debug!("solve goal: {:?}", goal);
    let mut solver = create_chalk_solver();
    // `should_continue` is polled by Chalk; it also checks for cancellation and
    // aborts the search once CHALK_SOLVER_FUEL invocations have been spent.
    let fuel = std::cell::Cell::new(CHALK_SOLVER_FUEL);
    let should_continue = || {
        context.db.check_canceled();
        let remaining = fuel.get();
        fuel.set(remaining - 1);
        if remaining == 0 {
            log::debug!("fuel exhausted");
        }
        remaining > 0
    };
    let mut solve = || {
        // Attach a panic context while debugging so crashes carry the goal.
        let _ctx = if is_chalk_debug() || is_chalk_print() {
            Some(panic_context::enter(format!("solving {:?}", goal)))
        } else {
            None
        };
        let solution = if is_chalk_print() {
            // The wrapper prints the accumulated Chalk program when dropped.
            let logging_db =
                LoggingRustIrDatabaseLoggingOnDrop(LoggingRustIrDatabase::new(context));
            let solution = solver.solve_limited(&logging_db.0, goal, &should_continue);
            solution
        } else {
            solver.solve_limited(&context, goal, &should_continue)
        };
        log::debug!("solve({:?}) => {:?}", goal, solution);
        solution
    };
    // don't set the TLS for Chalk unless Chalk debugging is active, to make
    // extra sure we only use it for debugging
    let solution =
        if is_chalk_debug() { chalk::tls::set_current_program(db, solve) } else { solve() };
    solution
}
// Wrapper that dumps the logged Chalk program to stderr when it goes out of
// scope, so the program is printed even if solving panics.
struct LoggingRustIrDatabaseLoggingOnDrop<'a>(LoggingRustIrDatabase<Interner, ChalkContext<'a>>);
impl<'a> Drop for LoggingRustIrDatabaseLoggingOnDrop<'a> {
    fn drop(&mut self) {
        eprintln!("chalk program:\n{}", self.0);
    }
}
/// True when `CHALK_DEBUG` is set, enabling Chalk's TLS debug context.
fn is_chalk_debug() -> bool {
    var("CHALK_DEBUG").is_ok()
}
/// True when `CHALK_PRINT` is set, enabling logging of the Chalk program.
fn is_chalk_print() -> bool {
    var("CHALK_PRINT").is_ok()
}
// Converts a Chalk solution into our `Solution`, mapping each canonical
// substitution back through `from_chalk`.
fn solution_from_chalk(
    db: &dyn HirDatabase,
    solution: chalk_solve::Solution<Interner>,
) -> Solution {
    let convert_subst = |subst: chalk_ir::Canonical<chalk_ir::Substitution<Interner>>| {
        let result = from_chalk(db, subst);
        SolutionVariables(result)
    };
    match solution {
        chalk_solve::Solution::Unique(constr_subst) => {
            // Constraints are dropped here; only the substitution is kept.
            let subst = chalk_ir::Canonical {
                value: constr_subst.value.subst,
                binders: constr_subst.binders,
            };
            Solution::Unique(convert_subst(subst))
        }
        chalk_solve::Solution::Ambig(chalk_solve::Guidance::Definite(subst)) => {
            Solution::Ambig(Guidance::Definite(convert_subst(subst)))
        }
        chalk_solve::Solution::Ambig(chalk_solve::Guidance::Suggested(subst)) => {
            Solution::Ambig(Guidance::Suggested(convert_subst(subst)))
        }
        chalk_solve::Solution::Ambig(chalk_solve::Guidance::Unknown) => {
            Solution::Ambig(Guidance::Unknown)
        }
    }
}
// Canonicalized substitution for the existential variables of a solved goal.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct SolutionVariables(pub Canonical<Substitution>);
#[derive(Clone, Debug, PartialEq, Eq)]
/// A (possible) solution for a proposed goal.
pub enum Solution {
    /// The goal indeed holds, and there is a unique value for all existential
    /// variables.
    Unique(SolutionVariables),
    /// The goal may be provable in multiple ways, but regardless we may have some guidance
    /// for type inference. In this case, we don't return any lifetime
    /// constraints, since we have not "committed" to any particular solution
    /// yet.
    Ambig(Guidance),
}
#[derive(Clone, Debug, PartialEq, Eq)]
/// When a goal holds ambiguously (e.g., because there are multiple possible
/// solutions), we issue a set of *guidance* back to type inference.
pub enum Guidance {
    /// The existential variables *must* have the given values if the goal is
    /// ever to hold, but that alone isn't enough to guarantee the goal will
    /// actually hold.
    Definite(SolutionVariables),
    /// There are multiple plausible values for the existentials, but the ones
    /// here are suggested as the preferred choice heuristically. These should
    /// be used for inference fallback only.
    Suggested(SolutionVariables),
    /// There's no useful information to feed back to type inference
    Unknown,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
/// The three callable traits from `core::ops`, each resolved via a lang item.
pub enum FnTrait {
    FnOnce,
    FnMut,
    Fn,
}
impl FnTrait {
    /// Lang-item name under which the corresponding trait is registered.
    fn lang_item_name(self) -> &'static str {
        match self {
            FnTrait::FnOnce => "fn_once",
            FnTrait::FnMut => "fn_mut",
            FnTrait::Fn => "fn",
        }
    }
    /// Resolve this trait to its `TraitId` in `krate`, if the lang item exists
    /// and actually points at a trait.
    pub fn get_id(&self, db: &dyn HirDatabase, krate: CrateId) -> Option<TraitId> {
        if let LangItemTarget::TraitId(t) = db.lang_item(krate, self.lang_item_name().into())? {
            Some(t)
        } else {
            None
        }
    }
}
| 32.077922 | 104 | 0.632895 |
118e75e1ee70469bb275185650ab904a0effc251
| 933 |
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Exposes the NonZero lang item which provides optimization hints.
use ops::CoerceUnsized;
/// A wrapper type for raw pointers and integers that will never be
/// NULL or 0 that might allow certain optimizations.
#[rustc_layout_scalar_valid_range_start(1)]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[repr(transparent)]
pub(crate) struct NonZero<T>(pub(crate) T);
// Forward unsizing coercions through the wrapper (e.g. `NonZero<&[T; N]>` to
// `NonZero<&[T]>`) so it behaves like the pointer it wraps.
impl<T: CoerceUnsized<U>, U> CoerceUnsized<NonZero<U>> for NonZero<T> {}
| 40.565217 | 72 | 0.748124 |
08da57bfa97e732db32760320a89bd6d065296d3
| 9,487 |
/*
* Copyright (C) 2020 Red Hat, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
use std::collections::{BTreeMap, BTreeSet};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::Command;
use anyhow::{bail, Context, Result};
use openat_ext::OpenatDirExt;
use chrono::prelude::*;
use crate::component::*;
use crate::filetree;
use crate::model::*;
use crate::ostreeutil;
use crate::util;
use crate::util::CommandRunExt;
/// The path to the ESP mount
pub(crate) const MOUNT_PATH: &str = "boot/efi";
/// Marker type implementing [`Component`] for the EFI system partition.
#[derive(Default)]
pub(crate) struct EFI {}
impl EFI {
    /// Path of the vendor directory on the ESP, relative to the sysroot.
    fn esp_path(&self) -> PathBuf {
        PathBuf::from(MOUNT_PATH).join("EFI")
    }
    /// Open the ESP vendor directory, returning `Ok(None)` if it is absent.
    fn open_esp_optional(&self) -> Result<Option<openat::Dir>> {
        Ok(openat::Dir::open("/")?.sub_dir_optional(&self.esp_path())?)
    }
    /// Open the ESP vendor directory, erroring if it does not exist.
    fn open_esp(&self) -> Result<openat::Dir> {
        Ok(openat::Dir::open("/")?.sub_dir(&self.esp_path())?)
    }
}
impl Component for EFI {
    /// Stable identifier for this component.
    fn name(&self) -> &'static str {
        "EFI"
    }
    /// Detect an ESP that is not yet managed by us, deriving its content
    /// metadata from the CoreOS aleph file. Returns `Ok(None)` when there is
    /// no ESP or no aleph version to adopt from.
    fn query_adopt(&self) -> Result<Option<ContentMetadata>> {
        let esp = self.open_esp_optional()?;
        if esp.is_none() {
            log::trace!("No ESP detected");
            return Ok(None);
        };
        // This would be extended with support for other operating systems later
        let coreos_aleph = if let Some(a) = crate::coreos::get_aleph_version()? {
            a
        } else {
            log::trace!("No CoreOS aleph detected");
            return Ok(None);
        };
        let meta = ContentMetadata {
            timestamp: coreos_aleph.ts,
            version: coreos_aleph.aleph.imgid,
        };
        log::trace!("EFI adoptable: {:?}", &meta);
        Ok(Some(meta))
    }
    /// Given an adoptable system and an update, perform the update.
    fn adopt_update(&self, updatemeta: &ContentMetadata) -> Result<InstalledContent> {
        let meta = if let Some(meta) = self.query_adopt()? {
            meta
        } else {
            anyhow::bail!("Failed to find adoptable system");
        };
        let esp = self.open_esp()?;
        validate_esp(&esp)?;
        let updated =
            openat::Dir::open(&component_updatedir("/", self)).context("opening update dir")?;
        let updatef = filetree::FileTree::new_from_dir(&updated).context("reading update dir")?;
        // For adoption, we should only touch files that we know about.
        let diff = updatef.relative_diff_to(&esp)?;
        log::trace!("applying adoption diff: {}", &diff);
        filetree::apply_diff(&updated, &esp, &diff, None).context("applying filesystem changes")?;
        Ok(InstalledContent {
            meta: updatemeta.clone(),
            filetree: Some(updatef),
            adopted_from: Some(meta),
        })
    }
    /// Copy the staged EFI payload from `src_root` onto the ESP under
    /// `dest_root`, recording the installed file tree.
    fn install(&self, src_root: &str, dest_root: &str) -> Result<InstalledContent> {
        let meta = if let Some(meta) = get_component_update(src_root, self)? {
            meta
        } else {
            anyhow::bail!("No update metadata for component {} found", self.name());
        };
        let srcdir = component_updatedir(src_root, self);
        let srcd = openat::Dir::open(&srcdir)?;
        let ft = crate::filetree::FileTree::new_from_dir(&srcd)?;
        let destdir = Path::new(dest_root).join(MOUNT_PATH);
        {
            let destd = openat::Dir::open(&destdir)?;
            validate_esp(&destd)?;
        }
        // Use cp to preserve permissions and take advantage of reflinks when
        // the destination filesystem supports them.
        let r = std::process::Command::new("cp")
            .args(&["-rp", "--reflink=auto"])
            .arg(&srcdir)
            .arg(&destdir)
            .status()?;
        if !r.success() {
            anyhow::bail!("Failed to copy");
        }
        Ok(InstalledContent {
            meta,
            filetree: Some(ft),
            adopted_from: None,
        })
    }
    /// Apply the diff between the currently-installed tree and the staged
    /// update to the live ESP.
    fn run_update(&self, current: &InstalledContent) -> Result<InstalledContent> {
        let currentf = current
            .filetree
            .as_ref()
            .ok_or_else(|| anyhow::anyhow!("No filetree for installed EFI found!"))?;
        let updatemeta = self.query_update()?.expect("update available");
        let updated =
            openat::Dir::open(&component_updatedir("/", self)).context("opening update dir")?;
        let updatef = filetree::FileTree::new_from_dir(&updated).context("reading update dir")?;
        let diff = currentf.diff(&updatef)?;
        let destdir = openat::Dir::open(&Path::new("/").join(MOUNT_PATH).join("EFI"))
            .context("opening EFI dir")?;
        validate_esp(&destdir)?;
        log::trace!("applying diff: {}", &diff);
        filetree::apply_diff(&updated, &destdir, &diff, None)
            .context("applying filesystem changes")?;
        Ok(InstalledContent {
            meta: updatemeta,
            filetree: Some(updatef),
            // In-place updates never carry adoption provenance.
            adopted_from: None,
        })
    }
    /// Stage update content from the ostree deployment in `sysroot_path` and
    /// compute its metadata (version list + newest RPM build time).
    fn generate_update_metadata(&self, sysroot_path: &str) -> Result<ContentMetadata> {
        let ostreebootdir = Path::new(sysroot_path).join(ostreeutil::BOOT_PREFIX);
        let dest_efidir = component_updatedir(sysroot_path, self);
        if ostreebootdir.exists() {
            let cruft = ["loader", "grub2"];
            for p in cruft.iter() {
                let p = ostreebootdir.join(p);
                if p.exists() {
                    std::fs::remove_dir_all(&p)?;
                }
            }
            let efisrc = ostreebootdir.join("efi/EFI");
            if !efisrc.exists() {
                bail!("Failed to find {:?}", &efisrc);
            }
            // Fork off mv() because on overlayfs one can't rename() a lower level
            // directory today, and this will handle the copy fallback.
            let parent = dest_efidir
                .parent()
                .ok_or_else(|| anyhow::anyhow!("Expected parent directory"))?;
            std::fs::create_dir_all(&parent)?;
            Command::new("mv").args(&[&efisrc, &dest_efidir]).run()?;
        }
        let src_efidir = openat::Dir::open(&dest_efidir)?;
        // Query the rpm database and list the package and build times for all the
        // files in the EFI system partition. If any files are not owned it is considered
        // an error condition.
        let rpmout = {
            let mut c = ostreeutil::rpm_cmd(sysroot_path);
            c.args(&["-q", "--queryformat", "%{nevra},%{buildtime} ", "-f"]);
            c.args(util::filenames(&src_efidir)?.drain().map(|mut f| {
                f.insert_str(0, "/boot/efi/EFI/");
                f
            }));
            c
        }
        .output()?;
        if !rpmout.status.success() {
            std::io::stderr().write_all(&rpmout.stderr)?;
            bail!("Failed to invoke rpm -qf");
        }
        // Each entry is "<nevra>,<buildtime-seconds>"; parse into a map so
        // duplicate packages collapse.
        let pkgs = std::str::from_utf8(&rpmout.stdout)?
            .split_whitespace()
            .map(|s| -> Result<_> {
                let parts: Vec<_> = s.splitn(2, ',').collect();
                let name = parts[0];
                if let Some(ts) = parts.get(1) {
                    let nt = NaiveDateTime::parse_from_str(ts, "%s")
                        .context("Failed to parse rpm buildtime")?;
                    Ok((name, DateTime::<Utc>::from_utc(nt, Utc)))
                } else {
                    bail!("Failed to parse: {}", s);
                }
            })
            .collect::<Result<BTreeMap<&str, DateTime<Utc>>>>()?;
        if pkgs.is_empty() {
            bail!("Failed to find any RPM packages matching files in source efidir");
        }
        let timestamps: BTreeSet<&DateTime<Utc>> = pkgs.values().collect();
        // Unwrap safety: We validated pkgs has at least one value above
        let largest_timestamp = timestamps.iter().last().unwrap();
        let version = pkgs.keys().fold("".to_string(), |mut s, n| {
            if !s.is_empty() {
                s.push(',');
            }
            s.push_str(n);
            s
        });
        let meta = ContentMetadata {
            timestamp: **largest_timestamp,
            version,
        };
        write_update_metadata(sysroot_path, self, &meta)?;
        Ok(meta)
    }
    /// Metadata of the staged update for the booted system, if any.
    fn query_update(&self) -> Result<Option<ContentMetadata>> {
        get_component_update("/", self)
    }
    /// Compare the recorded file tree against the live ESP, reporting any
    /// changed or removed files.
    fn validate(&self, current: &InstalledContent) -> Result<ValidationResult> {
        let currentf = current
            .filetree
            .as_ref()
            .ok_or_else(|| anyhow::anyhow!("No filetree for installed EFI found!"))?;
        let efidir = openat::Dir::open(&Path::new("/").join(MOUNT_PATH).join("EFI"))?;
        let diff = currentf.relative_diff_to(&efidir)?;
        let mut errs = Vec::new();
        for f in diff.changes.iter() {
            errs.push(format!("Changed: {}", f));
        }
        for f in diff.removals.iter() {
            errs.push(format!("Removed: {}", f));
        }
        // A relative diff only considers files we already track, so additions
        // should be impossible here (invariant of relative_diff_to).
        assert_eq!(diff.additions.len(), 0);
        if !errs.is_empty() {
            Ok(ValidationResult::Errors(errs))
        } else {
            Ok(ValidationResult::Valid)
        }
    }
}
/// Sanity-check that `dir` is on a FAT (msdos) filesystem before we write to
/// it; anything else cannot be a valid ESP.
fn validate_esp(dir: &openat::Dir) -> Result<()> {
    let fstype = nix::sys::statfs::fstatfs(dir)?.filesystem_type();
    if fstype == nix::sys::statfs::MSDOS_SUPER_MAGIC {
        Ok(())
    } else {
        bail!("EFI mount is not a msdos filesystem, but is {:?}", fstype)
    }
}
| 35.665414 | 98 | 0.541899 |
9bcf78d2b10b61319bfd14511819825db3e7bf0a
| 84,384 |
#![doc = "generated by AutoRust"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// Data set backed by a single ADLS Gen 1 file.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen1FileDataSet {
    #[serde(flatten)]
    pub data_set: DataSet,
    pub properties: AdlsGen1FileProperties,
}
impl AdlsGen1FileDataSet {
    pub fn new(data_set: DataSet, properties: AdlsGen1FileProperties) -> Self {
        Self { data_set, properties }
    }
}
/// Location of a file within an ADLS Gen 1 account.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen1FileProperties {
    #[serde(rename = "accountName")]
    pub account_name: String,
    #[serde(rename = "dataSetId", default, skip_serializing_if = "Option::is_none")]
    pub data_set_id: Option<String>,
    #[serde(rename = "fileName")]
    pub file_name: String,
    #[serde(rename = "folderPath")]
    pub folder_path: String,
    #[serde(rename = "resourceGroup")]
    pub resource_group: String,
    #[serde(rename = "subscriptionId")]
    pub subscription_id: String,
}
impl AdlsGen1FileProperties {
    pub fn new(account_name: String, file_name: String, folder_path: String, resource_group: String, subscription_id: String) -> Self {
        Self {
            account_name,
            data_set_id: None,
            file_name,
            folder_path,
            resource_group,
            subscription_id,
        }
    }
}
/// Data set backed by an ADLS Gen 1 folder.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen1FolderDataSet {
    #[serde(flatten)]
    pub data_set: DataSet,
    pub properties: AdlsGen1FolderProperties,
}
impl AdlsGen1FolderDataSet {
    pub fn new(data_set: DataSet, properties: AdlsGen1FolderProperties) -> Self {
        Self { data_set, properties }
    }
}
/// Location of a folder within an ADLS Gen 1 account.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen1FolderProperties {
    #[serde(rename = "accountName")]
    pub account_name: String,
    #[serde(rename = "dataSetId", default, skip_serializing_if = "Option::is_none")]
    pub data_set_id: Option<String>,
    #[serde(rename = "folderPath")]
    pub folder_path: String,
    #[serde(rename = "resourceGroup")]
    pub resource_group: String,
    #[serde(rename = "subscriptionId")]
    pub subscription_id: String,
}
impl AdlsGen1FolderProperties {
    pub fn new(account_name: String, folder_path: String, resource_group: String, subscription_id: String) -> Self {
        Self {
            account_name,
            data_set_id: None,
            folder_path,
            resource_group,
            subscription_id,
        }
    }
}
/// Data set backed by a single ADLS Gen 2 file.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen2FileDataSet {
    #[serde(flatten)]
    pub data_set: DataSet,
    pub properties: AdlsGen2FileProperties,
}
impl AdlsGen2FileDataSet {
    pub fn new(data_set: DataSet, properties: AdlsGen2FileProperties) -> Self {
        Self { data_set, properties }
    }
}
/// Consumer-side mapping of an ADLS Gen 2 file data set.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen2FileDataSetMapping {
    #[serde(flatten)]
    pub data_set_mapping: DataSetMapping,
    pub properties: AdlsGen2FileDataSetMappingProperties,
}
impl AdlsGen2FileDataSetMapping {
    pub fn new(data_set_mapping: DataSetMapping, properties: AdlsGen2FileDataSetMappingProperties) -> Self {
        Self {
            data_set_mapping,
            properties,
        }
    }
}
/// Properties of the ADLS Gen 2 file data set mapping.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen2FileDataSetMappingProperties {
    #[serde(rename = "dataSetId")]
    pub data_set_id: String,
    #[serde(rename = "dataSetMappingStatus", default, skip_serializing_if = "Option::is_none")]
    pub data_set_mapping_status: Option<adls_gen2_file_data_set_mapping_properties::DataSetMappingStatus>,
    #[serde(rename = "filePath")]
    pub file_path: String,
    #[serde(rename = "fileSystem")]
    pub file_system: String,
    #[serde(rename = "outputType", default, skip_serializing_if = "Option::is_none")]
    pub output_type: Option<adls_gen2_file_data_set_mapping_properties::OutputType>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<adls_gen2_file_data_set_mapping_properties::ProvisioningState>,
    #[serde(rename = "resourceGroup")]
    pub resource_group: String,
    #[serde(rename = "storageAccountName")]
    pub storage_account_name: String,
    #[serde(rename = "subscriptionId")]
    pub subscription_id: String,
}
impl AdlsGen2FileDataSetMappingProperties {
    pub fn new(
        data_set_id: String,
        file_path: String,
        file_system: String,
        resource_group: String,
        storage_account_name: String,
        subscription_id: String,
    ) -> Self {
        Self {
            data_set_id,
            data_set_mapping_status: None,
            file_path,
            file_system,
            output_type: None,
            provisioning_state: None,
            resource_group,
            storage_account_name,
            subscription_id,
        }
    }
}
/// Enumerations used by [`AdlsGen2FileDataSetMappingProperties`].
pub mod adls_gen2_file_data_set_mapping_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DataSetMappingStatus {
        Ok,
        Broken,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum OutputType {
        Csv,
        Parquet,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
}
/// Location of a file within an ADLS Gen 2 storage account.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen2FileProperties {
    #[serde(rename = "dataSetId", default, skip_serializing_if = "Option::is_none")]
    pub data_set_id: Option<String>,
    #[serde(rename = "filePath")]
    pub file_path: String,
    #[serde(rename = "fileSystem")]
    pub file_system: String,
    #[serde(rename = "resourceGroup")]
    pub resource_group: String,
    #[serde(rename = "storageAccountName")]
    pub storage_account_name: String,
    #[serde(rename = "subscriptionId")]
    pub subscription_id: String,
}
impl AdlsGen2FileProperties {
    pub fn new(
        file_path: String,
        file_system: String,
        resource_group: String,
        storage_account_name: String,
        subscription_id: String,
    ) -> Self {
        Self {
            data_set_id: None,
            file_path,
            file_system,
            resource_group,
            storage_account_name,
            subscription_id,
        }
    }
}
/// Data set backed by an ADLS Gen 2 file system.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen2FileSystemDataSet {
    #[serde(flatten)]
    pub data_set: DataSet,
    pub properties: AdlsGen2FileSystemProperties,
}
impl AdlsGen2FileSystemDataSet {
    pub fn new(data_set: DataSet, properties: AdlsGen2FileSystemProperties) -> Self {
        Self { data_set, properties }
    }
}
/// Consumer-side mapping of an ADLS Gen 2 file system data set.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen2FileSystemDataSetMapping {
    #[serde(flatten)]
    pub data_set_mapping: DataSetMapping,
    pub properties: AdlsGen2FileSystemDataSetMappingProperties,
}
impl AdlsGen2FileSystemDataSetMapping {
    pub fn new(data_set_mapping: DataSetMapping, properties: AdlsGen2FileSystemDataSetMappingProperties) -> Self {
        Self {
            data_set_mapping,
            properties,
        }
    }
}
/// Properties of the ADLS Gen 2 file system data set mapping.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen2FileSystemDataSetMappingProperties {
    #[serde(rename = "dataSetId")]
    pub data_set_id: String,
    #[serde(rename = "dataSetMappingStatus", default, skip_serializing_if = "Option::is_none")]
    pub data_set_mapping_status: Option<adls_gen2_file_system_data_set_mapping_properties::DataSetMappingStatus>,
    #[serde(rename = "fileSystem")]
    pub file_system: String,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<adls_gen2_file_system_data_set_mapping_properties::ProvisioningState>,
    #[serde(rename = "resourceGroup")]
    pub resource_group: String,
    #[serde(rename = "storageAccountName")]
    pub storage_account_name: String,
    #[serde(rename = "subscriptionId")]
    pub subscription_id: String,
}
impl AdlsGen2FileSystemDataSetMappingProperties {
    pub fn new(
        data_set_id: String,
        file_system: String,
        resource_group: String,
        storage_account_name: String,
        subscription_id: String,
    ) -> Self {
        Self {
            data_set_id,
            data_set_mapping_status: None,
            file_system,
            provisioning_state: None,
            resource_group,
            storage_account_name,
            subscription_id,
        }
    }
}
/// Enumerations used by [`AdlsGen2FileSystemDataSetMappingProperties`].
pub mod adls_gen2_file_system_data_set_mapping_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DataSetMappingStatus {
        Ok,
        Broken,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
}
/// Location of a file system within an ADLS Gen 2 storage account.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen2FileSystemProperties {
    #[serde(rename = "dataSetId", default, skip_serializing_if = "Option::is_none")]
    pub data_set_id: Option<String>,
    #[serde(rename = "fileSystem")]
    pub file_system: String,
    #[serde(rename = "resourceGroup")]
    pub resource_group: String,
    #[serde(rename = "storageAccountName")]
    pub storage_account_name: String,
    #[serde(rename = "subscriptionId")]
    pub subscription_id: String,
}
impl AdlsGen2FileSystemProperties {
    pub fn new(file_system: String, resource_group: String, storage_account_name: String, subscription_id: String) -> Self {
        Self {
            data_set_id: None,
            file_system,
            resource_group,
            storage_account_name,
            subscription_id,
        }
    }
}
/// Data set backed by an ADLS Gen 2 folder.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen2FolderDataSet {
    #[serde(flatten)]
    pub data_set: DataSet,
    pub properties: AdlsGen2FolderProperties,
}
impl AdlsGen2FolderDataSet {
    pub fn new(data_set: DataSet, properties: AdlsGen2FolderProperties) -> Self {
        Self { data_set, properties }
    }
}
/// Consumer-side mapping of an ADLS Gen 2 folder data set.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen2FolderDataSetMapping {
    #[serde(flatten)]
    pub data_set_mapping: DataSetMapping,
    pub properties: AdlsGen2FolderDataSetMappingProperties,
}
impl AdlsGen2FolderDataSetMapping {
    pub fn new(data_set_mapping: DataSetMapping, properties: AdlsGen2FolderDataSetMappingProperties) -> Self {
        Self {
            data_set_mapping,
            properties,
        }
    }
}
/// Properties of the ADLS Gen 2 folder data set mapping.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen2FolderDataSetMappingProperties {
    #[serde(rename = "dataSetId")]
    pub data_set_id: String,
    #[serde(rename = "dataSetMappingStatus", default, skip_serializing_if = "Option::is_none")]
    pub data_set_mapping_status: Option<adls_gen2_folder_data_set_mapping_properties::DataSetMappingStatus>,
    #[serde(rename = "fileSystem")]
    pub file_system: String,
    #[serde(rename = "folderPath")]
    pub folder_path: String,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<adls_gen2_folder_data_set_mapping_properties::ProvisioningState>,
    #[serde(rename = "resourceGroup")]
    pub resource_group: String,
    #[serde(rename = "storageAccountName")]
    pub storage_account_name: String,
    #[serde(rename = "subscriptionId")]
    pub subscription_id: String,
}
impl AdlsGen2FolderDataSetMappingProperties {
    pub fn new(
        data_set_id: String,
        file_system: String,
        folder_path: String,
        resource_group: String,
        storage_account_name: String,
        subscription_id: String,
    ) -> Self {
        Self {
            data_set_id,
            data_set_mapping_status: None,
            file_system,
            folder_path,
            provisioning_state: None,
            resource_group,
            storage_account_name,
            subscription_id,
        }
    }
}
/// Enumerations used by [`AdlsGen2FolderDataSetMappingProperties`].
pub mod adls_gen2_folder_data_set_mapping_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DataSetMappingStatus {
        Ok,
        Broken,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
}
/// Location of a folder within an ADLS Gen 2 storage account.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdlsGen2FolderProperties {
    #[serde(rename = "dataSetId", default, skip_serializing_if = "Option::is_none")]
    pub data_set_id: Option<String>,
    #[serde(rename = "fileSystem")]
    pub file_system: String,
    #[serde(rename = "folderPath")]
    pub folder_path: String,
    #[serde(rename = "resourceGroup")]
    pub resource_group: String,
    #[serde(rename = "storageAccountName")]
    pub storage_account_name: String,
    #[serde(rename = "subscriptionId")]
    pub subscription_id: String,
}
impl AdlsGen2FolderProperties {
    pub fn new(
        file_system: String,
        folder_path: String,
        resource_group: String,
        storage_account_name: String,
        subscription_id: String,
    ) -> Self {
        Self {
            data_set_id: None,
            file_system,
            folder_path,
            resource_group,
            storage_account_name,
            subscription_id,
        }
    }
}
/// A Data Share account resource (ARM tracked resource plus identity).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Account {
    #[serde(flatten)]
    pub default_dto: DefaultDto,
    pub identity: Identity,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<AccountProperties>,
}
impl Account {
    pub fn new(identity: Identity) -> Self {
        Self {
            default_dto: DefaultDto::default(),
            identity,
            properties: None,
        }
    }
}
/// Paged list of [`Account`] resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccountList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<Account>,
}
impl AccountList {
    pub fn new(value: Vec<Account>) -> Self {
        Self { next_link: None, value }
    }
}
/// Read-only properties of a Data Share account.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AccountProperties {
    #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<account_properties::ProvisioningState>,
    #[serde(rename = "userEmail", default, skip_serializing_if = "Option::is_none")]
    pub user_email: Option<String>,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<String>,
}
impl AccountProperties {
    pub fn new() -> Self {
        Self::default()
    }
}
/// Enumerations used by [`AccountProperties`].
pub mod account_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
}
/// Patch payload for updating an account (tags only).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AccountUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
impl AccountUpdateParameters {
    pub fn new() -> Self {
        Self::default()
    }
}
/// Data set backed by an Azure Storage blob container.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobContainerDataSet {
    #[serde(flatten)]
    pub data_set: DataSet,
    pub properties: BlobContainerProperties,
}
impl BlobContainerDataSet {
    pub fn new(data_set: DataSet, properties: BlobContainerProperties) -> Self {
        Self { data_set, properties }
    }
}
/// Consumer-side mapping of a blob container data set.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobContainerDataSetMapping {
    #[serde(flatten)]
    pub data_set_mapping: DataSetMapping,
    pub properties: BlobContainerMappingProperties,
}
impl BlobContainerDataSetMapping {
    pub fn new(data_set_mapping: DataSetMapping, properties: BlobContainerMappingProperties) -> Self {
        Self {
            data_set_mapping,
            properties,
        }
    }
}
/// Properties of the blob container data set mapping.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobContainerMappingProperties {
    #[serde(rename = "containerName")]
    pub container_name: String,
    #[serde(rename = "dataSetId")]
    pub data_set_id: String,
    #[serde(rename = "dataSetMappingStatus", default, skip_serializing_if = "Option::is_none")]
    pub data_set_mapping_status: Option<blob_container_mapping_properties::DataSetMappingStatus>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<blob_container_mapping_properties::ProvisioningState>,
    #[serde(rename = "resourceGroup")]
    pub resource_group: String,
    #[serde(rename = "storageAccountName")]
    pub storage_account_name: String,
    #[serde(rename = "subscriptionId")]
    pub subscription_id: String,
}
impl BlobContainerMappingProperties {
    pub fn new(
        container_name: String,
        data_set_id: String,
        resource_group: String,
        storage_account_name: String,
        subscription_id: String,
    ) -> Self {
        Self {
            container_name,
            data_set_id,
            data_set_mapping_status: None,
            provisioning_state: None,
            resource_group,
            storage_account_name,
            subscription_id,
        }
    }
}
/// Enumerations used by [`BlobContainerMappingProperties`].
pub mod blob_container_mapping_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DataSetMappingStatus {
        Ok,
        Broken,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
}
/// Location of a blob container within a storage account.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobContainerProperties {
    #[serde(rename = "containerName")]
    pub container_name: String,
    #[serde(rename = "dataSetId", default, skip_serializing_if = "Option::is_none")]
    pub data_set_id: Option<String>,
    #[serde(rename = "resourceGroup")]
    pub resource_group: String,
    #[serde(rename = "storageAccountName")]
    pub storage_account_name: String,
    #[serde(rename = "subscriptionId")]
    pub subscription_id: String,
}
impl BlobContainerProperties {
    pub fn new(container_name: String, resource_group: String, storage_account_name: String, subscription_id: String) -> Self {
        Self {
            container_name,
            data_set_id: None,
            resource_group,
            storage_account_name,
            subscription_id,
        }
    }
}
/// Data set backed by a single blob.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobDataSet {
    #[serde(flatten)]
    pub data_set: DataSet,
    pub properties: BlobProperties,
}
impl BlobDataSet {
    pub fn new(data_set: DataSet, properties: BlobProperties) -> Self {
        Self { data_set, properties }
    }
}
/// Consumer-side mapping of a blob data set.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobDataSetMapping {
    #[serde(flatten)]
    pub data_set_mapping: DataSetMapping,
    pub properties: BlobMappingProperties,
}
impl BlobDataSetMapping {
    pub fn new(data_set_mapping: DataSetMapping, properties: BlobMappingProperties) -> Self {
        Self {
            data_set_mapping,
            properties,
        }
    }
}
/// Data set backed by a blob folder (prefix).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobFolderDataSet {
    #[serde(flatten)]
    pub data_set: DataSet,
    pub properties: BlobFolderProperties,
}
impl BlobFolderDataSet {
    pub fn new(data_set: DataSet, properties: BlobFolderProperties) -> Self {
        Self { data_set, properties }
    }
}
/// Consumer-side mapping of a blob folder data set.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobFolderDataSetMapping {
    #[serde(flatten)]
    pub data_set_mapping: DataSetMapping,
    pub properties: BlobFolderMappingProperties,
}
impl BlobFolderDataSetMapping {
    pub fn new(data_set_mapping: DataSetMapping, properties: BlobFolderMappingProperties) -> Self {
        Self {
            data_set_mapping,
            properties,
        }
    }
}
/// Properties of the blob folder data set mapping.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobFolderMappingProperties {
    #[serde(rename = "containerName")]
    pub container_name: String,
    #[serde(rename = "dataSetId")]
    pub data_set_id: String,
    #[serde(rename = "dataSetMappingStatus", default, skip_serializing_if = "Option::is_none")]
    pub data_set_mapping_status: Option<blob_folder_mapping_properties::DataSetMappingStatus>,
    pub prefix: String,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<blob_folder_mapping_properties::ProvisioningState>,
    #[serde(rename = "resourceGroup")]
    pub resource_group: String,
    #[serde(rename = "storageAccountName")]
    pub storage_account_name: String,
    #[serde(rename = "subscriptionId")]
    pub subscription_id: String,
}
impl BlobFolderMappingProperties {
    pub fn new(
        container_name: String,
        data_set_id: String,
        prefix: String,
        resource_group: String,
        storage_account_name: String,
        subscription_id: String,
    ) -> Self {
        Self {
            container_name,
            data_set_id,
            data_set_mapping_status: None,
            prefix,
            provisioning_state: None,
            resource_group,
            storage_account_name,
            subscription_id,
        }
    }
}
/// Enumerations used by [`BlobFolderMappingProperties`].
pub mod blob_folder_mapping_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DataSetMappingStatus {
        Ok,
        Broken,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
}
/// Location of a blob folder (container + prefix) within a storage account.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobFolderProperties {
    #[serde(rename = "containerName")]
    pub container_name: String,
    #[serde(rename = "dataSetId", default, skip_serializing_if = "Option::is_none")]
    pub data_set_id: Option<String>,
    pub prefix: String,
    #[serde(rename = "resourceGroup")]
    pub resource_group: String,
    #[serde(rename = "storageAccountName")]
    pub storage_account_name: String,
    #[serde(rename = "subscriptionId")]
    pub subscription_id: String,
}
impl BlobFolderProperties {
    pub fn new(
        container_name: String,
        prefix: String,
        resource_group: String,
        storage_account_name: String,
        subscription_id: String,
    ) -> Self {
        Self {
            container_name,
            data_set_id: None,
            prefix,
            resource_group,
            storage_account_name,
            subscription_id,
        }
    }
}
/// Properties of the blob data set mapping.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobMappingProperties {
    #[serde(rename = "containerName")]
    pub container_name: String,
    #[serde(rename = "dataSetId")]
    pub data_set_id: String,
    #[serde(rename = "dataSetMappingStatus", default, skip_serializing_if = "Option::is_none")]
    pub data_set_mapping_status: Option<blob_mapping_properties::DataSetMappingStatus>,
    #[serde(rename = "filePath")]
    pub file_path: String,
    #[serde(rename = "outputType", default, skip_serializing_if = "Option::is_none")]
    pub output_type: Option<blob_mapping_properties::OutputType>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<blob_mapping_properties::ProvisioningState>,
    #[serde(rename = "resourceGroup")]
    pub resource_group: String,
    #[serde(rename = "storageAccountName")]
    pub storage_account_name: String,
    #[serde(rename = "subscriptionId")]
    pub subscription_id: String,
}
impl BlobMappingProperties {
    pub fn new(
        container_name: String,
        data_set_id: String,
        file_path: String,
        resource_group: String,
        storage_account_name: String,
        subscription_id: String,
    ) -> Self {
        Self {
            container_name,
            data_set_id,
            data_set_mapping_status: None,
            file_path,
            output_type: None,
            provisioning_state: None,
            resource_group,
            storage_account_name,
            subscription_id,
        }
    }
}
/// Enumerations used by [`BlobMappingProperties`].
pub mod blob_mapping_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DataSetMappingStatus {
        Ok,
        Broken,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum OutputType {
        Csv,
        Parquet,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
}
/// Location of a single blob within a storage account.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobProperties {
    #[serde(rename = "containerName")]
    pub container_name: String,
    #[serde(rename = "dataSetId", default, skip_serializing_if = "Option::is_none")]
    pub data_set_id: Option<String>,
    #[serde(rename = "filePath")]
    pub file_path: String,
    #[serde(rename = "resourceGroup")]
    pub resource_group: String,
    #[serde(rename = "storageAccountName")]
    pub storage_account_name: String,
    #[serde(rename = "subscriptionId")]
    pub subscription_id: String,
}
impl BlobProperties {
    pub fn new(
        container_name: String,
        file_path: String,
        resource_group: String,
        storage_account_name: String,
        subscription_id: String,
    ) -> Self {
        Self {
            container_name,
            data_set_id: None,
            file_path,
            resource_group,
            storage_account_name,
            subscription_id,
        }
    }
}
/// A consumer-side invitation resource; the common resource envelope
/// (`ProxyDto`) is flattened into the same JSON object.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConsumerInvitation {
    #[serde(flatten)]
    pub proxy_dto: ProxyDto,
    pub properties: ConsumerInvitationProperties,
}
impl ConsumerInvitation {
    /// Wraps `properties` in an empty resource envelope.
    pub fn new(properties: ConsumerInvitationProperties) -> Self {
        Self {
            proxy_dto: ProxyDto::default(),
            properties,
        }
    }
}
/// One page of consumer invitations; `next_link` carries the
/// continuation URL when more pages exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConsumerInvitationList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<ConsumerInvitation>,
}
impl ConsumerInvitationList {
    /// Builds a single page with no continuation link.
    pub fn new(value: Vec<ConsumerInvitation>) -> Self {
        Self { next_link: None, value }
    }
}
/// Properties of a consumer invitation; only `invitation_id` is required,
/// every other field is optional and omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConsumerInvitationProperties {
    #[serde(rename = "dataSetCount", default, skip_serializing_if = "Option::is_none")]
    pub data_set_count: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "invitationId")]
    pub invitation_id: String,
    #[serde(rename = "invitationStatus", default, skip_serializing_if = "Option::is_none")]
    pub invitation_status: Option<consumer_invitation_properties::InvitationStatus>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(rename = "providerEmail", default, skip_serializing_if = "Option::is_none")]
    pub provider_email: Option<String>,
    #[serde(rename = "providerName", default, skip_serializing_if = "Option::is_none")]
    pub provider_name: Option<String>,
    #[serde(rename = "providerTenantName", default, skip_serializing_if = "Option::is_none")]
    pub provider_tenant_name: Option<String>,
    #[serde(rename = "respondedAt", default, skip_serializing_if = "Option::is_none")]
    pub responded_at: Option<String>,
    #[serde(rename = "sentAt", default, skip_serializing_if = "Option::is_none")]
    pub sent_at: Option<String>,
    #[serde(rename = "shareName", default, skip_serializing_if = "Option::is_none")]
    pub share_name: Option<String>,
    #[serde(rename = "termsOfUse", default, skip_serializing_if = "Option::is_none")]
    pub terms_of_use: Option<String>,
    #[serde(rename = "userEmail", default, skip_serializing_if = "Option::is_none")]
    pub user_email: Option<String>,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<String>,
}
impl ConsumerInvitationProperties {
    /// Builds properties with the required `invitation_id`; all optional
    /// fields start as `None`.
    pub fn new(invitation_id: String) -> Self {
        Self {
            data_set_count: None,
            description: None,
            invitation_id,
            invitation_status: None,
            location: None,
            provider_email: None,
            provider_name: None,
            provider_tenant_name: None,
            responded_at: None,
            sent_at: None,
            share_name: None,
            terms_of_use: None,
            user_email: None,
            user_name: None,
        }
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ConsumerSourceDataSet {
#[serde(flatten)]
pub proxy_dto: ProxyDto,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ConsumerSourceDataSetProperties>,
}
impl ConsumerSourceDataSet {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConsumerSourceDataSetList {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
pub value: Vec<ConsumerSourceDataSet>,
}
impl ConsumerSourceDataSetList {
pub fn new(value: Vec<ConsumerSourceDataSet>) -> Self {
Self { next_link: None, value }
}
}
/// Read-only descriptive properties of a consumer source data set;
/// every field is optional and omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ConsumerSourceDataSetProperties {
    #[serde(rename = "dataSetId", default, skip_serializing_if = "Option::is_none")]
    pub data_set_id: Option<String>,
    #[serde(rename = "dataSetLocation", default, skip_serializing_if = "Option::is_none")]
    pub data_set_location: Option<String>,
    #[serde(rename = "dataSetName", default, skip_serializing_if = "Option::is_none")]
    pub data_set_name: Option<String>,
    #[serde(rename = "dataSetPath", default, skip_serializing_if = "Option::is_none")]
    pub data_set_path: Option<String>,
    #[serde(rename = "dataSetType", default, skip_serializing_if = "Option::is_none")]
    pub data_set_type: Option<consumer_source_data_set_properties::DataSetType>,
}
impl ConsumerSourceDataSetProperties {
    /// Creates an empty instance (all fields `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Enum types referenced by `ConsumerSourceDataSetProperties`.
pub mod consumer_source_data_set_properties {
    use super::*;
    /// Kind of the underlying data set. Variants serialize by name except
    /// the SQL ones, which keep the service's `SqlDBTable`/`SqlDWTable`
    /// capitalization on the wire.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DataSetType {
        Blob,
        Container,
        BlobFolder,
        AdlsGen2FileSystem,
        AdlsGen2Folder,
        AdlsGen2File,
        AdlsGen1Folder,
        AdlsGen1File,
        KustoCluster,
        KustoDatabase,
        #[serde(rename = "SqlDBTable")]
        SqlDbTable,
        #[serde(rename = "SqlDWTable")]
        SqlDwTable,
    }
}
/// Base data set resource; concrete variants carry their payload in a
/// sibling type that flattens this one in (see e.g. `KustoClusterDataSet`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataSet {
    #[serde(flatten)]
    pub proxy_dto: ProxyDto,
    pub kind: data_set::Kind,
}
impl DataSet {
    /// Wraps `kind` in an empty resource envelope.
    pub fn new(kind: data_set::Kind) -> Self {
        Self {
            proxy_dto: ProxyDto::default(),
            kind,
        }
    }
}
/// Enum types referenced by `DataSet`.
pub mod data_set {
    use super::*;
    /// Discriminator for the concrete data set type; SQL variants keep the
    /// service's `SqlDBTable`/`SqlDWTable` capitalization on the wire.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Kind {
        Blob,
        Container,
        BlobFolder,
        AdlsGen2FileSystem,
        AdlsGen2Folder,
        AdlsGen2File,
        AdlsGen1Folder,
        AdlsGen1File,
        KustoCluster,
        KustoDatabase,
        #[serde(rename = "SqlDBTable")]
        SqlDbTable,
        #[serde(rename = "SqlDWTable")]
        SqlDwTable,
    }
}
/// One page of data sets; `next_link` carries the continuation URL
/// when more pages exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataSetList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<DataSet>,
}
impl DataSetList {
pub fn new(value: Vec<DataSet>) -> Self {
Self { next_link: None, value }
}
}
/// Base data set mapping resource; concrete variants flatten this in.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataSetMapping {
    #[serde(flatten)]
    pub proxy_dto: ProxyDto,
    pub kind: data_set_mapping::Kind,
}
impl DataSetMapping {
    /// Wraps `kind` in an empty resource envelope.
    pub fn new(kind: data_set_mapping::Kind) -> Self {
        Self {
            proxy_dto: ProxyDto::default(),
            kind,
        }
    }
}
/// Enum types referenced by `DataSetMapping`.
pub mod data_set_mapping {
    use super::*;
    /// Discriminator for the concrete mapping type. Note this list has no
    /// ADLS Gen1 variants, unlike `data_set::Kind`; SQL variants keep the
    /// service's `SqlDBTable`/`SqlDWTable` capitalization on the wire.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Kind {
        Blob,
        Container,
        BlobFolder,
        AdlsGen2FileSystem,
        AdlsGen2Folder,
        AdlsGen2File,
        KustoCluster,
        KustoDatabase,
        #[serde(rename = "SqlDBTable")]
        SqlDbTable,
        #[serde(rename = "SqlDWTable")]
        SqlDwTable,
    }
}
/// One page of data set mappings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataSetMappingList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<DataSetMapping>,
}
impl DataSetMappingList {
    /// Builds a single page with no continuation link.
    pub fn new(value: Vec<DataSetMapping>) -> Self {
        Self { next_link: None, value }
    }
}
/// Top-level error payload returned by the service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataShareError {
    pub error: DataShareErrorInfo,
}
impl DataShareError {
    /// Wraps the given error detail.
    pub fn new(error: DataShareErrorInfo) -> Self {
        Self { error }
    }
}
/// Error detail: machine-readable `code`, human-readable `message`,
/// optional `target`, and a possibly-nested list of sub-errors.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataShareErrorInfo {
    pub code: String,
    /// Nested sub-errors; empty list is omitted from JSON.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub details: Vec<DataShareErrorInfo>,
    pub message: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
}
impl DataShareErrorInfo {
    /// Builds an error with no sub-details and no target.
    pub fn new(code: String, message: String) -> Self {
        Self {
            code,
            details: Vec::new(),
            message,
            target: None,
        }
    }
}
/// Tracked-resource envelope: the common `ProxyDto` fields plus an
/// optional `location` and free-form `tags` object.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct DefaultDto {
    #[serde(flatten)]
    pub proxy_dto: ProxyDto,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
impl DefaultDto {
    /// Creates an empty envelope (all fields default / `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// A metric dimension: internal `name` plus its display name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct DimensionProperties {
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}
impl DimensionProperties {
    /// Creates an empty instance (all fields `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Email-registration record with activation code/expiry and status;
/// all fields optional and omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EmailRegistration {
    #[serde(rename = "activationCode", default, skip_serializing_if = "Option::is_none")]
    pub activation_code: Option<String>,
    #[serde(rename = "activationExpirationDate", default, skip_serializing_if = "Option::is_none")]
    pub activation_expiration_date: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub email: Option<String>,
    #[serde(rename = "registrationStatus", default, skip_serializing_if = "Option::is_none")]
    pub registration_status: Option<email_registration::RegistrationStatus>,
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
}
impl EmailRegistration {
    /// Creates an empty instance (all fields `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Enum types referenced by `EmailRegistration`.
pub mod email_registration {
    use super::*;
    /// Activation status of an email registration.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum RegistrationStatus {
        ActivationPending,
        Activated,
        ActivationAttemptsExhausted,
    }
}
/// Managed identity attached to a resource. The Rust-reserved word `type`
/// is mapped to the `type_` field via a serde rename.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Identity {
    #[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
    pub principal_id: Option<String>,
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<identity::Type>,
}
impl Identity {
    /// Creates an empty instance (all fields `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Enum types referenced by `Identity`.
pub mod identity {
    use super::*;
    /// Identity type; only system-assigned is modeled here.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        SystemAssigned,
    }
}
/// A provider-side invitation resource; envelope fields are flattened.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Invitation {
    #[serde(flatten)]
    pub proxy_dto: ProxyDto,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<InvitationProperties>,
}
impl Invitation {
    /// Creates an empty invitation (all fields default / `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// One page of invitations.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InvitationList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<Invitation>,
}
impl InvitationList {
    /// Builds a single page with no continuation link.
    pub fn new(value: Vec<Invitation>) -> Self {
        Self { next_link: None, value }
    }
}
/// Properties of an invitation: recipient identifiers (AAD tenant/object
/// id or email), sender info and status; all optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct InvitationProperties {
    #[serde(rename = "invitationId", default, skip_serializing_if = "Option::is_none")]
    pub invitation_id: Option<String>,
    #[serde(rename = "invitationStatus", default, skip_serializing_if = "Option::is_none")]
    pub invitation_status: Option<invitation_properties::InvitationStatus>,
    #[serde(rename = "respondedAt", default, skip_serializing_if = "Option::is_none")]
    pub responded_at: Option<String>,
    #[serde(rename = "sentAt", default, skip_serializing_if = "Option::is_none")]
    pub sent_at: Option<String>,
    #[serde(rename = "targetActiveDirectoryId", default, skip_serializing_if = "Option::is_none")]
    pub target_active_directory_id: Option<String>,
    #[serde(rename = "targetEmail", default, skip_serializing_if = "Option::is_none")]
    pub target_email: Option<String>,
    #[serde(rename = "targetObjectId", default, skip_serializing_if = "Option::is_none")]
    pub target_object_id: Option<String>,
    #[serde(rename = "userEmail", default, skip_serializing_if = "Option::is_none")]
    pub user_email: Option<String>,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<String>,
}
impl InvitationProperties {
    /// Creates an empty instance (all fields `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Enum types referenced by `InvitationProperties`.
pub mod invitation_properties {
    use super::*;
    /// Lifecycle state of an invitation.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum InvitationStatus {
        Pending,
        Accepted,
        Rejected,
        Withdrawn,
    }
}
/// Kusto-cluster data set: the base `DataSet` (flattened) plus
/// Kusto-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KustoClusterDataSet {
    #[serde(flatten)]
    pub data_set: DataSet,
    pub properties: KustoClusterDataSetProperties,
}
impl KustoClusterDataSet {
    /// Combines the base resource with its Kusto-cluster properties.
    pub fn new(data_set: DataSet, properties: KustoClusterDataSetProperties) -> Self {
        Self { data_set, properties }
    }
}
/// Kusto-cluster data set mapping: the base `DataSetMapping` (flattened)
/// plus Kusto-specific mapping properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KustoClusterDataSetMapping {
    #[serde(flatten)]
    pub data_set_mapping: DataSetMapping,
    pub properties: KustoClusterDataSetMappingProperties,
}
impl KustoClusterDataSetMapping {
    /// Combines the base mapping resource with its Kusto-cluster properties.
    pub fn new(data_set_mapping: DataSetMapping, properties: KustoClusterDataSetMappingProperties) -> Self {
        Self {
            data_set_mapping,
            properties,
        }
    }
}
/// Properties of a Kusto-cluster data set mapping; requires the source
/// data set id and the target cluster's ARM resource id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KustoClusterDataSetMappingProperties {
    #[serde(rename = "dataSetId")]
    pub data_set_id: String,
    #[serde(rename = "dataSetMappingStatus", default, skip_serializing_if = "Option::is_none")]
    pub data_set_mapping_status: Option<kusto_cluster_data_set_mapping_properties::DataSetMappingStatus>,
    #[serde(rename = "kustoClusterResourceId")]
    pub kusto_cluster_resource_id: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<kusto_cluster_data_set_mapping_properties::ProvisioningState>,
}
impl KustoClusterDataSetMappingProperties {
    /// Builds the properties from the required ids; status, location and
    /// provisioning state start as `None`.
    pub fn new(data_set_id: String, kusto_cluster_resource_id: String) -> Self {
        Self {
            data_set_id,
            data_set_mapping_status: None,
            kusto_cluster_resource_id,
            location: None,
            provisioning_state: None,
        }
    }
}
/// Enum types referenced by `KustoClusterDataSetMappingProperties`.
pub mod kusto_cluster_data_set_mapping_properties {
    use super::*;
    /// Status of the data set mapping.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DataSetMappingStatus {
        Ok,
        Broken,
    }
    /// Provisioning lifecycle state of the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
}
/// Properties of a Kusto-cluster data set; only the cluster's ARM
/// resource id is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KustoClusterDataSetProperties {
    #[serde(rename = "dataSetId", default, skip_serializing_if = "Option::is_none")]
    pub data_set_id: Option<String>,
    #[serde(rename = "kustoClusterResourceId")]
    pub kusto_cluster_resource_id: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<kusto_cluster_data_set_properties::ProvisioningState>,
}
impl KustoClusterDataSetProperties {
    /// Builds the properties from the cluster resource id; the optional
    /// fields start as `None`.
    pub fn new(kusto_cluster_resource_id: String) -> Self {
        Self {
            data_set_id: None,
            kusto_cluster_resource_id,
            location: None,
            provisioning_state: None,
        }
    }
}
/// Enum types referenced by `KustoClusterDataSetProperties`.
pub mod kusto_cluster_data_set_properties {
    use super::*;
    /// Provisioning lifecycle state of the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
}
/// Kusto-database data set: the base `DataSet` (flattened) plus
/// database-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KustoDatabaseDataSet {
    #[serde(flatten)]
    pub data_set: DataSet,
    pub properties: KustoDatabaseDataSetProperties,
}
impl KustoDatabaseDataSet {
    /// Combines the base resource with its Kusto-database properties.
    pub fn new(data_set: DataSet, properties: KustoDatabaseDataSetProperties) -> Self {
        Self { data_set, properties }
    }
}
/// Kusto-database data set mapping: the base `DataSetMapping`
/// (flattened) plus database-specific mapping properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KustoDatabaseDataSetMapping {
    #[serde(flatten)]
    pub data_set_mapping: DataSetMapping,
    pub properties: KustoDatabaseDataSetMappingProperties,
}
impl KustoDatabaseDataSetMapping {
    /// Combines the base mapping resource with its Kusto-database properties.
    pub fn new(data_set_mapping: DataSetMapping, properties: KustoDatabaseDataSetMappingProperties) -> Self {
        Self {
            data_set_mapping,
            properties,
        }
    }
}
/// Properties of a Kusto-database data set mapping. Note the target is
/// identified by a *cluster* resource id (`kustoClusterResourceId`), not
/// a database id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KustoDatabaseDataSetMappingProperties {
    #[serde(rename = "dataSetId")]
    pub data_set_id: String,
    #[serde(rename = "dataSetMappingStatus", default, skip_serializing_if = "Option::is_none")]
    pub data_set_mapping_status: Option<kusto_database_data_set_mapping_properties::DataSetMappingStatus>,
    #[serde(rename = "kustoClusterResourceId")]
    pub kusto_cluster_resource_id: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<kusto_database_data_set_mapping_properties::ProvisioningState>,
}
impl KustoDatabaseDataSetMappingProperties {
    /// Builds the properties from the required ids; status, location and
    /// provisioning state start as `None`.
    pub fn new(data_set_id: String, kusto_cluster_resource_id: String) -> Self {
        Self {
            data_set_id,
            data_set_mapping_status: None,
            kusto_cluster_resource_id,
            location: None,
            provisioning_state: None,
        }
    }
}
/// Enum types referenced by `KustoDatabaseDataSetMappingProperties`.
pub mod kusto_database_data_set_mapping_properties {
    use super::*;
    /// Status of the data set mapping.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DataSetMappingStatus {
        Ok,
        Broken,
    }
    /// Provisioning lifecycle state of the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
}
/// Properties of a Kusto-database data set; only the database's ARM
/// resource id is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KustoDatabaseDataSetProperties {
    #[serde(rename = "dataSetId", default, skip_serializing_if = "Option::is_none")]
    pub data_set_id: Option<String>,
    #[serde(rename = "kustoDatabaseResourceId")]
    pub kusto_database_resource_id: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<kusto_database_data_set_properties::ProvisioningState>,
}
impl KustoDatabaseDataSetProperties {
    /// Builds the properties from the database resource id; the optional
    /// fields start as `None`.
    pub fn new(kusto_database_resource_id: String) -> Self {
        Self {
            data_set_id: None,
            kusto_database_resource_id,
            location: None,
            provisioning_state: None,
        }
    }
}
/// Enum types referenced by `KustoDatabaseDataSetProperties`.
pub mod kusto_database_data_set_properties {
    use super::*;
    /// Provisioning lifecycle state of the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
}
/// One page of available REST operations.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<OperationModel>,
}
impl OperationList {
pub fn new(value: Vec<OperationModel>) -> Self {
Self { next_link: None, value }
}
}
/// Log specification advertised in operation metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct OperationMetaLogSpecification {
    #[serde(rename = "blobDuration", default, skip_serializing_if = "Option::is_none")]
    pub blob_duration: Option<String>,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}
impl OperationMetaLogSpecification {
    /// Creates an empty instance (all fields `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Metric specification advertised in operation metadata; empty vectors
/// and `None` values are omitted from JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct OperationMetaMetricSpecification {
    #[serde(rename = "aggregationType", default, skip_serializing_if = "Option::is_none")]
    pub aggregation_type: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub dimensions: Vec<DimensionProperties>,
    #[serde(rename = "displayDescription", default, skip_serializing_if = "Option::is_none")]
    pub display_description: Option<String>,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    /// NOTE(review): the service models this flag as a string, not a bool —
    /// kept as declared by the API schema.
    #[serde(rename = "enableRegionalMdmAccount", default, skip_serializing_if = "Option::is_none")]
    pub enable_regional_mdm_account: Option<String>,
    #[serde(rename = "fillGapWithZero", default, skip_serializing_if = "Option::is_none")]
    pub fill_gap_with_zero: Option<bool>,
    #[serde(rename = "internalMetricName", default, skip_serializing_if = "Option::is_none")]
    pub internal_metric_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "resourceIdDimensionNameOverride", default, skip_serializing_if = "Option::is_none")]
    pub resource_id_dimension_name_override: Option<String>,
    #[serde(rename = "supportedAggregationTypes", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_aggregation_types: Vec<String>,
    #[serde(rename = "supportedTimeGrainTypes", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_time_grain_types: Vec<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<String>,
}
impl OperationMetaMetricSpecification {
    /// Creates an empty instance (all fields default / `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Extra properties attached to an operation's metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct OperationMetaPropertyInfo {
    #[serde(rename = "serviceSpecification", default, skip_serializing_if = "Option::is_none")]
    pub service_specification: Option<OperationMetaServiceSpecification>,
}
impl OperationMetaPropertyInfo {
    /// Creates an empty instance.
    pub fn new() -> Self {
        Self::default()
    }
}
/// Log and metric specifications exposed by the service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct OperationMetaServiceSpecification {
    #[serde(rename = "logSpecifications", default, skip_serializing_if = "Vec::is_empty")]
    pub log_specifications: Vec<OperationMetaLogSpecification>,
    #[serde(rename = "metricSpecifications", default, skip_serializing_if = "Vec::is_empty")]
    pub metric_specifications: Vec<OperationMetaMetricSpecification>,
}
impl OperationMetaServiceSpecification {
    /// Creates an empty instance (both lists empty).
    pub fn new() -> Self {
        Self::default()
    }
}
/// One REST operation exposed by the provider: its `name`, `origin`,
/// display strings and metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct OperationModel {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<OperationModelProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub origin: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<OperationMetaPropertyInfo>,
}
impl OperationModel {
    /// Creates an empty instance (all fields `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Display strings for an operation (provider/resource/operation/description).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct OperationModelProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub operation: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resource: Option<String>,
}
impl OperationModelProperties {
    /// Creates an empty instance (all fields `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Status of a long-running operation: required `status` plus optional
/// timestamps and error detail.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationResponse {
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<DataShareErrorInfo>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    pub status: operation_response::Status,
}
impl OperationResponse {
    /// Builds a response with the given status; timestamps and error
    /// start as `None`.
    pub fn new(status: operation_response::Status) -> Self {
        Self {
            end_time: None,
            error: None,
            start_time: None,
            status,
        }
    }
}
/// Enum types referenced by `OperationResponse`.
pub mod operation_response {
    use super::*;
    /// Lifecycle state of a long-running operation.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Accepted,
        InProgress,
        TransientFailure,
        Succeeded,
        Failed,
        Canceled,
    }
}
/// A share subscription as seen from the provider side; envelope fields
/// are flattened.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ProviderShareSubscription {
    #[serde(flatten)]
    pub proxy_dto: ProxyDto,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ProviderShareSubscriptionProperties>,
}
impl ProviderShareSubscription {
    /// Creates an empty instance (all fields default / `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// One page of provider share subscriptions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProviderShareSubscriptionList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<ProviderShareSubscription>,
}
impl ProviderShareSubscriptionList {
    /// Builds a single page with no continuation link.
    pub fn new(value: Vec<ProviderShareSubscription>) -> Self {
        Self { next_link: None, value }
    }
}
/// Provider-side view of a share subscription: consumer/provider contact
/// info, timestamps and status; all fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ProviderShareSubscriptionProperties {
    #[serde(rename = "consumerEmail", default, skip_serializing_if = "Option::is_none")]
    pub consumer_email: Option<String>,
    #[serde(rename = "consumerName", default, skip_serializing_if = "Option::is_none")]
    pub consumer_name: Option<String>,
    #[serde(rename = "consumerTenantName", default, skip_serializing_if = "Option::is_none")]
    pub consumer_tenant_name: Option<String>,
    #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,
    #[serde(rename = "providerEmail", default, skip_serializing_if = "Option::is_none")]
    pub provider_email: Option<String>,
    #[serde(rename = "providerName", default, skip_serializing_if = "Option::is_none")]
    pub provider_name: Option<String>,
    #[serde(rename = "sharedAt", default, skip_serializing_if = "Option::is_none")]
    pub shared_at: Option<String>,
    #[serde(rename = "shareSubscriptionObjectId", default, skip_serializing_if = "Option::is_none")]
    pub share_subscription_object_id: Option<String>,
    #[serde(rename = "shareSubscriptionStatus", default, skip_serializing_if = "Option::is_none")]
    pub share_subscription_status: Option<provider_share_subscription_properties::ShareSubscriptionStatus>,
}
impl ProviderShareSubscriptionProperties {
    /// Creates an empty instance (all fields `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Enum types referenced by `ProviderShareSubscriptionProperties`.
pub mod provider_share_subscription_properties {
    use super::*;
    /// Lifecycle state of a share subscription.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ShareSubscriptionStatus {
        Active,
        Revoked,
        SourceDeleted,
        Revoking,
    }
}
/// Common ARM resource envelope (`id`, `name`, `type`); flattened into
/// every resource type in this module. The Rust-reserved word `type` is
/// mapped to the `type_` field via a serde rename.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ProxyDto {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
}
impl ProxyDto {
    /// Returns an empty envelope with `id`, `name` and `type_` unset.
    pub fn new() -> Self {
        Default::default()
    }
}
/// Recurrence settings of a source share's scheduled synchronization.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ScheduledSourceShareSynchronizationSettingProperties {
    #[serde(rename = "recurrenceInterval", default, skip_serializing_if = "Option::is_none")]
    pub recurrence_interval: Option<scheduled_source_share_synchronization_setting_properties::RecurrenceInterval>,
    #[serde(rename = "synchronizationTime", default, skip_serializing_if = "Option::is_none")]
    pub synchronization_time: Option<String>,
}
impl ScheduledSourceShareSynchronizationSettingProperties {
    /// Creates an empty instance (all fields `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Enum types referenced by `ScheduledSourceShareSynchronizationSettingProperties`.
pub mod scheduled_source_share_synchronization_setting_properties {
    use super::*;
    /// Recurrence interval of the schedule.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum RecurrenceInterval {
        Hour,
        Day,
    }
}
/// Scheduled variant of a source synchronization setting; the base
/// setting is flattened and the schedule payload is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScheduledSourceSynchronizationSetting {
    #[serde(flatten)]
    pub source_share_synchronization_setting: SourceShareSynchronizationSetting,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ScheduledSourceShareSynchronizationSettingProperties>,
}
impl ScheduledSourceSynchronizationSetting {
    /// Wraps the base setting with no schedule payload.
    pub fn new(source_share_synchronization_setting: SourceShareSynchronizationSetting) -> Self {
        Self {
            source_share_synchronization_setting,
            properties: None,
        }
    }
}
/// Scheduled variant of a synchronization setting; the base setting is
/// flattened and the schedule payload is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScheduledSynchronizationSetting {
    #[serde(flatten)]
    pub synchronization_setting: SynchronizationSetting,
    pub properties: ScheduledSynchronizationSettingProperties,
}
impl ScheduledSynchronizationSetting {
    /// Combines the base setting with its schedule payload.
    pub fn new(synchronization_setting: SynchronizationSetting, properties: ScheduledSynchronizationSettingProperties) -> Self {
        Self {
            synchronization_setting,
            properties,
        }
    }
}
/// Schedule payload of a synchronization setting; interval and start
/// time are required, the rest is server-populated metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScheduledSynchronizationSettingProperties {
    #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<scheduled_synchronization_setting_properties::ProvisioningState>,
    #[serde(rename = "recurrenceInterval")]
    pub recurrence_interval: scheduled_synchronization_setting_properties::RecurrenceInterval,
    #[serde(rename = "synchronizationTime")]
    pub synchronization_time: String,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<String>,
}
impl ScheduledSynchronizationSettingProperties {
    /// Builds the schedule from the required interval and start time;
    /// optional fields start as `None`.
    pub fn new(
        recurrence_interval: scheduled_synchronization_setting_properties::RecurrenceInterval,
        synchronization_time: String,
    ) -> Self {
        Self {
            created_at: None,
            provisioning_state: None,
            recurrence_interval,
            synchronization_time,
            user_name: None,
        }
    }
}
/// Enum types referenced by `ScheduledSynchronizationSettingProperties`.
pub mod scheduled_synchronization_setting_properties {
    use super::*;
    /// Provisioning lifecycle state of the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
    /// Recurrence interval of the schedule.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum RecurrenceInterval {
        Hour,
        Day,
    }
}
/// Scheduled variant of a trigger; the base trigger is flattened and the
/// schedule payload is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScheduledTrigger {
    #[serde(flatten)]
    pub trigger: Trigger,
    pub properties: ScheduledTriggerProperties,
}
impl ScheduledTrigger {
    /// Combines the base trigger with its schedule payload.
    pub fn new(trigger: Trigger, properties: ScheduledTriggerProperties) -> Self {
        Self { trigger, properties }
    }
}
/// Schedule payload of a trigger; interval and start time are required,
/// mode/status/audit fields are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScheduledTriggerProperties {
    #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<scheduled_trigger_properties::ProvisioningState>,
    #[serde(rename = "recurrenceInterval")]
    pub recurrence_interval: scheduled_trigger_properties::RecurrenceInterval,
    #[serde(rename = "synchronizationMode", default, skip_serializing_if = "Option::is_none")]
    pub synchronization_mode: Option<scheduled_trigger_properties::SynchronizationMode>,
    #[serde(rename = "synchronizationTime")]
    pub synchronization_time: String,
    #[serde(rename = "triggerStatus", default, skip_serializing_if = "Option::is_none")]
    pub trigger_status: Option<scheduled_trigger_properties::TriggerStatus>,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<String>,
}
impl ScheduledTriggerProperties {
    /// Builds the schedule from the required interval and start time;
    /// optional fields start as `None`.
    pub fn new(recurrence_interval: scheduled_trigger_properties::RecurrenceInterval, synchronization_time: String) -> Self {
        Self {
            created_at: None,
            provisioning_state: None,
            recurrence_interval,
            synchronization_mode: None,
            synchronization_time,
            trigger_status: None,
            user_name: None,
        }
    }
}
/// Enum types referenced by `ScheduledTriggerProperties`.
pub mod scheduled_trigger_properties {
    use super::*;
    /// Provisioning lifecycle state of the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
    /// Recurrence interval of the schedule.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum RecurrenceInterval {
        Hour,
        Day,
    }
    /// Whether a synchronization copies everything or only changes.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum SynchronizationMode {
        Incremental,
        FullSync,
    }
    /// Current state of the trigger.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum TriggerStatus {
        Active,
        Inactive,
        SourceSynchronizationSettingDeleted,
    }
}
/// A share resource; envelope fields are flattened and the payload is
/// optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Share {
    #[serde(flatten)]
    pub proxy_dto: ProxyDto,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ShareProperties>,
}
impl Share {
    /// Creates an empty share (all fields default / `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// One page of shares.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ShareList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<Share>,
}
impl ShareList {
    /// Builds a single page with no continuation link.
    pub fn new(value: Vec<Share>) -> Self {
        Self { next_link: None, value }
    }
}
/// Properties of a [`Share`]; every field is optional (server-populated or
/// caller-supplied).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ShareProperties {
    #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<share_properties::ProvisioningState>,
    #[serde(rename = "shareKind", default, skip_serializing_if = "Option::is_none")]
    pub share_kind: Option<share_properties::ShareKind>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub terms: Option<String>,
    #[serde(rename = "userEmail", default, skip_serializing_if = "Option::is_none")]
    pub user_email: Option<String>,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<String>,
}
impl ShareProperties {
    /// Creates empty properties (all fields `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Enum types scoped to [`ShareProperties`].
pub mod share_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ShareKind {
        CopyBased,
        InPlace,
    }
}
/// A subscription to a [`Share`]; unlike `Share`, `properties` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ShareSubscription {
    #[serde(flatten)]
    pub proxy_dto: ProxyDto,
    pub properties: ShareSubscriptionProperties,
}
impl ShareSubscription {
    /// Creates a subscription around the given properties with a default
    /// [`ProxyDto`] envelope.
    pub fn new(properties: ShareSubscriptionProperties) -> Self {
        Self {
            proxy_dto: ProxyDto::default(),
            properties,
        }
    }
}
/// Paged list of [`ShareSubscription`] values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ShareSubscriptionList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<ShareSubscription>,
}
impl ShareSubscriptionList {
    /// Wraps one page of values with no continuation link.
    pub fn new(value: Vec<ShareSubscription>) -> Self {
        Self { next_link: None, value }
    }
}
/// Properties of a [`ShareSubscription`]; `invitation_id` and
/// `source_share_location` are the only required fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ShareSubscriptionProperties {
    #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,
    #[serde(rename = "invitationId")]
    pub invitation_id: String,
    #[serde(rename = "providerEmail", default, skip_serializing_if = "Option::is_none")]
    pub provider_email: Option<String>,
    #[serde(rename = "providerName", default, skip_serializing_if = "Option::is_none")]
    pub provider_name: Option<String>,
    #[serde(rename = "providerTenantName", default, skip_serializing_if = "Option::is_none")]
    pub provider_tenant_name: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<share_subscription_properties::ProvisioningState>,
    #[serde(rename = "shareDescription", default, skip_serializing_if = "Option::is_none")]
    pub share_description: Option<String>,
    #[serde(rename = "shareKind", default, skip_serializing_if = "Option::is_none")]
    pub share_kind: Option<share_subscription_properties::ShareKind>,
    #[serde(rename = "shareName", default, skip_serializing_if = "Option::is_none")]
    pub share_name: Option<String>,
    #[serde(rename = "shareSubscriptionStatus", default, skip_serializing_if = "Option::is_none")]
    pub share_subscription_status: Option<share_subscription_properties::ShareSubscriptionStatus>,
    #[serde(rename = "shareTerms", default, skip_serializing_if = "Option::is_none")]
    pub share_terms: Option<String>,
    #[serde(rename = "sourceShareLocation")]
    pub source_share_location: String,
    #[serde(rename = "userEmail", default, skip_serializing_if = "Option::is_none")]
    pub user_email: Option<String>,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<String>,
}
/// Constructors for [`ShareSubscriptionProperties`].
impl ShareSubscriptionProperties {
    /// Creates the properties from the two required fields; everything else
    /// starts out as `None`.
    pub fn new(invitation_id: String, source_share_location: String) -> Self {
        Self {
            created_at: None,
            invitation_id,
            provider_email: None,
            provider_name: None,
            provider_tenant_name: None,
            provisioning_state: None,
            share_description: None,
            share_kind: None,
            share_name: None,
            share_subscription_status: None,
            share_terms: None,
            source_share_location,
            user_email: None,
            user_name: None,
        }
    }
}
/// Enum types scoped to [`ShareSubscriptionProperties`].
pub mod share_subscription_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ShareKind {
        CopyBased,
        InPlace,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ShareSubscriptionStatus {
        Active,
        Revoked,
        SourceDeleted,
        Revoking,
    }
}
/// One synchronization run of a share subscription; only
/// `synchronization_id` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ShareSubscriptionSynchronization {
    #[serde(rename = "durationMs", default, skip_serializing_if = "Option::is_none")]
    pub duration_ms: Option<i32>,
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(rename = "synchronizationId")]
    pub synchronization_id: String,
    #[serde(rename = "synchronizationMode", default, skip_serializing_if = "Option::is_none")]
    pub synchronization_mode: Option<share_subscription_synchronization::SynchronizationMode>,
}
impl ShareSubscriptionSynchronization {
    /// Creates a record for the given synchronization id; all optional
    /// fields start out as `None`.
    pub fn new(synchronization_id: String) -> Self {
        Self {
            duration_ms: None,
            end_time: None,
            message: None,
            start_time: None,
            status: None,
            synchronization_id,
            synchronization_mode: None,
        }
    }
}
/// Enum types scoped to [`ShareSubscriptionSynchronization`].
pub mod share_subscription_synchronization {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum SynchronizationMode {
        Incremental,
        FullSync,
    }
}
/// Paged list of [`ShareSubscriptionSynchronization`] values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ShareSubscriptionSynchronizationList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<ShareSubscriptionSynchronization>,
}
impl ShareSubscriptionSynchronizationList {
    /// Wraps one page of values with no continuation link.
    pub fn new(value: Vec<ShareSubscriptionSynchronization>) -> Self {
        Self { next_link: None, value }
    }
}
/// Provider-side view of a synchronization run; every field is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ShareSynchronization {
    #[serde(rename = "consumerEmail", default, skip_serializing_if = "Option::is_none")]
    pub consumer_email: Option<String>,
    #[serde(rename = "consumerName", default, skip_serializing_if = "Option::is_none")]
    pub consumer_name: Option<String>,
    #[serde(rename = "consumerTenantName", default, skip_serializing_if = "Option::is_none")]
    pub consumer_tenant_name: Option<String>,
    #[serde(rename = "durationMs", default, skip_serializing_if = "Option::is_none")]
    pub duration_ms: Option<i32>,
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(rename = "synchronizationId", default, skip_serializing_if = "Option::is_none")]
    pub synchronization_id: Option<String>,
    #[serde(rename = "synchronizationMode", default, skip_serializing_if = "Option::is_none")]
    pub synchronization_mode: Option<share_synchronization::SynchronizationMode>,
}
impl ShareSynchronization {
    /// Creates an empty record (all fields `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Enum types scoped to [`ShareSynchronization`].
pub mod share_synchronization {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum SynchronizationMode {
        Incremental,
        FullSync,
    }
}
/// Paged list of [`ShareSynchronization`] values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ShareSynchronizationList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<ShareSynchronization>,
}
impl ShareSynchronizationList {
    /// Wraps one page of values with no continuation link.
    pub fn new(value: Vec<ShareSynchronization>) -> Self {
        Self { next_link: None, value }
    }
}
/// Synchronization setting exposed on the source share; currently only a
/// discriminator (`kind`) is carried.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SourceShareSynchronizationSetting {
    pub kind: source_share_synchronization_setting::Kind,
}
impl SourceShareSynchronizationSetting {
    /// Creates a setting with the given kind discriminator.
    pub fn new(kind: source_share_synchronization_setting::Kind) -> Self {
        Self { kind }
    }
}
/// Enum types scoped to [`SourceShareSynchronizationSetting`].
pub mod source_share_synchronization_setting {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Kind {
        ScheduleBased,
    }
}
/// Paged list of [`SourceShareSynchronizationSetting`] values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SourceShareSynchronizationSettingList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<SourceShareSynchronizationSetting>,
}
impl SourceShareSynchronizationSettingList {
    /// Wraps one page of values with no continuation link.
    pub fn new(value: Vec<SourceShareSynchronizationSetting>) -> Self {
        Self { next_link: None, value }
    }
}
/// SQL DB table data set; flattens the common [`DataSet`] envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDbTableDataSet {
    #[serde(flatten)]
    pub data_set: DataSet,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SqlDbTableProperties>,
}
impl SqlDbTableDataSet {
    /// Creates a data set around the given envelope with no properties.
    pub fn new(data_set: DataSet) -> Self {
        Self {
            data_set,
            properties: None,
        }
    }
}
/// Consumer-side mapping of a SQL DB table data set; `properties` is
/// required here, unlike on [`SqlDbTableDataSet`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDbTableDataSetMapping {
    #[serde(flatten)]
    pub data_set_mapping: DataSetMapping,
    pub properties: SqlDbTableDataSetMappingProperties,
}
impl SqlDbTableDataSetMapping {
    /// Creates a mapping from its envelope and required properties.
    pub fn new(data_set_mapping: DataSetMapping, properties: SqlDbTableDataSetMappingProperties) -> Self {
        Self {
            data_set_mapping,
            properties,
        }
    }
}
/// Properties of a [`SqlDbTableDataSetMapping`]; the five table-identifying
/// fields are required, status fields are server-populated.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDbTableDataSetMappingProperties {
    #[serde(rename = "databaseName")]
    pub database_name: String,
    #[serde(rename = "dataSetId")]
    pub data_set_id: String,
    #[serde(rename = "dataSetMappingStatus", default, skip_serializing_if = "Option::is_none")]
    pub data_set_mapping_status: Option<sql_db_table_data_set_mapping_properties::DataSetMappingStatus>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<sql_db_table_data_set_mapping_properties::ProvisioningState>,
    #[serde(rename = "schemaName")]
    pub schema_name: String,
    #[serde(rename = "sqlServerResourceId")]
    pub sql_server_resource_id: String,
    #[serde(rename = "tableName")]
    pub table_name: String,
}
impl SqlDbTableDataSetMappingProperties {
    /// Creates the properties from the required fields; status fields start
    /// out as `None`.
    pub fn new(
        database_name: String,
        data_set_id: String,
        schema_name: String,
        sql_server_resource_id: String,
        table_name: String,
    ) -> Self {
        Self {
            database_name,
            data_set_id,
            data_set_mapping_status: None,
            provisioning_state: None,
            schema_name,
            sql_server_resource_id,
            table_name,
        }
    }
}
/// Enum types scoped to [`SqlDbTableDataSetMappingProperties`].
pub mod sql_db_table_data_set_mapping_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DataSetMappingStatus {
        Ok,
        Broken,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
}
/// Properties identifying a SQL DB table; only `data_set_id` is optional
/// (server-assigned).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDbTableProperties {
    #[serde(rename = "databaseName")]
    pub database_name: String,
    #[serde(rename = "dataSetId", default, skip_serializing_if = "Option::is_none")]
    pub data_set_id: Option<String>,
    #[serde(rename = "schemaName")]
    pub schema_name: String,
    #[serde(rename = "sqlServerResourceId")]
    pub sql_server_resource_id: String,
    #[serde(rename = "tableName")]
    pub table_name: String,
}
impl SqlDbTableProperties {
    /// Creates the properties from the four required identifying fields.
    pub fn new(database_name: String, schema_name: String, sql_server_resource_id: String, table_name: String) -> Self {
        Self {
            database_name,
            data_set_id: None,
            schema_name,
            sql_server_resource_id,
            table_name,
        }
    }
}
/// SQL DW (data warehouse) table data set; flattens the common [`DataSet`]
/// envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDwTableDataSet {
    #[serde(flatten)]
    pub data_set: DataSet,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SqlDwTableProperties>,
}
impl SqlDwTableDataSet {
    /// Creates a data set around the given envelope with no properties.
    pub fn new(data_set: DataSet) -> Self {
        Self {
            data_set,
            properties: None,
        }
    }
}
/// Consumer-side mapping of a SQL DW table data set; `properties` is
/// required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDwTableDataSetMapping {
    #[serde(flatten)]
    pub data_set_mapping: DataSetMapping,
    pub properties: SqlDwTableDataSetMappingProperties,
}
impl SqlDwTableDataSetMapping {
    /// Creates a mapping from its envelope and required properties.
    pub fn new(data_set_mapping: DataSetMapping, properties: SqlDwTableDataSetMappingProperties) -> Self {
        Self {
            data_set_mapping,
            properties,
        }
    }
}
/// Properties of a [`SqlDwTableDataSetMapping`]; mirrors the SQL DB variant
/// but identifies a data warehouse instead of a database.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDwTableDataSetMappingProperties {
    #[serde(rename = "dataSetId")]
    pub data_set_id: String,
    #[serde(rename = "dataSetMappingStatus", default, skip_serializing_if = "Option::is_none")]
    pub data_set_mapping_status: Option<sql_dw_table_data_set_mapping_properties::DataSetMappingStatus>,
    #[serde(rename = "dataWarehouseName")]
    pub data_warehouse_name: String,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<sql_dw_table_data_set_mapping_properties::ProvisioningState>,
    #[serde(rename = "schemaName")]
    pub schema_name: String,
    #[serde(rename = "sqlServerResourceId")]
    pub sql_server_resource_id: String,
    #[serde(rename = "tableName")]
    pub table_name: String,
}
impl SqlDwTableDataSetMappingProperties {
    /// Creates the properties from the required fields; status fields start
    /// out as `None`.
    pub fn new(
        data_set_id: String,
        data_warehouse_name: String,
        schema_name: String,
        sql_server_resource_id: String,
        table_name: String,
    ) -> Self {
        Self {
            data_set_id,
            data_set_mapping_status: None,
            data_warehouse_name,
            provisioning_state: None,
            schema_name,
            sql_server_resource_id,
            table_name,
        }
    }
}
/// Enum types scoped to [`SqlDwTableDataSetMappingProperties`].
pub mod sql_dw_table_data_set_mapping_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DataSetMappingStatus {
        Ok,
        Broken,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Creating,
        Deleting,
        Moving,
        Failed,
    }
}
/// Properties identifying a SQL DW table; only `data_set_id` is optional
/// (server-assigned).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDwTableProperties {
    #[serde(rename = "dataSetId", default, skip_serializing_if = "Option::is_none")]
    pub data_set_id: Option<String>,
    #[serde(rename = "dataWarehouseName")]
    pub data_warehouse_name: String,
    #[serde(rename = "schemaName")]
    pub schema_name: String,
    #[serde(rename = "sqlServerResourceId")]
    pub sql_server_resource_id: String,
    #[serde(rename = "tableName")]
    pub table_name: String,
}
impl SqlDwTableProperties {
    /// Creates the properties from the four required identifying fields.
    pub fn new(data_warehouse_name: String, schema_name: String, sql_server_resource_id: String, table_name: String) -> Self {
        Self {
            data_set_id: None,
            data_warehouse_name,
            schema_name,
            sql_server_resource_id,
            table_name,
        }
    }
}
/// Per-data-set detail of a synchronization run (counters, timings,
/// status); every field is optional/server-populated.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct SynchronizationDetails {
    #[serde(rename = "dataSetId", default, skip_serializing_if = "Option::is_none")]
    pub data_set_id: Option<String>,
    #[serde(rename = "dataSetType", default, skip_serializing_if = "Option::is_none")]
    pub data_set_type: Option<synchronization_details::DataSetType>,
    #[serde(rename = "durationMs", default, skip_serializing_if = "Option::is_none")]
    pub duration_ms: Option<i32>,
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    #[serde(rename = "filesRead", default, skip_serializing_if = "Option::is_none")]
    pub files_read: Option<i64>,
    #[serde(rename = "filesWritten", default, skip_serializing_if = "Option::is_none")]
    pub files_written: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "rowsCopied", default, skip_serializing_if = "Option::is_none")]
    pub rows_copied: Option<i64>,
    #[serde(rename = "rowsRead", default, skip_serializing_if = "Option::is_none")]
    pub rows_read: Option<i64>,
    #[serde(rename = "sizeRead", default, skip_serializing_if = "Option::is_none")]
    pub size_read: Option<i64>,
    #[serde(rename = "sizeWritten", default, skip_serializing_if = "Option::is_none")]
    pub size_written: Option<i64>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(rename = "vCore", default, skip_serializing_if = "Option::is_none")]
    pub v_core: Option<i64>,
}
impl SynchronizationDetails {
    /// Creates an empty record (all fields `None`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// Enum types scoped to [`SynchronizationDetails`].
pub mod synchronization_details {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DataSetType {
        Blob,
        Container,
        BlobFolder,
        AdlsGen2FileSystem,
        AdlsGen2Folder,
        AdlsGen2File,
        AdlsGen1Folder,
        AdlsGen1File,
        KustoCluster,
        KustoDatabase,
        // Wire names use the API's capitalization (DB/DW), hence the renames.
        #[serde(rename = "SqlDBTable")]
        SqlDbTable,
        #[serde(rename = "SqlDWTable")]
        SqlDwTable,
    }
}
/// Paged list of [`SynchronizationDetails`] values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SynchronizationDetailsList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<SynchronizationDetails>,
}
impl SynchronizationDetailsList {
    /// Wraps one page of values with no continuation link.
    pub fn new(value: Vec<SynchronizationDetails>) -> Self {
        Self { next_link: None, value }
    }
}
/// Synchronization setting resource; carries only the `kind` discriminator
/// on top of the common [`ProxyDto`] envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SynchronizationSetting {
    #[serde(flatten)]
    pub proxy_dto: ProxyDto,
    pub kind: synchronization_setting::Kind,
}
impl SynchronizationSetting {
    /// Creates a setting of the given kind with a default envelope.
    pub fn new(kind: synchronization_setting::Kind) -> Self {
        Self {
            proxy_dto: ProxyDto::default(),
            kind,
        }
    }
}
/// Enum types scoped to [`SynchronizationSetting`].
pub mod synchronization_setting {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Kind {
        ScheduleBased,
    }
}
/// Paged list of [`SynchronizationSetting`] values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SynchronizationSettingList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<SynchronizationSetting>,
}
impl SynchronizationSettingList {
    /// Wraps one page of values with no continuation link.
    pub fn new(value: Vec<SynchronizationSetting>) -> Self {
        Self { next_link: None, value }
    }
}
/// Request payload for starting a synchronization; the mode is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Synchronize {
    #[serde(rename = "synchronizationMode", default, skip_serializing_if = "Option::is_none")]
    pub synchronization_mode: Option<synchronize::SynchronizationMode>,
}
impl Synchronize {
    /// Creates a request with no explicit mode.
    pub fn new() -> Self {
        Self::default()
    }
}
/// Enum types scoped to [`Synchronize`].
pub mod synchronize {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum SynchronizationMode {
        Incremental,
        FullSync,
    }
}
/// Trigger resource; carries only the `kind` discriminator on top of the
/// common [`ProxyDto`] envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Trigger {
    #[serde(flatten)]
    pub proxy_dto: ProxyDto,
    pub kind: trigger::Kind,
}
impl Trigger {
    /// Creates a trigger of the given kind with a default envelope.
    pub fn new(kind: trigger::Kind) -> Self {
        Self {
            proxy_dto: ProxyDto::default(),
            kind,
        }
    }
}
/// Enum types scoped to [`Trigger`].
pub mod trigger {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Kind {
        ScheduleBased,
    }
}
/// Paged list of [`Trigger`] values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TriggerList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<Trigger>,
}
impl TriggerList {
    /// Wraps one page of values with no continuation link.
    pub fn new(value: Vec<Trigger>) -> Self {
        Self { next_link: None, value }
    }
}
| 34.797526 | 135 | 0.674772 |
90625371afff30d31ae8cb1b04586b73c2e739dd
| 17,964 |
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
use super::RaftKv;
use super::Result;
use crate::import::SSTImporter;
use crate::read_pool::ReadPoolHandle;
use crate::server::lock_manager::LockManager;
use crate::server::Config as ServerConfig;
use crate::storage::kv::FlowStatsReporter;
use crate::storage::txn::flow_controller::FlowController;
use crate::storage::DynamicConfigs as StorageDynamicConfigs;
use crate::storage::{config::Config as StorageConfig, Storage};
use api_version::api_v2::TIDB_RANGES_COMPLEMENT;
use concurrency_manager::ConcurrencyManager;
use engine_traits::{Engines, Iterable, KvEngine, RaftEngine, DATA_CFS, DATA_KEY_PREFIX_LEN};
use kvproto::kvrpcpb::ApiVersion;
use kvproto::metapb;
use kvproto::raft_serverpb::StoreIdent;
use kvproto::replication_modepb::ReplicationStatus;
use pd_client::{Error as PdError, PdClient, INVALID_ID};
use raftstore::coprocessor::dispatcher::CoprocessorHost;
use raftstore::router::{LocalReadRouter, RaftStoreRouter};
use raftstore::store::fsm::store::StoreMeta;
use raftstore::store::fsm::{ApplyRouter, RaftBatchSystem, RaftRouter};
use raftstore::store::AutoSplitController;
use raftstore::store::{self, initial_region, Config as StoreConfig, SnapManager, Transport};
use raftstore::store::{GlobalReplicationState, PdTask, RefreshConfigTask, SplitCheckTask};
use resource_metering::{CollectorRegHandle, ResourceTagFactory};
use tikv_util::config::VersionTrack;
use tikv_util::worker::{LazyWorker, Scheduler, Worker};
const MAX_CHECK_CLUSTER_BOOTSTRAPPED_RETRY_COUNT: u64 = 60;
const CHECK_CLUSTER_BOOTSTRAPPED_RETRY_SECONDS: u64 = 3;
/// Creates a new storage engine which is backed by the Raft consensus
/// protocol.
pub fn create_raft_storage<S, EK, R: FlowStatsReporter>(
engine: RaftKv<EK, S>,
cfg: &StorageConfig,
read_pool: ReadPoolHandle,
lock_mgr: LockManager,
concurrency_manager: ConcurrencyManager,
dynamic_configs: StorageDynamicConfigs,
flow_controller: Arc<FlowController>,
reporter: R,
resource_tag_factory: ResourceTagFactory,
) -> Result<Storage<RaftKv<EK, S>, LockManager>>
where
S: RaftStoreRouter<EK> + LocalReadRouter<EK> + 'static,
EK: KvEngine,
{
let store = Storage::from_engine(
engine,
cfg,
read_pool,
lock_mgr,
concurrency_manager,
dynamic_configs,
flow_controller,
reporter,
resource_tag_factory,
)?;
Ok(store)
}
/// A wrapper for the raftstore which runs Multi-Raft.
// TODO: we will rename another better name like RaftStore later.
pub struct Node<C: PdClient + 'static, EK: KvEngine, ER: RaftEngine> {
    // Cluster this node belongs to; checked against the persisted `StoreIdent`.
    cluster_id: u64,
    // Store metadata (address, labels, version, ...) reported to PD.
    store: metapb::Store,
    // Raftstore config; `VersionTrack` allows it to be updated at runtime.
    store_cfg: Arc<VersionTrack<StoreConfig>>,
    // Configured storage API version, validated in `check_api_version`.
    api_version: ApiVersion,
    // The Multi-Raft batch system this node drives.
    system: RaftBatchSystem<EK, ER>,
    // Guards against spawning the raftstore twice (see `start_store`).
    has_started: bool,
    pd_client: Arc<C>,
    // Shared replication-mode state, populated from PD in `load_all_stores`.
    state: Arc<Mutex<GlobalReplicationState>>,
    // Background worker handed to the raftstore and stopped in `stop`.
    bg_worker: Worker,
}
impl<C, EK, ER> Node<C, EK, ER>
where
C: PdClient,
EK: KvEngine,
ER: RaftEngine,
{
/// Creates a new Node.
pub fn new(
system: RaftBatchSystem<EK, ER>,
cfg: &ServerConfig,
store_cfg: Arc<VersionTrack<StoreConfig>>,
api_version: ApiVersion,
pd_client: Arc<C>,
state: Arc<Mutex<GlobalReplicationState>>,
bg_worker: Worker,
) -> Node<C, EK, ER> {
let mut store = metapb::Store::default();
store.set_id(INVALID_ID);
if cfg.advertise_addr.is_empty() {
store.set_address(cfg.addr.clone());
} else {
store.set_address(cfg.advertise_addr.clone())
}
if cfg.advertise_status_addr.is_empty() {
store.set_status_address(cfg.status_addr.clone());
} else {
store.set_status_address(cfg.advertise_status_addr.clone())
}
store.set_version(env!("CARGO_PKG_VERSION").to_string());
if let Ok(path) = std::env::current_exe() {
if let Some(path) = path.parent() {
store.set_deploy_path(path.to_string_lossy().to_string());
}
};
store.set_start_timestamp(chrono::Local::now().timestamp());
store.set_git_hash(
option_env!("TIKV_BUILD_GIT_HASH")
.unwrap_or("Unknown git hash")
.to_string(),
);
let mut labels = Vec::new();
for (k, v) in &cfg.labels {
let mut label = metapb::StoreLabel::default();
label.set_key(k.to_owned());
label.set_value(v.to_owned());
labels.push(label);
}
store.set_labels(labels.into());
Node {
cluster_id: cfg.cluster_id,
store,
store_cfg,
api_version,
pd_client,
system,
has_started: false,
state,
bg_worker,
}
}
pub fn try_bootstrap_store(&mut self, engines: Engines<EK, ER>) -> Result<()> {
let mut store_id = self.check_store(&engines)?;
if store_id == INVALID_ID {
store_id = self.alloc_id()?;
debug!("alloc store id"; "store_id" => store_id);
store::bootstrap_store(&engines, self.cluster_id, store_id)?;
fail_point!("node_after_bootstrap_store", |_| Err(box_err!(
"injected error: node_after_bootstrap_store"
)));
}
self.check_api_version(&engines)?;
self.store.set_id(store_id);
Ok(())
}
/// Starts the Node. It tries to bootstrap cluster if the cluster is not
/// bootstrapped yet. Then it spawns a thread to run the raftstore in
/// background.
#[allow(clippy::too_many_arguments)]
pub fn start<T>(
&mut self,
engines: Engines<EK, ER>,
trans: T,
snap_mgr: SnapManager,
pd_worker: LazyWorker<PdTask<EK, ER>>,
store_meta: Arc<Mutex<StoreMeta>>,
coprocessor_host: CoprocessorHost<EK>,
importer: Arc<SSTImporter>,
split_check_scheduler: Scheduler<SplitCheckTask>,
auto_split_controller: AutoSplitController,
concurrency_manager: ConcurrencyManager,
collector_reg_handle: CollectorRegHandle,
) -> Result<()>
where
T: Transport + 'static,
{
let store_id = self.id();
{
let mut meta = store_meta.lock().unwrap();
meta.store_id = Some(store_id);
}
if let Some(first_region) = self.check_or_prepare_bootstrap_cluster(&engines, store_id)? {
info!("trying to bootstrap cluster"; "store_id" => store_id, "region" => ?first_region);
// cluster is not bootstrapped, and we choose first store to bootstrap
fail_point!("node_after_prepare_bootstrap_cluster", |_| Err(box_err!(
"injected error: node_after_prepare_bootstrap_cluster"
)));
self.bootstrap_cluster(&engines, first_region)?;
}
// Put store only if the cluster is bootstrapped.
info!("put store to PD"; "store" => ?&self.store);
let status = self.pd_client.put_store(self.store.clone())?;
self.load_all_stores(status);
self.start_store(
store_id,
engines,
trans,
snap_mgr,
pd_worker,
store_meta,
coprocessor_host,
importer,
split_check_scheduler,
auto_split_controller,
concurrency_manager,
collector_reg_handle,
)?;
Ok(())
}
/// Gets the store id.
pub fn id(&self) -> u64 {
self.store.get_id()
}
/// Gets the Scheduler of RaftstoreConfigTask, it must be called after start.
pub fn refresh_config_scheduler(&mut self) -> Scheduler<RefreshConfigTask> {
self.system.refresh_config_scheduler()
}
/// Gets a transmission end of a channel which is used to send `Msg` to the
/// raftstore.
pub fn get_router(&self) -> RaftRouter<EK, ER> {
self.system.router()
}
/// Gets a transmission end of a channel which is used send messages to apply worker.
pub fn get_apply_router(&self) -> ApplyRouter<EK> {
self.system.apply_router()
}
// check store, return store id for the engine.
// If the store is not bootstrapped, use INVALID_ID.
fn check_store(&self, engines: &Engines<EK, ER>) -> Result<u64> {
let res = engines.kv.get_msg::<StoreIdent>(keys::STORE_IDENT_KEY)?;
if res.is_none() {
return Ok(INVALID_ID);
}
let ident = res.unwrap();
if ident.get_cluster_id() != self.cluster_id {
return Err(box_err!(
"cluster ID mismatch, local {} != remote {}, \
you are trying to connect to another cluster, please reconnect to the correct PD",
ident.get_cluster_id(),
self.cluster_id
));
}
let store_id = ident.get_store_id();
if store_id == INVALID_ID {
return Err(box_err!("invalid store ident {:?}", ident));
}
Ok(store_id)
}
// During the api version switch only TiDB data are allowed to exist otherwise
// returns error.
fn check_api_version(&self, engines: &Engines<EK, ER>) -> Result<()> {
let ident = engines
.kv
.get_msg::<StoreIdent>(keys::STORE_IDENT_KEY)?
.expect("Store should have bootstrapped");
// API version is not written into `StoreIdent` in legacy TiKV, thus it will be V1 in
// `StoreIdent` regardless of `storage.enable_ttl`. To allow upgrading from legacy V1
// TiKV, the config switch between V1 and V1ttl are not checked here.
// It's safe to do so because `storage.enable_ttl` is impossible to change thanks to the
// config check.
let should_check = match (ident.api_version, self.api_version) {
(ApiVersion::V1, ApiVersion::V1ttl) | (ApiVersion::V1ttl, ApiVersion::V1) => false,
(left, right) => left != right,
};
if should_check {
// Check if there are only TiDB data in the engine
let snapshot = engines.kv.snapshot();
for cf in DATA_CFS {
for (start, end) in TIDB_RANGES_COMPLEMENT {
let mut unexpected_data_key = None;
snapshot.scan_cf(
cf,
&keys::data_key(start),
&keys::data_key(end),
false,
|key, _| {
unexpected_data_key = Some(key[DATA_KEY_PREFIX_LEN..].to_vec());
Ok(false)
},
)?;
if let Some(unexpected_data_key) = unexpected_data_key {
return Err(box_err!(
"unable to switch `storage.api_version` from {:?} to {:?} \
because found data key that is not written by TiDB: {:?}",
ident.api_version,
self.api_version,
log_wrappers::hex_encode_upper(&unexpected_data_key)
));
}
}
}
// Switch api version
let ident = StoreIdent {
api_version: self.api_version,
..ident
};
engines.kv.put_msg(keys::STORE_IDENT_KEY, &ident)?;
engines.sync_kv()?;
}
Ok(())
}
fn alloc_id(&self) -> Result<u64> {
let id = self.pd_client.alloc_id()?;
Ok(id)
}
fn load_all_stores(&mut self, status: Option<ReplicationStatus>) {
info!("initializing replication mode"; "status" => ?status, "store_id" => self.store.id);
let stores = match self.pd_client.get_all_stores(false) {
Ok(stores) => stores,
Err(e) => panic!("failed to load all stores: {:?}", e),
};
let mut state = self.state.lock().unwrap();
if let Some(s) = status {
state.set_status(s);
}
for mut store in stores {
state
.group
.register_store(store.id, store.take_labels().into());
}
}
// Exported for tests.
#[doc(hidden)]
pub fn prepare_bootstrap_cluster(
&self,
engines: &Engines<EK, ER>,
store_id: u64,
) -> Result<metapb::Region> {
let region_id = self.alloc_id()?;
debug!(
"alloc first region id";
"region_id" => region_id,
"cluster_id" => self.cluster_id,
"store_id" => store_id
);
let peer_id = self.alloc_id()?;
debug!(
"alloc first peer id for first region";
"peer_id" => peer_id,
"region_id" => region_id,
);
let region = initial_region(store_id, region_id, peer_id);
store::prepare_bootstrap_cluster(engines, ®ion)?;
Ok(region)
}
fn check_or_prepare_bootstrap_cluster(
&self,
engines: &Engines<EK, ER>,
store_id: u64,
) -> Result<Option<metapb::Region>> {
if let Some(first_region) = engines.kv.get_msg(keys::PREPARE_BOOTSTRAP_KEY)? {
Ok(Some(first_region))
} else if self.check_cluster_bootstrapped()? {
Ok(None)
} else {
self.prepare_bootstrap_cluster(engines, store_id).map(Some)
}
}
fn bootstrap_cluster(
&mut self,
engines: &Engines<EK, ER>,
first_region: metapb::Region,
) -> Result<()> {
let region_id = first_region.get_id();
let mut retry = 0;
while retry < MAX_CHECK_CLUSTER_BOOTSTRAPPED_RETRY_COUNT {
match self
.pd_client
.bootstrap_cluster(self.store.clone(), first_region.clone())
{
Ok(_) => {
info!("bootstrap cluster ok"; "cluster_id" => self.cluster_id);
fail_point!("node_after_bootstrap_cluster", |_| Err(box_err!(
"injected error: node_after_bootstrap_cluster"
)));
store::clear_prepare_bootstrap_key(engines)?;
return Ok(());
}
Err(PdError::ClusterBootstrapped(_)) => match self.pd_client.get_region(b"") {
Ok(region) => {
if region == first_region {
store::clear_prepare_bootstrap_key(engines)?;
} else {
info!("cluster is already bootstrapped"; "cluster_id" => self.cluster_id);
store::clear_prepare_bootstrap_cluster(engines, region_id)?;
}
return Ok(());
}
Err(e) => {
warn!("get the first region failed"; "err" => ?e);
}
},
// TODO: should we clean region for other errors too?
Err(e) => error!(?e; "bootstrap cluster"; "cluster_id" => self.cluster_id,),
}
retry += 1;
thread::sleep(Duration::from_secs(
CHECK_CLUSTER_BOOTSTRAPPED_RETRY_SECONDS,
));
}
Err(box_err!("bootstrapped cluster failed"))
}
fn check_cluster_bootstrapped(&self) -> Result<bool> {
for _ in 0..MAX_CHECK_CLUSTER_BOOTSTRAPPED_RETRY_COUNT {
match self.pd_client.is_cluster_bootstrapped() {
Ok(b) => return Ok(b),
Err(e) => {
warn!("check cluster bootstrapped failed"; "err" => ?e);
}
}
thread::sleep(Duration::from_secs(
CHECK_CLUSTER_BOOTSTRAPPED_RETRY_SECONDS,
));
}
Err(box_err!("check cluster bootstrapped failed"))
}
/// Spawns the raftstore batch system for this store, handing over all
/// scheduling handles and shared state.
///
/// Errors if the node was already started; otherwise marks it started so a
/// second call fails fast.
#[allow(clippy::too_many_arguments)]
fn start_store<T>(
    &mut self,
    store_id: u64,
    engines: Engines<EK, ER>,
    trans: T,
    snap_mgr: SnapManager,
    pd_worker: LazyWorker<PdTask<EK, ER>>,
    store_meta: Arc<Mutex<StoreMeta>>,
    coprocessor_host: CoprocessorHost<EK>,
    importer: Arc<SSTImporter>,
    split_check_scheduler: Scheduler<SplitCheckTask>,
    auto_split_controller: AutoSplitController,
    concurrency_manager: ConcurrencyManager,
    collector_reg_handle: CollectorRegHandle,
) -> Result<()>
where
    T: Transport + 'static,
{
    info!("start raft store thread"; "store_id" => store_id);

    if self.has_started {
        return Err(box_err!("{} is already started", store_id));
    }
    self.has_started = true;
    let cfg = self.store_cfg.clone();
    let pd_client = Arc::clone(&self.pd_client);
    let store = self.store.clone();

    // Everything below is moved into the store system's threads.
    self.system.spawn(
        store,
        cfg,
        engines,
        trans,
        pd_client,
        snap_mgr,
        pd_worker,
        store_meta,
        coprocessor_host,
        importer,
        split_check_scheduler,
        self.bg_worker.clone(),
        auto_split_controller,
        self.state.clone(),
        concurrency_manager,
        collector_reg_handle,
    )?;
    Ok(())
}
/// Shuts down the raftstore batch system. `store_id` is only used for
/// logging which store is going down.
fn stop_store(&mut self, store_id: u64) {
    info!("stop raft store thread"; "store_id" => store_id);
    self.system.shutdown();
}
/// Stops the Node: tears down the raftstore system, then the background
/// worker.
pub fn stop(&mut self) {
    let id = self.store.get_id();
    self.stop_store(id);
    self.bg_worker.stop();
}
}
| 35.431953 | 102 | 0.57053 |
11e0d1fef0acd843ae7959a4b8fe2c3357369afe
| 5,321 |
use async_trait::async_trait;
use futures::{stream::BoxStream, StreamExt};
use tokio::{io, io::AsyncWriteExt};
use vector_core::{
buffers::Acker,
internal_event::{BytesSent, EventsSent},
ByteSizeOf,
};
use crate::{
event::Event,
sinks::util::{
encoding::{Encoder, EncodingConfig, StandardEncodings},
StreamSink,
},
};
/// A sink that encodes each event and writes it, newline-delimited, to an
/// arbitrary async writer (e.g. stdout or stderr).
pub struct WriterSink<T> {
    /// Acknowledges events back to the buffer once they have been written.
    pub acker: Acker,
    /// Destination writer the encoded events are sent to.
    pub output: T,
    /// How events are serialized (text / JSON / ...) before writing.
    pub encoding: EncodingConfig<StandardEncodings>,
}
#[async_trait]
impl<T> StreamSink<Event> for WriterSink<T>
where
    T: io::AsyncWrite + Send + Sync + Unpin,
{
    /// Drains the input stream: encodes each event, writes it with a
    /// trailing newline, acks it, and emits send telemetry. Stops the sink
    /// (returns `Err`) on the first write failure.
    async fn run(mut self: Box<Self>, mut input: BoxStream<'_, Event>) -> Result<(), ()> {
        while let Some(event) = input.next().await {
            // Capture the size before the event is consumed by encoding.
            let event_byte_size = event.size_of();
            if let Some(mut buf) = encode_event(event, &self.encoding) {
                buf.push('\n');
                if let Err(error) = self.output.write_all(buf.as_bytes()).await {
                    // Error when writing to stdout/stderr is likely irrecoverable,
                    // so stop the sink.
                    error!(message = "Error writing to output. Stopping sink.", %error);
                    return Err(());
                }
                self.acker.ack(1);
                emit!(EventsSent {
                    byte_size: event_byte_size,
                    count: 1,
                    output: None,
                });
                // `buf.len()` includes the trailing newline appended above.
                emit!(BytesSent {
                    byte_size: buf.len(),
                    protocol: "console"
                });
            }
            // NOTE(review): events that fail to encode are silently dropped
            // and never acked — confirm this is the intended behavior.
        }

        Ok(())
    }
}
/// Serializes `event` with the configured encoding, yielding `None` when
/// encoding fails.
fn encode_event(event: Event, encoding: &EncodingConfig<StandardEncodings>) -> Option<String> {
    match encoding.encode_input_to_string(event) {
        Ok(encoded) => Some(encoded),
        Err(_) => None,
    }
}
#[cfg(test)]
mod test {
    use chrono::{offset::TimeZone, Utc};
    use pretty_assertions::assert_eq;

    use super::*;
    use crate::{
        event::{
            metric::{Metric, MetricKind, MetricValue, StatisticKind},
            Event, Value,
        },
        sinks::util::encoding::StandardEncodings,
    };

    // Text encoding of a raw log is just the message itself.
    #[test]
    fn encodes_raw_logs() {
        let event = Event::from("foo");
        assert_eq!(
            "foo",
            encode_event(event, &EncodingConfig::from(StandardEncodings::Text)).unwrap()
        );
    }

    // JSON encoding serializes log fields in sorted key order.
    #[test]
    fn encodes_log_events() {
        let mut event = Event::new_empty_log();
        let log = event.as_mut_log();
        log.insert("x", Value::from("23"));
        log.insert("z", Value::from(25));
        log.insert("a", Value::from("0"));

        let encoded = encode_event(event, &EncodingConfig::from(StandardEncodings::Json));
        let expected = r#"{"a":"0","x":"23","z":25}"#;
        assert_eq!(encoded.unwrap(), expected);
    }

    // Counter metric with namespace, tags and a nanosecond timestamp.
    #[test]
    fn encodes_counter() {
        let event = Event::Metric(
            Metric::new(
                "foos",
                MetricKind::Incremental,
                MetricValue::Counter { value: 100.0 },
            )
            .with_namespace(Some("vector"))
            .with_tags(Some(
                vec![
                    ("key2".to_owned(), "value2".to_owned()),
                    ("key1".to_owned(), "value1".to_owned()),
                    ("Key3".to_owned(), "Value3".to_owned()),
                ]
                .into_iter()
                .collect(),
            ))
            .with_timestamp(Some(Utc.ymd(2018, 11, 14).and_hms_nano(8, 9, 10, 11))),
        );
        assert_eq!(
            r#"{"name":"foos","namespace":"vector","tags":{"Key3":"Value3","key1":"value1","key2":"value2"},"timestamp":"2018-11-14T08:09:10.000000011Z","kind":"incremental","counter":{"value":100.0}}"#,
            encode_event(event, &EncodingConfig::from(StandardEncodings::Json)).unwrap()
        );
    }

    // Set metric without namespace/tags/timestamp: optional fields omitted.
    #[test]
    fn encodes_set() {
        let event = Event::Metric(Metric::new(
            "users",
            MetricKind::Incremental,
            MetricValue::Set {
                values: vec!["bob".into()].into_iter().collect(),
            },
        ));
        assert_eq!(
            r#"{"name":"users","kind":"incremental","set":{"values":["bob"]}}"#,
            encode_event(event, &EncodingConfig::from(StandardEncodings::Json)).unwrap()
        );
    }

    // Distribution (histogram statistic) with no timestamp attached.
    #[test]
    fn encodes_histogram_without_timestamp() {
        let event = Event::Metric(Metric::new(
            "glork",
            MetricKind::Incremental,
            MetricValue::Distribution {
                samples: vector_core::samples![10.0 => 1],
                statistic: StatisticKind::Histogram,
            },
        ));
        assert_eq!(
            r#"{"name":"glork","kind":"incremental","distribution":{"samples":[{"value":10.0,"rate":1}],"statistic":"histogram"}}"#,
            encode_event(event, &EncodingConfig::from(StandardEncodings::Json)).unwrap()
        );
    }

    // Text encoding of a metric uses the "name{tags} op value" form.
    #[test]
    fn encodes_metric_text() {
        let event = Event::Metric(Metric::new(
            "users",
            MetricKind::Incremental,
            MetricValue::Set {
                values: vec!["bob".into()].into_iter().collect(),
            },
        ));
        assert_eq!(
            "users{} + bob",
            encode_event(event, &EncodingConfig::from(StandardEncodings::Text)).unwrap()
        );
    }
}
| 31.485207 | 203 | 0.514377 |
f54f45881d2729eb55e14915d4a118fc91b902fe
| 5,774 |
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(missing_docs, bad_style)]
use io::{self, ErrorKind};
use libc;
#[cfg(any(dox, target_os = "linux"))] pub use os::linux as platform;
#[cfg(all(not(dox), target_os = "android"))] pub use os::android as platform;
#[cfg(all(not(dox), target_os = "bitrig"))] pub use os::bitrig as platform;
#[cfg(all(not(dox), target_os = "dragonfly"))] pub use os::dragonfly as platform;
#[cfg(all(not(dox), target_os = "freebsd"))] pub use os::freebsd as platform;
#[cfg(all(not(dox), target_os = "haiku"))] pub use os::haiku as platform;
#[cfg(all(not(dox), target_os = "ios"))] pub use os::ios as platform;
#[cfg(all(not(dox), target_os = "macos"))] pub use os::macos as platform;
#[cfg(all(not(dox), target_os = "netbsd"))] pub use os::netbsd as platform;
#[cfg(all(not(dox), target_os = "openbsd"))] pub use os::openbsd as platform;
#[cfg(all(not(dox), target_os = "solaris"))] pub use os::solaris as platform;
#[cfg(all(not(dox), target_os = "emscripten"))] pub use os::emscripten as platform;
#[cfg(all(not(dox), target_os = "fuchsia"))] pub use os::fuchsia as platform;
#[cfg(all(not(dox), target_os = "l4re"))] pub use os::linux as platform;
#[cfg(all(not(dox), target_os = "horizon-nx"))] pub use os::horizon as platform;
pub use self::rand::hashmap_random_keys;
pub use libc::strlen;
#[macro_use]
pub mod weak;
pub mod args;
pub mod android;
#[cfg(feature = "backtrace")]
pub mod backtrace;
pub mod cmath;
pub mod condvar;
pub mod env;
pub mod ext;
pub mod fast_thread_local;
pub mod fd;
pub mod fs;
pub mod memchr;
pub mod mutex;
#[cfg(not(target_os = "l4re"))]
pub mod net;
#[cfg(target_os = "l4re")]
mod l4re;
#[cfg(target_os = "l4re")]
pub use self::l4re::net;
pub mod os;
pub mod os_str;
pub mod path;
pub mod pipe;
pub mod process;
pub mod rand;
pub mod rwlock;
pub mod stack_overflow;
pub mod thread;
pub mod thread_local;
pub mod time;
pub mod stdio;
/// One-time runtime initialization for this target.
#[cfg(not(test))]
pub fn init() {
    // By default, some platforms will send a *signal* when an EPIPE error
    // would otherwise be delivered. This runtime doesn't install a SIGPIPE
    // handler, causing it to kill the program, which isn't exactly what we
    // want!
    //
    // Hence, we set SIGPIPE to ignore when the program starts up in order
    // to prevent this problem.
    unsafe {
        reset_sigpipe();
    }

    #[cfg(not(any(target_os = "emscripten", target_os = "fuchsia")))]
    unsafe fn reset_sigpipe() {
        // NOTE(review): the signal number is `0 as _` instead of
        // `libc::SIGPIPE` — presumably a placeholder/stub for the 3DS
        // libc; confirm against the platform's libc before relying on it.
        assert!(signal(0 as _, libc::SIG_IGN) != libc::SIG_ERR);
    }

    // These targets have no POSIX signals to reset.
    #[cfg(any(target_os = "emscripten", target_os = "fuchsia"))]
    unsafe fn reset_sigpipe() {}
}
#[cfg(target_os = "android")]
pub use sys::android::signal;
#[cfg(not(target_os = "android"))]
pub use libc::signal;
/// Returns the standard "not supported" error for APIs that have no
/// implementation on this target yet.
pub fn unsupported<T>() -> io::Result<T> {
    let err = unsupported_err();
    Err(err)
}
/// Builds the error value reported by every unimplemented API on this
/// target.
pub fn unsupported_err() -> io::Error {
    io::Error::new(
        io::ErrorKind::Other,
        "operation not supported on 3DS yet",
    )
}
/// An uninhabited type: no value of `Void` can ever be constructed. Used
/// where an API must name a type for a case that cannot occur.
#[derive(Copy,Clone)]
pub enum Void {}
/// Maps a raw OS `errno` value onto the portable `io::ErrorKind` the
/// standard library exposes; anything unrecognized becomes
/// `ErrorKind::Other`.
pub fn decode_error_kind(errno: i32) -> ErrorKind {
    match errno as libc::c_int {
        libc::ECONNREFUSED => ErrorKind::ConnectionRefused,
        libc::ECONNRESET => ErrorKind::ConnectionReset,
        libc::EPERM | libc::EACCES => ErrorKind::PermissionDenied,
        libc::EPIPE => ErrorKind::BrokenPipe,
        libc::ENOTCONN => ErrorKind::NotConnected,
        libc::ECONNABORTED => ErrorKind::ConnectionAborted,
        libc::EADDRNOTAVAIL => ErrorKind::AddrNotAvailable,
        libc::EADDRINUSE => ErrorKind::AddrInUse,
        libc::ENOENT => ErrorKind::NotFound,
        libc::EINTR => ErrorKind::Interrupted,
        libc::EINVAL => ErrorKind::InvalidInput,
        libc::ETIMEDOUT => ErrorKind::TimedOut,
        libc::EEXIST => ErrorKind::AlreadyExists,

        // These two constants can have the same value on some systems,
        // but different values on others, so we can't use a match
        // clause
        x if x == libc::EAGAIN || x == libc::EWOULDBLOCK =>
            ErrorKind::WouldBlock,

        _ => ErrorKind::Other,
    }
}
/// Types whose C-style "call failed" sentinel is `-1` (raw syscall return
/// values).
#[doc(hidden)]
pub trait IsMinusOne {
    fn is_minus_one(&self) -> bool;
}
// Implements `IsMinusOne` for each listed signed integer type by comparing
// against the literal `-1`.
macro_rules! impl_is_minus_one {
    ($($t:ident)*) => ($(impl IsMinusOne for $t {
        fn is_minus_one(&self) -> bool {
            *self == -1
        }
    })*)
}

impl_is_minus_one! { i8 i16 i32 i64 isize }
/// Converts a raw C return value into an `io::Result`, mapping the `-1`
/// failure sentinel to the current `errno` via `last_os_error`.
pub fn cvt<T: IsMinusOne>(t: T) -> io::Result<T> {
    if !t.is_minus_one() {
        return Ok(t);
    }
    Err(io::Error::last_os_error())
}
/// Like `cvt`, but retries the operation as long as it fails with `EINTR`
/// (i.e. the syscall was interrupted by a signal).
pub fn cvt_r<T, F>(mut f: F) -> io::Result<T>
    where T: IsMinusOne,
          F: FnMut() -> T
{
    loop {
        match cvt(f()) {
            // Interrupted by a signal — retry the call.
            Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
            other => return other,
        }
    }
}
// On Unix-like platforms, libc::abort will unregister signal handlers
// including the SIGABRT handler, preventing the abort from being blocked, and
// fclose streams, with the side effect of flushing them so libc bufferred
// output will be printed. Additionally the shell will generally print a more
// understandable error message like "Abort trap" rather than "Illegal
// instruction" that intrinsics::abort would cause, as intrinsics::abort is
// implemented as an illegal instruction.
/// Aborts the process through libc (see the rationale in the comment block
/// above: proper SIGABRT behavior and stdio flushing).
pub unsafe fn abort_internal() -> ! {
    ::libc::abort()
}
| 32.077778 | 85 | 0.649636 |
1474f3dbb026c44862ba46a7bd17b22f66fdc2e5
| 16,582 |
use std::str;
use std::io;
use std::fs::File;
use std::ops::{Range, Deref, Index};
#[cfg(unix)]
use std::os::unix::fs::MetadataExt;
use std::path::Path;
use std::sync::Arc;
use std::fmt;
use memmap::Mmap;
use goblin::elf::header as elf_header;
use goblin::elf::section_header::{SHT_SYMTAB, SHT_DYNSYM, SHT_STRTAB};
use goblin::elf::program_header::PT_LOAD;
use gimli;
use speedy::{Readable, Writable};
use crate::elf::{self, Endian};
use crate::utils::{StableIndex, get_major, get_minor};
use crate::types::{Inode, Bitness, Endianness};
/// Backing storage for a loaded binary: a file mmap, a `'static` slice, or
/// an owned byte buffer. All variants dereference to `&[u8]`.
enum Blob {
    Mmap( Mmap ),
    StaticSlice( &'static [u8] ),
    Owned( Vec< u8 > )
}
impl Deref for Blob {
    type Target = [u8];

    /// Exposes the underlying bytes regardless of which variant backs them.
    #[inline]
    fn deref( &self ) -> &Self::Target {
        match self {
            Blob::Mmap( mmap ) => mmap,
            Blob::StaticSlice( slice ) => slice,
            Blob::Owned( bytes ) => bytes
        }
    }
}
/// Location of one ELF symbol table inside the binary image.
#[derive(Debug)]
pub struct SymbolTable {
    /// Byte range of the symbol table section's body.
    pub range: Range< u64 >,
    /// Byte range of the string table holding the symbols' names.
    pub strtab_range: Range< u64 >,
    /// `true` for `.dynsym` (SHT_DYNSYM), `false` for `.symtab` (SHT_SYMTAB).
    pub is_dynamic: bool
}
/// A single PT_LOAD program header: how a chunk of the file is mapped into
/// memory.
#[derive(Clone, Debug, Readable, Writable)]
pub struct LoadHeader {
    /// Virtual load address (p_vaddr).
    pub address: u64,
    /// Offset of the segment within the file (p_offset).
    pub file_offset: u64,
    /// Number of bytes backed by the file (p_filesz).
    pub file_size: u64,
    /// Size of the segment in memory (p_memsz).
    pub memory_size: u64,
    /// Required alignment (p_align).
    pub alignment: u64,
    pub is_readable: bool,
    pub is_writable: bool,
    pub is_executable: bool
}
/// A loaded ELF binary plus everything extracted from it at load time:
/// well-known section ranges, symbol tables, load headers and identity.
pub struct BinaryData {
    /// Device/inode identity of the source file, when loaded from disk.
    inode: Option< Inode >,
    /// Path or label the binary was loaded under.
    name: String,
    /// Raw bytes of the whole image.
    blob: Blob,
    // Byte ranges (into `blob`) of well-known sections; `None` when the
    // section is absent or its header points out of bounds.
    data_range: Option< Range< usize > >,
    text_range: Option< Range< usize > >,
    eh_frame_range: Option< Range< usize > >,
    eh_frame_hdr_range: Option< Range< usize > >,
    debug_frame_range: Option< Range< usize > >,
    gnu_debuglink_range: Option< Range< usize > >,
    arm_extab_range: Option< Range< usize > >,
    arm_exidx_range: Option< Range< usize > >,
    /// `true` for ET_DYN images, `false` for ET_EXEC.
    is_shared_object: bool,
    /// Locations of `.symtab`/`.dynsym` and their string tables.
    symbol_tables: Vec< SymbolTable >,
    /// All PT_LOAD program headers.
    load_headers: Vec< LoadHeader >,
    /// Target architecture name, e.g. "amd64", "arm", "mips64".
    architecture: &'static str,
    endianness: Endianness,
    bitness: Bitness,
    /// Payload of the `.note.gnu.build-id` note, when present.
    build_id: Option< Vec< u8 > >
}
impl BinaryData {
    /// Memory-maps a binary from the filesystem and records its
    /// device/inode identity (Unix only).
    #[cfg(unix)]
    pub fn load_from_fs< P: AsRef< Path > >( path: P ) -> io::Result< Self > {
        let path = path.as_ref();
        debug!( "Loading binary {:?}...", path );

        let fp = File::open( path )?;
        // The mapping is only sound while the file isn't truncated or
        // rewritten underneath us; `check_inode` lets callers detect swaps.
        let mmap = unsafe { Mmap::map( &fp )? };
        let blob = Blob::Mmap( mmap );
        let metadata = fp.metadata()?;
        let inode = metadata.ino();
        let dev = metadata.dev();
        let dev_major = get_major( dev );
        let dev_minor = get_minor( dev );
        let inode = Inode { inode, dev_major, dev_minor };
        let mut data = BinaryData::load( &path.to_string_lossy(), blob )?;
        data.set_inode( inode );
        Ok( data )
    }

    #[cfg(not(unix))]
    pub fn load_from_fs< P: AsRef< Path > >( _: P ) -> io::Result< Self > {
        unimplemented!();
    }

    /// Loads a binary from a `'static` byte slice (e.g. embedded data).
    pub fn load_from_static_slice( name: &str, slice: &'static [u8] ) -> io::Result< Self > {
        debug!( "Loading binary '{}'...", name );

        let blob = Blob::StaticSlice( slice );
        BinaryData::load( name, blob )
    }

    /// Loads a binary from an owned byte buffer.
    pub fn load_from_owned_bytes( name: &str, bytes: Vec< u8 > ) -> io::Result< Self > {
        debug!( "Loading binary '{}'...", name );

        let blob = Blob::Owned( bytes );
        BinaryData::load( name, blob )
    }

    /// Errors out unless this binary was loaded from exactly the given
    /// device/inode (guards against the on-disk file having changed).
    pub fn check_inode( &self, expected_inode: Inode ) -> io::Result< () > {
        if self.inode != Some( expected_inode ) {
            return Err( io::Error::new( io::ErrorKind::Other, format!( "major/minor/inode of {:?} doesn't match the expected value: {:?} != {:?}", self.name, self.inode, expected_inode ) ) );
        }

        Ok(())
    }

    /// Parses `blob` as an ELF image, extracting target info, well-known
    /// section ranges, symbol tables, PT_LOAD headers and the build-id.
    fn load( path: &str, blob: Blob ) -> io::Result< Self > {
        if !blob.starts_with( b"\x7FELF" ) {
            return Err( io::Error::new( io::ErrorKind::InvalidData, "not an ELF file" ) );
        }

        let mut data_range = None;
        let mut text_range = None;
        let mut eh_frame_range = None;
        let mut eh_frame_hdr_range = None;
        let mut debug_frame_range = None;
        let mut gnu_debuglink_range = None;
        let mut arm_extab_range = None;
        let mut arm_exidx_range = None;
        let mut build_id_range = None;
        let mut build_id = None;
        let mut is_shared_object = false;
        let mut symbol_tables = Vec::new();
        let mut load_headers = Vec::new();
        let mut endianness = Endianness::LittleEndian;
        let mut bitness = Bitness::B32;
        let mut architecture = "";

        {
            let elf = elf::parse( &blob ).map_err( |err| io::Error::new( io::ErrorKind::Other, err ) )?;
            parse_elf!( elf, |elf| {
                endianness = match elf.endianness() {
                    Endian::Little => Endianness::LittleEndian,
                    Endian::Big => Endianness::BigEndian
                };

                bitness = if elf.is_64_bit() {
                    Bitness::B64
                } else {
                    Bitness::B32
                };

                // Only executables and shared objects are accepted.
                is_shared_object = match elf.header().e_type {
                    elf_header::ET_EXEC => false,
                    elf_header::ET_DYN => true,
                    _ => {
                        return Err( io::Error::new( io::ErrorKind::Other, format!( "unknown ELF type '{}' for {:?}", elf.header().e_type, path ) ) );
                    }
                };

                architecture = match elf.header().e_machine {
                    elf_header::EM_X86_64 => "amd64",
                    elf_header::EM_386 => "x86",
                    elf_header::EM_ARM => "arm",
                    elf_header::EM_MIPS => {
                        if elf.is_64_bit() {
                            "mips64"
                        } else {
                            "mips"
                        }
                    },
                    elf_header::EM_AARCH64 => "aarch64",
                    kind => {
                        return Err( io::Error::new( io::ErrorKind::Other, format!( "unknown machine type '{}' for {:?}", kind, path ) ) );
                    }
                };

                // The section-name string table is required to resolve
                // section names below.
                let name_strtab_header = elf.get_section_header( elf.header().e_shstrndx as usize )
                    .ok_or_else( || io::Error::new( io::ErrorKind::Other, format!( "missing section header for section names strtab for {:?}", path ) ) )?;

                let name_strtab = elf.get_strtab( &name_strtab_header )
                    .ok_or_else( || io::Error::new( io::ErrorKind::Other, format!( "missing strtab for section names strtab for {:?}", path ) ) )?;

                for header in elf.section_headers() {
                    let ty = header.sh_type as u32;
                    if ty == SHT_SYMTAB || ty == SHT_DYNSYM {
                        let is_dynamic = ty == SHT_DYNSYM;
                        // sh_link points at the associated string table.
                        let strtab_key = header.sh_link as usize;
                        if let Some( strtab_header ) = elf.get_section_header( strtab_key ) {
                            if strtab_header.sh_type as u32 == SHT_STRTAB {
                                let strtab_range = elf.get_section_body_range( &strtab_header );
                                let symtab_range = elf.get_section_body_range( &header );
                                symbol_tables.push( SymbolTable {
                                    range: symtab_range,
                                    strtab_range,
                                    is_dynamic
                                });
                            }
                        }
                    }

                    let section_name = match name_strtab.get( header.sh_name ) {
                        Some( Ok( name ) ) => name,
                        _ => continue
                    };

                    // Pick the output slot for sections we care about.
                    let out_range = match section_name {
                        ".data" => Some( &mut data_range ),
                        ".text" => Some( &mut text_range ),
                        ".eh_frame" => Some( &mut eh_frame_range ),
                        ".eh_frame_hdr" => Some( &mut eh_frame_hdr_range ),
                        ".debug_frame" => Some( &mut debug_frame_range ),
                        ".gnu_debuglink" => Some( &mut gnu_debuglink_range ),
                        ".ARM.extab" => Some( &mut arm_extab_range ),
                        ".ARM.exidx" => Some( &mut arm_exidx_range ),
                        ".note.gnu.build-id" => Some( &mut build_id_range ),
                        _ => None
                    };

                    let offset = header.sh_offset as usize;
                    let length = header.sh_size as usize;
                    let range = offset..offset + length;
                    // Only record ranges that actually fit inside the blob.
                    if let Some( _ ) = blob.get( range.clone() ) {
                        if let Some( out_range ) = out_range {
                            *out_range = Some( range.clone() );
                        }
                    }
                }

                if let Some( range ) = build_id_range {
                    // Safe: the range was bounds-checked above.
                    let data = blob.get( range.clone() ).unwrap();
                    // NOTE(review): both arms call the same `parse_note`;
                    // presumably endianness is handled inside the parsed
                    // `elf` — confirm, then this match can be collapsed.
                    let note = match endianness {
                        Endianness::LittleEndian => elf.parse_note( data ),
                        Endianness::BigEndian => elf.parse_note( data )
                    };

                    if let Some( note ) = note {
                        build_id = Some( note.desc.into() );
                    }
                }

                for header in elf.program_headers() {
                    if header.p_type != PT_LOAD {
                        continue;
                    }

                    let entry = LoadHeader {
                        address: header.p_vaddr,
                        file_offset: header.p_offset,
                        file_size: header.p_filesz,
                        memory_size: header.p_memsz,
                        alignment: header.p_align,
                        is_readable: header.is_read(),
                        is_writable: header.is_write(),
                        is_executable: header.is_executable()
                    };

                    load_headers.push( entry );
                }

                Ok(())
            })?;
        }

        let binary = BinaryData {
            inode: None,
            name: path.to_string(),
            blob,
            data_range,
            text_range,
            eh_frame_range,
            eh_frame_hdr_range,
            debug_frame_range,
            gnu_debuglink_range,
            arm_extab_range,
            arm_exidx_range,
            is_shared_object,
            symbol_tables,
            load_headers,
            architecture,
            endianness,
            bitness,
            build_id
        };

        Ok( binary )
    }

    #[inline]
    pub fn inode( &self ) -> Option< Inode > {
        self.inode
    }

    #[inline]
    pub fn set_inode( &mut self, inode: Inode ) {
        self.inode = Some( inode );
    }

    #[inline]
    pub fn name( &self ) -> &str {
        &self.name
    }

    #[inline]
    pub fn architecture( &self ) -> &str {
        self.architecture
    }

    #[inline]
    pub fn endianness( &self ) -> Endianness {
        self.endianness
    }

    #[inline]
    pub fn bitness( &self ) -> Bitness {
        self.bitness
    }

    #[inline]
    pub fn symbol_tables( &self ) -> &[SymbolTable] {
        &self.symbol_tables
    }

    #[inline]
    pub fn as_bytes( &self ) -> &[u8] {
        &self.blob
    }

    #[inline]
    pub fn is_shared_object( &self ) -> bool {
        self.is_shared_object
    }

    #[inline]
    pub fn data_range( &self ) -> Option< Range< usize > > {
        self.data_range.clone()
    }

    #[inline]
    pub fn text_range( &self ) -> Option< Range< usize > > {
        self.text_range.clone()
    }

    #[inline]
    pub fn eh_frame_range( &self ) -> Option< Range< usize > > {
        self.eh_frame_range.clone()
    }

    #[inline]
    pub fn eh_frame_hdr_range( &self ) -> Option< Range< usize > > {
        self.eh_frame_hdr_range.clone()
    }

    #[inline]
    pub fn debug_frame_range( &self ) -> Option< Range< usize > > {
        self.debug_frame_range.clone()
    }

    #[inline]
    pub fn gnu_debuglink_range( &self ) -> Option< Range< usize > > {
        self.gnu_debuglink_range.clone()
    }

    #[inline]
    pub fn arm_extab_range( &self ) -> Option< Range< usize > > {
        self.arm_extab_range.clone()
    }

    #[inline]
    pub fn arm_exidx_range( &self ) -> Option< Range< usize > > {
        self.arm_exidx_range.clone()
    }

    /// Re-parses the image and finds the in-bounds body range of the
    /// section called `name`.
    ///
    /// NOTE(review): unlike `load`, this uses `.unwrap()` on the parse and
    /// strtab lookups, so it panics on a malformed image — in practice the
    /// blob already survived `load`, but confirm this invariant holds for
    /// all call sites.
    fn get_section_range( &self, name: &str ) -> Option< Range< usize > > {
        let elf = elf::parse( &self.blob ).map_err( |err| io::Error::new( io::ErrorKind::Other, err ) ).unwrap();
        parse_elf!( elf, |elf| {
            let name_strtab_header = elf.get_section_header( elf.header().e_shstrndx as usize ).unwrap();
            let name_strtab = elf.get_strtab( &name_strtab_header ).unwrap();
            for header in elf.section_headers() {
                let section_name = match name_strtab.get( header.sh_name ) {
                    Some( Ok( name ) ) => name,
                    _ => continue
                };

                if section_name != name {
                    continue;
                }

                let offset = header.sh_offset as usize;
                let length = header.sh_size as usize;
                let range = offset..offset + length;
                if let Some( _ ) = self.blob.get( range.clone() ) {
                    return Some( range );
                }
            }

            None
        })
    }

    /// Returns a reader over an empty (zero-length) range of `data`.
    #[inline]
    pub fn get_empty_section( data: &Arc< BinaryData > ) -> BinaryDataReader {
        Self::get_range_reader( data, 0..0 )
    }

    /// Builds a gimli section `S` from its section in `data`, or from an
    /// empty range when the section is missing.
    #[inline]
    pub fn get_section_or_empty< S >( data: &Arc< BinaryData > ) -> S
        where S: From< gimli::EndianReader< gimli::RunTimeEndian, BinaryDataSlice > > +
                 gimli::Section< gimli::EndianReader< gimli::RunTimeEndian, BinaryDataSlice > >
    {
        let range = match data.get_section_range( S::section_name() ) {
            Some( range ) => range.clone(),
            None => 0..0
        };

        Self::get_range_reader( data, range ).into()
    }

    /// Wraps `range` of `data` in an endian-aware gimli reader.
    #[inline]
    fn get_range_reader( data: &Arc< BinaryData >, range: Range< usize > ) -> BinaryDataReader {
        let endianness = match data.endianness() {
            Endianness::LittleEndian => gimli::RunTimeEndian::Little,
            Endianness::BigEndian => gimli::RunTimeEndian::Big
        };

        gimli::EndianReader::new( Self::subslice( data.clone(), range ), endianness ).into()
    }

    #[inline]
    pub fn load_headers( &self ) -> &[LoadHeader] {
        &self.load_headers
    }

    #[inline]
    pub fn build_id( &self ) -> Option< &[u8] > {
        self.build_id.as_ref().map( |id| id.as_slice() )
    }

    /// Returns the NUL-terminated name stored in `.gnu_debuglink`, without
    /// the terminator or anything after it; `None` when the section is
    /// absent or the name is empty.
    #[inline]
    pub fn debuglink( &self ) -> Option< &[u8] > {
        let debuglink = &self.as_bytes()[ self.gnu_debuglink_range.clone()? ];
        let debuglink_length = debuglink.iter().position( |&byte| byte == 0 ).unwrap_or( debuglink.len() );
        if debuglink_length == 0 {
            return None;
        }

        Some( &debuglink[ 0..debuglink_length ] )
    }

    /// Creates a cheaply clonable view into `range` of `data`.
    #[inline]
    fn subslice( data: Arc< BinaryData >, range: Range< usize > ) -> BinaryDataSlice {
        BinaryDataSlice {
            data,
            range
        }
    }
}
impl Deref for BinaryData {
    type Target = [u8];

    /// Derefs straight to the raw bytes of the loaded image.
    #[inline]
    fn deref( &self ) -> &Self::Target {
        self.as_bytes()
    }
}
// SAFETY: `BinaryData` derefs into its `Blob` (mmap, 'static slice, or an
// owned Vec that is never mutated after load), so the bytes behind any
// index stay put for the value's lifetime — presumably the contract
// `StableIndex` requires; confirm against the trait's definition.
unsafe impl StableIndex for BinaryData {}
impl Index< Range< u64 > > for BinaryData {
    type Output = [u8];

    /// Indexes the raw image with a `u64` byte range; panics when the
    /// range is out of bounds, exactly like slice indexing.
    #[inline]
    fn index( &self, index: Range< u64 > ) -> &Self::Output {
        let start = index.start as usize;
        let end = index.end as usize;
        &self.as_bytes()[ start..end ]
    }
}
/// A cheaply clonable view into a sub-range of a shared `BinaryData`.
#[derive(Clone)]
pub struct BinaryDataSlice {
    /// Keeps the underlying binary alive.
    data: Arc< BinaryData >,
    /// Byte range of this slice within `data`.
    range: Range< usize >
}
impl fmt::Debug for BinaryDataSlice {
    /// Formats as `BinaryData[start..end]` without dumping the bytes.
    fn fmt( &self, fmt: &mut fmt::Formatter ) -> Result< (), fmt::Error > {
        fmt.write_fmt( format_args!( "BinaryData[{:?}]", self.range ) )
    }
}
impl Deref for BinaryDataSlice {
    type Target = [u8];

    /// Borrows the sub-range of the underlying binary this slice covers.
    #[inline]
    fn deref( &self ) -> &Self::Target {
        let bytes = self.data.as_bytes();
        &bytes[ self.range.clone() ]
    }
}
// SAFETY: `BinaryDataSlice` derefs into storage owned by the
// `Arc<BinaryData>` it holds, so the pointed-to bytes never move while the
// slice — or any clone of it — exists, which is what
// StableDeref/CloneStableDeref require.
unsafe impl gimli::StableDeref for BinaryDataSlice {}
unsafe impl gimli::CloneStableDeref for BinaryDataSlice {}

/// Endian-aware gimli reader backed by a slice of a shared `BinaryData`.
pub type BinaryDataReader = gimli::EndianReader< gimli::RunTimeEndian, BinaryDataSlice >;
| 31.949904 | 191 | 0.506453 |
2fb34551bb28e0fdb37d90dc577936981e8973cb
| 11,038 |
use cosmwasm_std::{
attr, to_binary, Addr, CosmosMsg, Decimal, Deps, DepsMut, MessageInfo, Order, Response,
StdError, StdResult, Storage, Uint128, WasmMsg,
};
use crate::error::ContractError;
use crate::state::{
read_config, read_pool_info, rewards_read, rewards_store, store_pool_info, Config, PoolInfo,
RewardInfo,
};
use nebula_protocol::staking::{RewardInfoResponse, RewardInfoResponseItem};
use cw20::Cw20ExecuteMsg;
/// ## Description
/// Adds reward to LP staking pools.
///
/// ## Params
/// - **deps** is an object of type [`DepsMut`].
///
/// - **rewards** is an object of type [`Vec<(String, Uint128)>`] which is a list of rewards
/// deposited to each LP staking pool -- (cluster token, deposit amount).
///
/// - **rewards_amount** is an object of type [`Uint128`] which is the total deposit rewards.
pub fn deposit_reward(
    deps: DepsMut,
    rewards: Vec<(String, Uint128)>,
    rewards_amount: Uint128,
) -> Result<Response, ContractError> {
    for (asset_token, amount) in rewards.iter() {
        // Validate address format
        let validated_asset_token = deps.api.addr_validate(asset_token.as_str())?;
        let mut pool_info: PoolInfo = read_pool_info(deps.storage, &validated_asset_token)?;
        let mut reward_amount = *amount;

        if pool_info.total_bond_amount.is_zero() {
            // If there is no bonding at all, cannot compute reward_per_bond
            // Store all deposit rewards to pending rewards
            pool_info.pending_reward += reward_amount;
        } else {
            // If there is some bonding, update the reward index
            // Take pending reward into account
            reward_amount += pool_info.pending_reward;
            pool_info.pending_reward = Uint128::zero();

            // Compute reward to be distribute per bond for this round
            let normal_reward_per_bond =
                Decimal::from_ratio(reward_amount, pool_info.total_bond_amount);
            // Update the reward index
            // -- new_reward_index = old_reward_index + reward_per_bond_this_round
            pool_info.reward_index = pool_info.reward_index + normal_reward_per_bond;
        }

        store_pool_info(deps.storage, &validated_asset_token, &pool_info)?;
    }

    // NOTE(review): `rewards_amount` is only echoed in the response
    // attributes; it is never checked against the sum of `rewards` —
    // confirm the caller guarantees consistency.
    Ok(Response::new().add_attributes(vec![
        attr("action", "deposit_reward"),
        attr("rewards_amount", rewards_amount.to_string()),
    ]))
}
/// ## Description
/// Withdraws all rewards or single reward depending on `asset_token`.
///
/// ## Params
/// - **deps** is an object of type [`DepsMut`].
///
/// - **info** is an object of type [`MessageInfo`].
///
/// - **asset_token** is an object of type [`Option<String>`] which is an address of
/// a cluster token contract.
pub fn withdraw_reward(
    deps: DepsMut,
    info: MessageInfo,
    asset_token: Option<String>,
) -> Result<Response, ContractError> {
    // Validate address format
    let validated_asset_token = asset_token
        .map(|x| deps.api.addr_validate(x.as_str()))
        .transpose()?;
    let staker_addr = info.sender;

    // Compute the pending rewards of the staker
    let reward_amount = _withdraw_reward(deps.storage, &staker_addr, &validated_asset_token)?;

    // Transfer rewards from this LP staking contract to the message sender / staker.
    // Rewards are paid in the configured Nebula CW20 token.
    let config: Config = read_config(deps.storage)?;
    Ok(Response::new()
        .add_messages(vec![CosmosMsg::Wasm(WasmMsg::Execute {
            contract_addr: config.nebula_token.to_string(),
            msg: to_binary(&Cw20ExecuteMsg::Transfer {
                recipient: staker_addr.to_string(),
                amount: reward_amount,
            })?,
            funds: vec![],
        })])
        .add_attributes(vec![
            attr("action", "withdraw"),
            attr("amount", reward_amount.to_string()),
        ]))
}
/// ## Description
/// Computes all pending rewards or single pending reward of a staker depending on `asset_token`.
///
/// ## Params
/// - **storage** is a mutable reference to an object implementing trait [`Storage`].
///
/// - **staker_addr** is a reference to an object of type [`Addr`] which is a staker address.
///
/// - **asset_token** is a reference to an object of type [`Option<Addr>`] which is an address
/// of a cluster token contract.
fn _withdraw_reward(
    storage: &mut dyn Storage,
    staker_addr: &Addr,
    asset_token: &Option<Addr>,
) -> Result<Uint128, ContractError> {
    // Get all rewards owned by this staker
    let rewards_bucket = rewards_read(storage, staker_addr);

    // Pairs of (cluster token, reward info) to drain below.
    let reward_pairs: Vec<(Addr, RewardInfo)>;
    if let Some(asset_token) = asset_token {
        // Withdraw single reward
        let reward_info = rewards_bucket.may_load(asset_token.as_bytes())?;
        reward_pairs = if let Some(reward_info) = reward_info {
            vec![(asset_token.clone(), reward_info)]
        } else {
            vec![]
        };
    } else {
        // Withdraw all rewards
        reward_pairs =
            rewards_bucket
                .range(None, None, Order::Ascending)
                .map(|item| {
                    let (k, v) = item?;
                    // Bucket keys are the UTF-8 bytes of the token address.
                    Ok((
                        Addr::unchecked(std::str::from_utf8(&k).map_err(|_| {
                            ContractError::Invalid("reward pair address".to_string())
                        })?),
                        v,
                    ))
                })
                .collect::<Result<Vec<(Addr, RewardInfo)>, ContractError>>()?;
    }

    let mut amount: Uint128 = Uint128::zero();
    for reward_pair in reward_pairs {
        let (asset_token, mut reward_info) = reward_pair;
        let pool_info: PoolInfo = read_pool_info(storage, &asset_token)?;

        // Withdraw reward to staker pending reward
        before_share_change(&pool_info, &mut reward_info)?;

        // Accumulate the payout and zero out what was pending.
        amount += reward_info.pending_reward;
        reward_info.pending_reward = Uint128::zero();

        // Update rewards info: drop empty entries, persist the rest.
        if reward_info.pending_reward.is_zero() && reward_info.bond_amount.is_zero() {
            rewards_store(storage, staker_addr).remove(asset_token.as_bytes());
        } else {
            rewards_store(storage, staker_addr).save(asset_token.as_bytes(), &reward_info)?;
        }
    }

    Ok(amount)
}
/// ## Description
/// Withdraws current reward to staker's pending reward
///
/// ## Params
/// - **pool_info** is a reference to an object of type [`PoolInfo`] which is the information of
/// a LP staking pool.
///
/// - **reward_info** is a mutable reference to an object of type [`RewardInfo`] which is
/// the staker related information to the LP staking pool.
pub fn before_share_change(pool_info: &PoolInfo, reward_info: &mut RewardInfo) -> StdResult<()> {
    // Rewards accrued per unit bonded since the pool's index last seen by
    // this staker:
    //   pending = bond * pool_index - bond * staker_index
    //           = bond * (reward_per_bond_i + ... + reward_per_bond_j)
    let accrued = reward_info.bond_amount * pool_info.reward_index;
    let already_credited = reward_info.bond_amount * reward_info.index;
    let pending_reward = accrued.checked_sub(already_credited)?;

    // Fast-forward the staker to the pool's current index and bank the
    // newly accrued amount.
    reward_info.index = pool_info.reward_index;
    reward_info.pending_reward += pending_reward;
    Ok(())
}
/// ## Description
/// Returns staker reward information on a specific LP staking pool. Return all rewards if
/// `asset_token` is not specified.
///
/// ## Params
/// - **deps** is an object of type [`Deps`].
///
/// - **staker_addr** is an object of type [`String`] which is the staker address.
///
/// - **asset_token** is an object of type [`Option<String>`] which is an address of
/// a cluster token contract.
pub fn query_reward_info(
    deps: Deps,
    staker_addr: String,
    asset_token: Option<String>,
) -> StdResult<RewardInfoResponse> {
    // Validate address format
    let validated_staker_addr = deps.api.addr_validate(staker_addr.as_str())?;
    let validated_asset_token = asset_token
        .map(|x| deps.api.addr_validate(x.as_str()))
        .transpose()?;

    // Retrieve the reward information of the staker on the CT related LP staking pool
    let reward_infos: Vec<RewardInfoResponseItem> =
        _read_reward_infos(deps.storage, &validated_staker_addr, &validated_asset_token)?;

    // Echo back the original (unvalidated) staker address string.
    Ok(RewardInfoResponse {
        staker_addr,
        reward_infos,
    })
}
/// ## Description
/// Returns all rewards or single reward of a staker depending on `asset_token` as a vector
/// of custom struct [`RewardInfoResponseItem`].
///
/// ## Params
/// - **storage** is a reference to an object implementing trait [`Storage`].
///
/// - **staker_addr** is a reference to an object of type [`Addr`] which is the staker address.
///
/// - **asset_token** is a reference to an object of type [`Option<Addr>`] which is an address
/// of a cluster token contract.
fn _read_reward_infos(
    storage: &dyn Storage,
    staker_addr: &Addr,
    asset_token: &Option<Addr>,
) -> StdResult<Vec<RewardInfoResponseItem>> {
    // Get all rewards owned by this staker
    let rewards_bucket = rewards_read(storage, staker_addr);

    let reward_infos: Vec<RewardInfoResponseItem>;
    if let Some(asset_token) = asset_token {
        // Read single reward
        reward_infos =
            if let Some(mut reward_info) = rewards_bucket.may_load(asset_token.as_bytes())? {
                // Get LP staking pool information
                let pool_info = read_pool_info(storage, asset_token)?;
                // Add newer rewards to pending rewards (read-only query:
                // the updated info is not persisted).
                before_share_change(&pool_info, &mut reward_info)?;

                vec![RewardInfoResponseItem {
                    asset_token: asset_token.to_string(),
                    bond_amount: reward_info.bond_amount,
                    pending_reward: reward_info.pending_reward,
                }]
            } else {
                vec![]
            };
    } else {
        // Read all rewards
        reward_infos = rewards_bucket
            .range(None, None, Order::Ascending)
            .map(|item| {
                let (k, v) = item?;
                // Bucket keys are the UTF-8 bytes of the token address.
                let asset_token = Addr::unchecked(
                    std::str::from_utf8(&k)
                        .map_err(|_| StdError::invalid_utf8("invalid asset token address"))?,
                );
                let mut reward_info = v;

                // Get LP staking pool information
                let pool_info = read_pool_info(storage, &asset_token)?;
                // Add newer rewards to pending rewards
                before_share_change(&pool_info, &mut reward_info)?;

                Ok(RewardInfoResponseItem {
                    asset_token: asset_token.to_string(),
                    bond_amount: reward_info.bond_amount,
                    pending_reward: reward_info.pending_reward,
                })
            })
            .collect::<StdResult<Vec<RewardInfoResponseItem>>>()?;
    }

    Ok(reward_infos)
}
| 38.193772 | 97 | 0.624751 |
9bc01e41aafc2a5231d2e6a021a74b6aed732722
| 7,115 |
// Generated from definition io.k8s.api.authorization.v1.SubjectAccessReviewSpec
// NOTE(review): machine-generated binding; all fields are optional on the
// wire (every field is `Option` and absent fields are skipped on serialize).
/// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set
#[derive(Clone, Debug, Default, PartialEq)]
pub struct SubjectAccessReviewSpec {
    /// Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.
    pub extra: Option<std::collections::BTreeMap<String, Option<Vec<String>>>>,
    /// Groups is the groups you're testing for.
    pub groups: Option<Vec<String>>,
    /// NonResourceAttributes describes information for a non-resource access request
    pub non_resource_attributes: Option<crate::v1_10::api::authorization::v1::NonResourceAttributes>,
    /// ResourceAuthorizationAttributes describes information for a resource access request
    pub resource_attributes: Option<crate::v1_10::api::authorization::v1::ResourceAttributes>,
    /// UID information about the requesting user.
    pub uid: Option<String>,
    /// User is the user you're testing for. If you specify "User" but not "Groups", then is it interpreted as "What if User were not a member of any groups
    pub user: Option<String>,
}
impl<'de> serde::Deserialize<'de> for SubjectAccessReviewSpec {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
        // Internal discriminant for the JSON keys of this struct; `Other`
        // absorbs any key we do not recognize.
        #[allow(non_camel_case_types)]
        enum Field {
            Key_extra,
            Key_groups,
            Key_non_resource_attributes,
            Key_resource_attributes,
            Key_uid,
            Key_user,
            Other,
        }
        impl<'de> serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
                struct Visitor;
                impl<'de> serde::de::Visitor<'de> for Visitor {
                    type Value = Field;
                    fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                        write!(f, "field identifier")
                    }
                    // Map the camelCase wire key names onto Field variants.
                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
                        Ok(match v {
                            "extra" => Field::Key_extra,
                            "groups" => Field::Key_groups,
                            "nonResourceAttributes" => Field::Key_non_resource_attributes,
                            "resourceAttributes" => Field::Key_resource_attributes,
                            "uid" => Field::Key_uid,
                            "user" => Field::Key_user,
                            _ => Field::Other,
                        })
                    }
                }
                deserializer.deserialize_identifier(Visitor)
            }
        }
        struct Visitor;
        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = SubjectAccessReviewSpec;
            fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(f, "struct SubjectAccessReviewSpec")
            }
            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
                // Every field is optional; anything not present stays `None`.
                let mut value_extra: Option<std::collections::BTreeMap<String, Option<Vec<String>>>> = None;
                let mut value_groups: Option<Vec<String>> = None;
                let mut value_non_resource_attributes: Option<crate::v1_10::api::authorization::v1::NonResourceAttributes> = None;
                let mut value_resource_attributes: Option<crate::v1_10::api::authorization::v1::ResourceAttributes> = None;
                let mut value_uid: Option<String> = None;
                let mut value_user: Option<String> = None;
                while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        Field::Key_extra => value_extra = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_groups => value_groups = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_non_resource_attributes => value_non_resource_attributes = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_resource_attributes => value_resource_attributes = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_uid => value_uid = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_user => value_user = serde::de::MapAccess::next_value(&mut map)?,
                        // Unknown key: consume and discard its value.
                        Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }
                Ok(SubjectAccessReviewSpec {
                    extra: value_extra,
                    groups: value_groups,
                    non_resource_attributes: value_non_resource_attributes,
                    resource_attributes: value_resource_attributes,
                    uid: value_uid,
                    user: value_user,
                })
            }
        }
        deserializer.deserialize_struct(
            "SubjectAccessReviewSpec",
            &[
                "extra",
                "groups",
                "nonResourceAttributes",
                "resourceAttributes",
                "uid",
                "user",
            ],
            Visitor,
        )
    }
}
impl serde::Serialize for SubjectAccessReviewSpec {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
        // Absent optional fields are skipped entirely, so the declared struct
        // length counts one slot per field that is actually `Some`.
        let present_fields = usize::from(self.extra.is_some())
            + usize::from(self.groups.is_some())
            + usize::from(self.non_resource_attributes.is_some())
            + usize::from(self.resource_attributes.is_some())
            + usize::from(self.uid.is_some())
            + usize::from(self.user.is_some());
        let mut state = serializer.serialize_struct("SubjectAccessReviewSpec", present_fields)?;
        if let Some(value) = &self.extra {
            serde::ser::SerializeStruct::serialize_field(&mut state, "extra", value)?;
        }
        if let Some(value) = &self.groups {
            serde::ser::SerializeStruct::serialize_field(&mut state, "groups", value)?;
        }
        if let Some(value) = &self.non_resource_attributes {
            serde::ser::SerializeStruct::serialize_field(&mut state, "nonResourceAttributes", value)?;
        }
        if let Some(value) = &self.resource_attributes {
            serde::ser::SerializeStruct::serialize_field(&mut state, "resourceAttributes", value)?;
        }
        if let Some(value) = &self.uid {
            serde::ser::SerializeStruct::serialize_field(&mut state, "uid", value)?;
        }
        if let Some(value) = &self.user {
            serde::ser::SerializeStruct::serialize_field(&mut state, "user", value)?;
        }
        serde::ser::SerializeStruct::end(state)
    }
}
| 46.503268 | 166 | 0.568658 |
280451b0833249dec39034adfcf3af7ca78fe1d8
| 3,833 |
use crate::serialization::op_code::OpCode;
use crate::serialization::sigma_byte_reader::SigmaByteRead;
use crate::serialization::sigma_byte_writer::SigmaByteWrite;
use crate::serialization::SerializationError;
use crate::serialization::SigmaSerializable;
use crate::types::stype::SType;
use super::expr::Expr;
use super::expr::InvalidArgumentError;
use crate::has_opcode::HasStaticOpCode;
/// Selects an interval of elements from a collection
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct Slice {
    /// Collection to slice (validated by `Slice::new` to be `SColl`)
    pub input: Box<Expr>,
    /// The lowest index to include from this collection (an `SInt` expression)
    pub from: Box<Expr>,
    /// The lowest index to exclude from this collection (an `SInt` expression)
    pub until: Box<Expr>,
}
impl Slice {
    /// Create new object, returns an error if any of the requirements failed:
    /// `input` must evaluate to a collection (`SColl`) and both bounds must
    /// evaluate to `SInt`.
    pub fn new(input: Expr, from: Expr, until: Expr) -> Result<Self, InvalidArgumentError> {
        match input.post_eval_tpe() {
            SType::SColl(_) => {}
            // Report the actual post-evaluation type that failed the check
            // (previously this printed the pre-evaluation `tpe()`).
            tpe => {
                return Err(InvalidArgumentError(format!(
                    "Expected Slice input to be SColl, got {0:?}",
                    tpe
                )))
            }
        };
        if from.post_eval_tpe() != SType::SInt {
            // Print the offending type, matching what the message promises
            // (previously the whole expression was debug-printed).
            return Err(InvalidArgumentError(format!(
                "Slice: expected from type to be SInt, got {0:?}",
                from.post_eval_tpe()
            )));
        }
        if until.post_eval_tpe() != SType::SInt {
            return Err(InvalidArgumentError(format!(
                "Slice: expected until type to be SInt, got {0:?}",
                until.post_eval_tpe()
            )));
        }
        Ok(Self {
            input: input.into(),
            from: from.into(),
            until: until.into(),
        })
    }
    /// Type of this node: slicing preserves the input collection type.
    pub fn tpe(&self) -> SType {
        self.input.tpe()
    }
}
impl HasStaticOpCode for Slice {
    /// Serialization op code tag identifying `Slice` nodes
    const OP_CODE: OpCode = OpCode::SLICE;
}
impl SigmaSerializable for Slice {
    // Fields are written in the fixed order input, from, until;
    // `sigma_parse` below must read them back in exactly that order.
    fn sigma_serialize<W: SigmaByteWrite>(&self, w: &mut W) -> Result<(), std::io::Error> {
        self.input.sigma_serialize(w)?;
        self.from.sigma_serialize(w)?;
        self.until.sigma_serialize(w)
    }
    fn sigma_parse<R: SigmaByteRead>(r: &mut R) -> Result<Self, SerializationError> {
        let input = Expr::sigma_parse(r)?;
        let from = Expr::sigma_parse(r)?;
        let until = Expr::sigma_parse(r)?;
        // Re-validate via the constructor; a type mismatch is converted into
        // a SerializationError by the `?` (From<InvalidArgumentError>).
        Ok(Self::new(input, from, until)?)
    }
}
#[cfg(feature = "arbitrary")]
#[allow(clippy::unwrap_used)]
mod arbitrary {
    use super::*;
    use crate::mir::expr::arbitrary::ArbExprParams;
    use proptest::prelude::*;
    impl Arbitrary for Slice {
        type Strategy = BoxedStrategy<Self>;
        type Parameters = ();
        // Generates a boolean-collection input and two SInt bounds, so the
        // `Slice::new` validation inside `prop_map` cannot fail (hence the
        // `unwrap` allowed above).
        fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
            (
                any_with::<Expr>(ArbExprParams {
                    tpe: SType::SColl(SType::SBoolean.into()),
                    depth: 1,
                }),
                any_with::<Expr>(ArbExprParams {
                    tpe: SType::SInt,
                    depth: 0,
                }),
                any_with::<Expr>(ArbExprParams {
                    tpe: SType::SInt,
                    depth: 0,
                }),
            )
                .prop_map(|(input, from, until)| Self::new(input, from, until).unwrap())
                .boxed()
        }
    }
}
#[cfg(test)]
#[cfg(feature = "arbitrary")]
mod tests {
    use super::*;
    use crate::mir::expr::Expr;
    use crate::serialization::sigma_serialize_roundtrip;
    use proptest::prelude::*;
    proptest! {
        #![proptest_config(ProptestConfig::with_cases(16))]
        // Property: serializing then parsing an arbitrary Slice yields the
        // original expression back.
        #[test]
        fn ser_roundtrip(v in any::<Slice>()) {
            let expr: Expr = v.into();
            prop_assert_eq![sigma_serialize_roundtrip(&expr), expr];
        }
    }
}
| 29.484615 | 92 | 0.549961 |
ef19bc2fff21f213025a4afc53a3af3bb075c2f4
| 6,607 |
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#[allow(unused_imports)]
use log::{debug, info, warn};
use anyhow::bail;
use diem_types::transaction::{ArgumentABI, ScriptABI, TypeArgumentABI};
use heck::SnakeCase;
use move_core_types::language_storage::TypeTag;
use move_model::{
model::{GlobalEnv, ModuleEnv},
ty,
};
use serde::{Deserialize, Serialize};
use std::{collections::BTreeMap, io::Read, path::PathBuf};
/// Options passed into the ABI generator.
#[derive(Debug, Clone, Serialize, Deserialize)]
// `default`: missing fields fall back to `Default::default()` below;
// `deny_unknown_fields`: unrecognized keys are rejected on deserialization.
#[serde(default, deny_unknown_fields)]
pub struct AbigenOptions {
    /// Where to find the .mv files of scripts.
    pub compiled_script_directory: String,
    /// In which directory to store output.
    pub output_directory: String,
}
impl Default for AbigenOptions {
    fn default() -> Self {
        Self {
            compiled_script_directory: ".".to_string(),
            output_directory: "abi".to_string(),
        }
    }
}
/// The ABI generator.
pub struct Abigen<'env> {
    /// Generation options (input/output directories).
    options: &'env AbigenOptions,
    /// Input definitions.
    env: &'env GlobalEnv,
    /// Map from output file path to generated script ABI (if any).
    output: BTreeMap<String, ScriptABI>,
}
impl<'env> Abigen<'env> {
/// Creates a new ABI generator.
pub fn new(env: &'env GlobalEnv, options: &'env AbigenOptions) -> Self {
Self {
options,
env,
output: Default::default(),
}
}
/// Returns the result of ABI generation, a vector of pairs of filenames
/// and JSON content.
pub fn into_result(mut self) -> Vec<(String, Vec<u8>)> {
std::mem::take(&mut self.output)
.into_iter()
.map(|(path, abi)| {
let content = bcs::to_bytes(&abi).expect("ABI serialization should not fail");
(path, content)
})
.collect()
}
/// Generates ABIs for all script modules in the environment (excluding the dependency set).
pub fn gen(&mut self) {
for module in self.env.get_modules() {
if module.is_script_module() && module.is_target() {
let mut path = PathBuf::from(&self.options.output_directory);
path.push(
PathBuf::from(module.get_source_path())
.with_extension("abi")
.file_name()
.expect("file name"),
);
match self.compute_abi(&module) {
Ok(abi) => {
self.output.insert(path.to_string_lossy().to_string(), abi);
}
Err(error) => panic!(
"Error while processing script file {:?}: {}",
module.get_source_path(),
error
),
}
}
}
}
/// Compute the ABI of a script module.
fn compute_abi(&self, module_env: &ModuleEnv<'env>) -> anyhow::Result<ScriptABI> {
let symbol_pool = module_env.symbol_pool();
let func = match module_env.get_functions().next() {
Some(f) => f,
None => bail!("A script module should define a function."),
};
let name = symbol_pool.string(func.get_name()).to_string();
let doc = func.get_doc().to_string();
let code = self.load_compiled_bytes(&module_env)?.to_vec();
let ty_args = func
.get_named_type_parameters()
.iter()
.map(|ty_param| {
TypeArgumentABI::new(symbol_pool.string(ty_param.0).to_string().to_snake_case())
})
.collect();
let args = func
.get_parameters()
.iter()
.filter_map(
|param| match self.get_type_tag_skipping_references(¶m.1) {
Ok(Some(tag)) => Some(Ok(ArgumentABI::new(
symbol_pool.string(param.0).to_string(),
tag,
))),
Ok(None) => None,
Err(error) => Some(Err(error)),
},
)
.collect::<anyhow::Result<_>>()?;
Ok(ScriptABI::new(name, doc, code, ty_args, args))
}
fn load_compiled_bytes(&self, module_env: &ModuleEnv<'env>) -> anyhow::Result<Vec<u8>> {
let mut path = PathBuf::from(&self.options.compiled_script_directory);
path.push(
PathBuf::from(module_env.get_source_path())
.with_extension("mv")
.file_name()
.expect("file name"),
);
let mut f = match std::fs::File::open(path.clone()) {
Ok(f) => f,
Err(error) => bail!("Failed to open compiled file {:?}: {}", path, error),
};
let mut bytes = Vec::new();
f.read_to_end(&mut bytes)?;
Ok(bytes)
}
fn get_type_tag_skipping_references(&self, ty0: &ty::Type) -> anyhow::Result<Option<TypeTag>> {
use ty::Type::*;
let tag = match ty0 {
Primitive(prim) => {
use ty::PrimitiveType::*;
match prim {
Bool => TypeTag::Bool,
U8 => TypeTag::U8,
U64 => TypeTag::U64,
U128 => TypeTag::U128,
Address => TypeTag::Address,
Signer => TypeTag::Signer,
Num | Range | TypeValue | EventStore => {
bail!("Type {:?} is not allowed in scripts.", ty0)
}
}
}
Reference(_, _) => {
// Skip references (most likely a `&signer` type)
return Ok(None);
}
Vector(ty) => {
let tag = self.get_type_tag(ty)?;
TypeTag::Vector(Box::new(tag))
}
Tuple(_)
| Struct(_, _, _)
| TypeParameter(_)
| Fun(_, _)
| TypeDomain(_)
| ResourceDomain(..)
| TypeLocal(_)
| Error
| Var(_) => bail!("Type {:?} is not allowed in scripts.", ty0),
};
Ok(Some(tag))
}
fn get_type_tag(&self, ty: &ty::Type) -> anyhow::Result<TypeTag> {
if let Some(tag) = self.get_type_tag_skipping_references(ty)? {
return Ok(tag);
}
bail!(
"References such as {:?} are only allowed in the list of parameters.",
ty
);
}
}
| 34.056701 | 99 | 0.504465 |
abc7229a63fe91a11255e9676a9f709e6bbfd609
| 15,137 |
#![allow(unused_imports)]
use std::io::{Read, Write};
use std::net::TcpStream;
use std::sync::{Condvar, Mutex};
use std::{env, sync::atomic::*, sync::Arc, thread, time::*};
use anyhow::*;
use log::*;
use url;
use embedded_svc::anyerror::*;
use embedded_svc::httpd::registry::*;
use embedded_svc::httpd::*;
use embedded_svc::ping::Ping;
use embedded_svc::wifi::*;
use esp_idf_svc::httpd as idf;
use esp_idf_svc::httpd::ServerRegistry;
use esp_idf_svc::netif::*;
use esp_idf_svc::nvs::*;
use esp_idf_svc::ping;
use esp_idf_svc::sysloop::*;
use esp_idf_svc::wifi::*;
use esp_idf_hal::delay;
use esp_idf_hal::gpio;
use esp_idf_hal::i2c;
use esp_idf_hal::prelude::*;
use esp_idf_hal::spi;
use esp_idf_hal::ulp;
use esp_idf_sys;
use esp_idf_sys::esp;
use display_interface_spi::SPIInterfaceNoCS;
use embedded_graphics::mono_font::{ascii::FONT_10X20, MonoTextStyle};
use embedded_graphics::pixelcolor::*;
use embedded_graphics::prelude::*;
use embedded_graphics::primitives::*;
use embedded_graphics::text::*;
use ili9341;
use ssd1306;
use ssd1306::mode::DisplayConfig;
use st7789;
// Wi-Fi credentials used by `wifi()`; replace with real values before flashing.
const SSID: &str = "ssid";
const PASS: &str = "pass";
// ULP symbol table (provides CYCLES) and ULP binary, with paths supplied via
// CARGO_PIO_* env vars at build time (ESP32-S2 only).
#[cfg(esp32s2)]
include!(env!("CARGO_PIO_SYMGEN_RUNNER_SYMBOLS_FILE"));
#[cfg(esp32s2)]
const ULP: &[u8] = include_bytes!(env!("CARGO_PIO_BINGEN_RUNNER_BIN_FILE"));
/// Demo entry point: runs the std smoke tests, brings up Wi-Fi + HTTP server,
/// then (on ESP32-S2) hands LED blinking over to the ULP and deep-sleeps.
fn main() -> Result<()> {
    test_print();
    test_atomics();
    test_threads();
    // Enough playing.
    // The real demo: start WiFi and ignite Httpd
    env::set_var("RUST_BACKTRACE", "1"); // Get some nice backtraces from Anyhow
    // Uncomment this if you have a TTGO ESP32 board
    // For other boards, you might have to use a different embedded-graphics driver and pin configuration
    // ttgo_hello_world()?;
    // ... or uncomment this if you have a Kaluga-1 ESP32-S2 board
    // For other boards, you might have to use a different embedded-graphics driver and pin configuration
    // kaluga_hello_world(true)?;
    // ... or uncomment this if you have a Heltec LoRa 32 board
    // For other boards, you might have to use a different embedded-graphics driver and pin configuration
    // heltec_hello_world()?;
    let wifi = wifi()?;
    test_tcp()?;
    // Slot + condvar pair: the `/ulp_start` HTTP handler stores the requested
    // blink-cycle count in the slot and notifies this thread.
    let mutex = Arc::new((Mutex::new(None), Condvar::new()));
    let httpd = httpd(mutex.clone())?;
    let mut wait = mutex.0.lock().unwrap();
    // Block until a cycle count has been posted via the web form.
    #[allow(unused)]
    let cycles = loop {
        if let Some(cycles) = *wait {
            break cycles;
        } else {
            wait = mutex.1.wait(wait).unwrap();
        }
    };
    for s in 0..3 {
        info!("Shutting down in {} secs", 3 - s);
        thread::sleep(Duration::from_secs(1));
    }
    // Tear down in reverse order of construction.
    drop(httpd);
    info!("Httpd stopped");
    drop(wifi);
    info!("Wifi stopped");
    #[cfg(esp32s2)]
    start_ulp(cycles)?;
    Ok(())
}
/// Smoke test: verify plain and Debug-formatted printing works.
fn test_print() {
    // Start with the simplest possible output.
    println!("Hello, world from Rust!");
    // Exercise a heap-allocated collection and its Debug formatting.
    let children = vec!["foo", "bar"];
    println!("More complex print {:?}", children);
}
/// Smoke test: spawn five threads and join them, exercising std::thread
/// support on ESP-IDF.
fn test_threads() {
    let mut children = vec![];
    println!("Rust main thread: {:?}", thread::current());
    for i in 0..5 {
        // Spin up another thread
        children.push(thread::spawn(move || {
            println!("This is thread number {}, {:?}", i, thread::current());
        }));
    }
    println!(
        "About to join the threads. If ESP-IDF was patched successfully, joining will NOT crash"
    );
    for child in children {
        // Wait for the thread to finish. Returns a result.
        let _ = child.join();
    }
    // NOTE(review): presumably gives thread output time to settle before the
    // success message — confirm whether this delay is still needed.
    thread::sleep(Duration::from_secs(2));
    println!("Joins were successful.");
}
/// Smoke test: issue a plain HTTP/1.0 GET to 1.1.1.1 over raw TCP and log
/// whatever comes back, proving the network stack works end to end.
fn test_tcp() -> Result<()> {
    info!("About to open a TCP connection to 1.1.1.1 port 80");
    let mut conn = TcpStream::connect("one.one.one.one:80")?;
    // Minimal HTTP/1.0 request; the server closes the connection after
    // responding, so read_to_end terminates.
    let request = "GET / HTTP/1.0\n\n";
    conn.write(request.as_bytes())?;
    let mut response = Vec::new();
    conn.read_to_end(&mut response)?;
    info!(
        "1.1.1.1 returned:\n=================\n{}\n=================\nSince it returned something, all is OK",
        std::str::from_utf8(&response)?);
    Ok(())
}
/// Smoke test: exercise atomic compare-exchange and swap, keeping the results
/// observable via volatile reads so the optimizer cannot fold them away.
fn test_atomics() {
    let a = AtomicUsize::new(0);
    // `compare_exchange` replaces the deprecated `compare_and_swap`; it
    // returns the previous value in both the Ok and Err cases, so
    // `unwrap_or_else(|x| x)` reproduces the old semantics exactly.
    let v1 = a
        .compare_exchange(0, 1, Ordering::SeqCst, Ordering::SeqCst)
        .unwrap_or_else(|x| x);
    let v2 = a.swap(2, Ordering::SeqCst);
    let (r1, r2) = unsafe {
        // SAFETY: `v1`/`v2` are valid, initialized stack values; volatile is
        // used only to keep the atomic results from being optimized out.
        let r1 = core::ptr::read_volatile(&v1);
        let r2 = core::ptr::read_volatile(&v2);
        (r1, r2)
    };
    println!("Result: {}, {}", r1, r2);
}
/// Renders the demo UI on a TTGO board's ST7789 display, driven over SPI2.
#[allow(dead_code)]
#[cfg(esp32)]
fn ttgo_hello_world() -> Result<()> {
    info!("About to initialize the TTGO ST7789 LED driver");
    let peripherals = Peripherals::take().unwrap();
    let pins = peripherals.pins;
    let config = <spi::config::Config as Default>::default()
        .baudrate(26.MHz().into())
        .bit_order(spi::config::BitOrder::MSBFirst);
    // Turn on the display backlight (GPIO4) before drawing.
    let mut backlight = pins.gpio4.into_output()?;
    backlight.set_high()?;
    // Display interface: SPI2 plus a data/command line on GPIO16 (no CS).
    let di = SPIInterfaceNoCS::new(
        spi::Master::<spi::SPI2, _, _, _, _>::new(
            peripherals.spi2,
            spi::Pins {
                sclk: pins.gpio18,
                sdo: pins.gpio19,
                sdi: Option::<gpio::Gpio21<gpio::Unknown>>::None,
                cs: Some(pins.gpio5),
            },
            config,
        )?,
        pins.gpio16.into_output()?,
    );
    let mut display = st7789::ST7789::new(
        di,
        pins.gpio23.into_output()?,
        // SP7789V is designed to drive 240x320 screens, even though the TTGO physical screen is smaller
        240,
        320,
    );
    AnyError::<st7789::Error<_>>::wrap(|| {
        display.init(&mut delay::Ets)?;
        display.set_orientation(st7789::Orientation::Portrait)?;
        // The TTGO board's screen does not start at offset 0x0, and the physical size is 135x240, instead of 240x320
        let top_left = Point::new(52, 40);
        let size = Size::new(135, 240);
        led_draw(&mut display.cropped(&Rectangle::new(top_left, size)))
    })
}
/// Renders the demo UI on a Kaluga-1 board, using either the ILI9341 or the
/// ST7789 SPI driver depending on the `ili9341` flag.
#[allow(dead_code)]
#[cfg(esp32s2)]
fn kaluga_hello_world(ili9341: bool) -> Result<()> {
    info!(
        "About to initialize the Kaluga {} SPI LED driver",
        if ili9341 { "ILI9341" } else { "ST7789" }
    );
    let peripherals = Peripherals::take().unwrap();
    let pins = peripherals.pins;
    // The two controllers are driven at different SPI clock rates.
    let config = <spi::config::Config as Default>::default()
        .baudrate((if ili9341 { 40 } else { 80 }).MHz().into())
        .bit_order(spi::config::BitOrder::MSBFirst);
    // Turn on the display backlight (GPIO6) before drawing.
    let mut backlight = pins.gpio6.into_output()?;
    backlight.set_high()?;
    // Display interface: SPI3 plus a data/command line on GPIO13 (no CS pin
    // on the interface itself; CS is handled by the SPI master on GPIO11).
    let di = SPIInterfaceNoCS::new(
        spi::Master::<spi::SPI3, _, _, _, _>::new(
            peripherals.spi3,
            spi::Pins {
                sclk: pins.gpio15,
                sdo: pins.gpio9,
                sdi: Option::<gpio::Gpio21<gpio::Unknown>>::None,
                cs: Some(pins.gpio11),
            },
            config,
        )?,
        pins.gpio13.into_output()?,
    );
    let reset = pins.gpio16.into_output()?;
    if ili9341 {
        AnyError::<ili9341::DisplayError>::wrap(|| {
            let mut display = ili9341::Ili9341::new(
                di,
                reset,
                &mut delay::Ets,
                KalugaOrientation::Landscape,
                ili9341::DisplaySize240x320,
            )?;
            led_draw(&mut display)
        })
    } else {
        let mut display = st7789::ST7789::new(di, reset, 320, 240);
        AnyError::<st7789::Error<_>>::wrap(|| {
            display.init(&mut delay::Ets)?;
            display.set_orientation(st7789::Orientation::Landscape)?;
            led_draw(&mut display)
        })
    }
}
/// Renders the demo UI on a Heltec board's SSD1306 OLED, driven over I2C0.
#[allow(dead_code)]
#[cfg(esp32)]
fn heltec_hello_world() -> Result<()> {
    info!("About to initialize the Heltec SSD1306 I2C LED driver");
    let peripherals = Peripherals::take().unwrap();
    let pins = peripherals.pins;
    let config = <i2c::config::MasterConfig as Default>::default().baudrate(400.kHz().into());
    let di = ssd1306::I2CDisplayInterface::new(i2c::Master::<i2c::I2C0, _, _>::new(
        peripherals.i2c0,
        i2c::Pins {
            sda: pins.gpio4,
            scl: pins.gpio15,
        },
        config,
    )?);
    let mut delay = delay::Ets;
    let mut reset = pins.gpio16.into_output()?;
    // Hardware reset pulse on GPIO16: high (1 ms) -> low (10 ms) -> high,
    // before initializing the controller.
    reset.set_high()?;
    delay.delay_ms(1 as u32);
    reset.set_low()?;
    delay.delay_ms(10 as u32);
    reset.set_high()?;
    let mut display = Box::new(
        ssd1306::Ssd1306::new(
            di,
            ssd1306::size::DisplaySize128x64,
            ssd1306::rotation::DisplayRotation::Rotate0,
        )
        .into_buffered_graphics_mode(),
    );
    AnyError::<display_interface::DisplayError>::wrap(|| {
        display.init()?;
        led_draw(&mut *display)?;
        // The buffered graphics mode requires an explicit flush to push
        // the frame to the panel.
        display.flush()
    })
}
/// Draws the demo frame on any embedded-graphics target: clears to black,
/// fills a blue rectangle with a yellow border over the whole bounding box,
/// then writes "Hello Rust!" vertically centered near the left edge.
#[allow(dead_code)]
fn led_draw<D>(display: &mut D) -> Result<(), D::Error>
where
    D: DrawTarget + Dimensions,
    D::Color: From<Rgb565>,
{
    display.clear(Rgb565::BLACK.into())?;
    // Background: blue fill, 1px yellow stroke, covering the full screen.
    Rectangle::new(display.bounding_box().top_left, display.bounding_box().size)
        .into_styled(
            PrimitiveStyleBuilder::new()
                .fill_color(Rgb565::BLUE.into())
                .stroke_color(Rgb565::YELLOW.into())
                .stroke_width(1)
                .build(),
        )
        .draw(display)?;
    Text::new(
        "Hello Rust!",
        Point::new(10, (display.bounding_box().size.height - 10) as i32 / 2),
        MonoTextStyle::new(&FONT_10X20, Rgb565::WHITE.into()),
    )
    .draw(display)?;
    info!("LED rendering done");
    Ok(())
}
/// Registers the demo HTTP routes and starts the server:
/// "/" greets, "/foo" deliberately errors, "/bar" returns a 403.
/// On ESP32-S2 the ULP form endpoints are mounted as well (the `mutex`
/// parameter is only used in that configuration).
#[allow(unused_variables)]
fn httpd(mutex: Arc<(Mutex<Option<u32>>, Condvar)>) -> Result<idf::Server> {
    let server = idf::ServerRegistry::new()
        .at("/")
        .get(|_| Ok("Hello, world!".into()))?
        .at("/foo")
        .get(|_| bail!("Boo, something happened!"))?
        .at("/bar")
        .get(|_| {
            Response::new(403)
                .status_message("No permissions")
                .body("You have no permissions to access this page".into())
                .into()
        })?;
    #[cfg(esp32s2)]
    let server = httpd_ulp_endpoints(server, mutex)?;
    server.start(&Default::default())
}
/// Adds the ESP32-S2-only "/ulp" HTML form page and the "/ulp_start" POST
/// handler. The handler parses the `cycles` form field and hands the value
/// to `main` through the shared mutex/condvar pair.
#[cfg(esp32s2)]
fn httpd_ulp_endpoints(
    server: ServerRegistry,
    mutex: Arc<(Mutex<Option<u32>>, Condvar)>,
) -> Result<ServerRegistry> {
    server
        .at("/ulp")
        .get(|_| {
            Ok(r#"
            <doctype html5>
            <html>
                <body>
                    <form method = "post" action = "/ulp_start" enctype="application/x-www-form-urlencoded">
                        Connect a LED to ESP32-S2 GPIO <b>Pin 04</b> and GND.<br>
                        Blink it with ULP <input name = "cycles" type = "text" value = "10"> times
                        <input type = "submit" value = "Go!">
                    </form>
                </body>
            </html>
            "#.into())
        })?
        .at("/ulp_start")
        .post(move |mut request| {
            let body = request.as_bytes()?;
            // Extract and parse the `cycles` form field; both a missing field
            // and an unparseable value are reported as errors.
            let cycles = url::form_urlencoded::parse(&body)
                .filter(|p| p.0 == "cycles")
                .map(|p| str::parse::<u32>(&p.1).map_err(Error::msg))
                .next()
                .ok_or(anyhow!("No parameter cycles"))??;
            // Publish the requested cycle count and wake the waiting `main`.
            let mut wait = mutex.0.lock().unwrap();
            *wait = Some(cycles);
            mutex.1.notify_one();
            Ok(format!(
                r#"
                <doctype html5>
                <html>
                    <body>
                        About to sleep now. The ULP chip should blink the LED {} times and then wake me up. Bye!
                    </body>
                </html>
                "#,
                cycles).to_owned().into())
        })
}
/// Loads and starts the RiscV ULP program with the requested blink-cycle
/// count, then puts the main CPU into deep sleep (ULP wakeup or 60 s timer).
#[cfg(esp32s2)]
fn start_ulp(cycles: u32) -> Result<()> {
    use esp_idf_hal::ulp;
    // SAFETY-ish (review): this block performs raw ESP-IDF FFI calls and
    // volatile accesses to CYCLES, a ULP-shared address generated into this
    // crate by the symgen include at the top of the file.
    unsafe {
        esp!(esp_idf_sys::ulp_riscv_load_binary(
            ULP.as_ptr(),
            ULP.len() as _
        ))?;
        info!("RiscV ULP binary loaded successfully");
        // Once started, the ULP will wakeup every 5 minutes
        // TODO: Figure out how to disable ULP timer-based wakeup completely, with an ESP-IDF call
        ulp::enable_timer(false);
        info!("RiscV ULP Timer configured");
        info!(
            "Default ULP LED blink cycles: {}",
            core::ptr::read_volatile(CYCLES as *mut u32)
        );
        // Hand the requested cycle count to the ULP program.
        core::ptr::write_volatile(CYCLES as *mut u32, cycles);
        info!(
            "Sent {} LED blink cycles to the ULP",
            core::ptr::read_volatile(CYCLES as *mut u32)
        );
        esp!(esp_idf_sys::ulp_riscv_run())?;
        info!("RiscV ULP started");
        esp!(esp_idf_sys::esp_sleep_enable_ulp_wakeup())?;
        info!("Wakeup from ULP enabled");
        // Wake up by a timer in 60 seconds
        info!("About to get to sleep now. Will wake up automatically either in 1 minute, or once the ULP has done blinking the LED");
        esp_idf_sys::esp_deep_sleep(Duration::from_secs(60).as_micros() as u64);
    }
    Ok(())
}
/// Starts Wi-Fi as a client using the hard-coded `SSID`/`PASS`, checks that
/// an IP was obtained, then pings the gateway to prove the link works.
fn wifi() -> Result<Box<impl Wifi>> {
    let mut wifi = Box::new(EspWifi::new(
        Arc::new(EspNetif::new()?),
        Arc::new(EspSysLoop::new()?),
        Arc::new(EspDefaultNvs::new()?),
    )?);
    info!("Wifi created");
    wifi.set_configuration(&Configuration::Client(ClientConfiguration {
        ssid: SSID.into(),
        password: PASS.into(),
        ..Default::default()
    }))?;
    info!("Wifi configuration set, about to get status");
    let status = wifi.get_status();
    // Only the fully-connected state (client started, connected, IP assigned)
    // is acceptable; anything else aborts with the raw status for diagnosis.
    if let Status(
        ClientStatus::Started(ClientConnectionStatus::Connected(ClientIpStatus::Done(ip_settings))),
        _,
    ) = status
    {
        info!("Wifi connected, about to do some pings");
        let ping_summary =
            ping::EspPing::default().ping(ip_settings.subnet.gateway, &Default::default())?;
        // Any lost ping counts as a failed link check.
        if ping_summary.transmitted != ping_summary.received {
            bail!(
                "Pinging gateway {} resulted in timeouts",
                ip_settings.subnet.gateway
            );
        }
        info!("Pinging done");
    } else {
        bail!("Unexpected Wifi status: {:?}", &status);
    }
    Ok(wifi)
}
// Kaluga needs customized screen orientation commands
// (not a surprise; quite a few ILI9341 boards need these as evidences in the TFT_eSPI & lvgl ESP32 C drivers)
/// Screen orientations with Kaluga-specific mode bits; see the
/// `ili9341::Mode` impl below for the raw values.
pub enum KalugaOrientation {
    Portrait,
    PortraitFlipped,
    Landscape,
    LandscapeFlipped,
}
impl ili9341::Mode for KalugaOrientation {
    /// Raw orientation mode byte handed to the ILI9341 driver
    /// (presumably the controller's MADCTL bits — confirm against datasheet).
    fn mode(&self) -> u8 {
        match self {
            Self::Portrait => 0,
            Self::Landscape => 0x20 | 0x40,
            Self::PortraitFlipped => 0x80 | 0x40,
            Self::LandscapeFlipped => 0x80 | 0x20,
        }
    }
    /// Whether the width/height of the display should be treated as swapped.
    fn is_landscape(&self) -> bool {
        // `matches!` replaces the exhaustive bool-returning match.
        matches!(self, Self::Landscape | Self::LandscapeFlipped)
    }
}
| 27.12724 | 133 | 0.562397 |
b925f0ea4216cdba2525134d73dbdc55692fbc2f
| 613 |
#![allow(unused_imports)]
use super::*;
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
// Machine-generated web-sys-style binding for the `OES_texture_float`
// extension object; see the MDN link in the doc attributes below.
// NOTE(review): the ABI string is `extern "wasm-bindgen"` — confirm this
// matches the wasm-bindgen version in use (other generated bindings use
// `extern "C"`).
extern "wasm-bindgen" {
    # [wasm_bindgen (is_type_of = | _ | false , extends = :: js_sys :: Object , js_name = OES_texture_float , typescript_type = "OES_texture_float")]
    #[derive(Debug, Clone, PartialEq, Eq)]
    #[doc = "The `OesTextureFloat` class."]
    #[doc = ""]
    #[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/OES_texture_float)"]
    #[doc = ""]
    #[doc = "*This API requires the following crate features to be activated: `OesTextureFloat`*"]
    pub type OesTextureFloat;
}
8fdb7306eb353d4bbc7d1271a2269e51d21ddb7d
| 35,623 |
//! Defines a Transaction type to package an atomic sequence of instructions.
#![cfg(feature = "full")]
use {
crate::{
hash::Hash,
instruction::{CompiledInstruction, Instruction},
message::Message,
nonce::NONCED_TX_MARKER_IX_INDEX,
precompiles::verify_if_precompile,
program_utils::limited_deserialize,
pubkey::Pubkey,
sanitize::{Sanitize, SanitizeError},
short_vec,
signature::{Signature, SignerError},
signers::Signers,
wasm_bindgen,
},
serde::Serialize,
solana_program::{system_instruction::SystemInstruction, system_program},
solana_sdk::feature_set,
std::{result, sync::Arc},
};
mod error;
mod sanitized;
mod versioned;
pub use {error::*, sanitized::*, versioned::*};
/// Amount of verification work to apply to a transaction.
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum TransactionVerificationMode {
    HashOnly,
    HashAndVerifyPrecompiles,
    FullVerification,
}
/// Shorthand result type: transaction operations fail with [`TransactionError`].
pub type Result<T> = result::Result<T, TransactionError>;
/// An atomic transaction: a set of signatures over a single [`Message`].
#[wasm_bindgen]
#[frozen_abi(digest = "FZtncnS1Xk8ghHfKiXE5oGiUbw2wJhmfXQuNgQR3K6Mc")]
#[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize, AbiExample)]
pub struct Transaction {
    /// A set of digital signatures of a serialized [`Message`], signed by the
    /// first `signatures.len()` keys of [`account_keys`].
    ///
    /// [`account_keys`]: Message::account_keys
    ///
    // NOTE: Serialization-related changes must be paired with the direct read at sigverify.
    #[wasm_bindgen(skip)]
    #[serde(with = "short_vec")]
    pub signatures: Vec<Signature>,
    /// The message to sign.
    #[wasm_bindgen(skip)]
    pub message: Message,
}
impl Sanitize for Transaction {
    fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
        // There must be a signature slot for every required signer, and there
        // can never be more signatures than account keys.
        let num_required_signatures = self.message.header.num_required_signatures as usize;
        if num_required_signatures > self.signatures.len()
            || self.signatures.len() > self.message.account_keys.len()
        {
            return Err(SanitizeError::IndexOutOfBounds);
        }
        // Defer the rest to the message's own sanitization rules.
        self.message.sanitize()
    }
}
impl Transaction {
    /// Create an unsigned transaction; signature slots are pre-filled with
    /// default (all-zeros) signatures, one per required signer.
    pub fn new_unsigned(message: Message) -> Self {
        Self {
            signatures: vec![Signature::default(); message.header.num_required_signatures as usize],
            message,
        }
    }
    /// Create an unsigned transaction from `instructions` with the given fee `payer`.
    pub fn new_with_payer(instructions: &[Instruction], payer: Option<&Pubkey>) -> Self {
        let message = Message::new(instructions, payer);
        Self::new_unsigned(message)
    }
    /// Create a signed transaction with the given payer.
    ///
    /// # Panics
    ///
    /// Panics when signing fails.
    pub fn new_signed_with_payer<T: Signers>(
        instructions: &[Instruction],
        payer: Option<&Pubkey>,
        signing_keypairs: &T,
        recent_blockhash: Hash,
    ) -> Self {
        let message = Message::new(instructions, payer);
        Self::new(signing_keypairs, message, recent_blockhash)
    }
    /// Create a signed transaction.
    ///
    /// # Panics
    ///
    /// Panics when signing fails.
    pub fn new<T: Signers>(
        from_keypairs: &T,
        message: Message,
        recent_blockhash: Hash,
    ) -> Transaction {
        let mut tx = Self::new_unsigned(message);
        tx.sign(from_keypairs, recent_blockhash);
        tx
    }
    /// Create a signed transaction
    /// * `from_keypairs` - The keys used to sign the transaction.
    /// * `keys` - The keys for the transaction. These are the program state
    ///   instances or lamport recipient keys.
    /// * `recent_blockhash` - The PoH hash.
    /// * `program_ids` - The keys that identify programs used in the `instruction` vector.
    /// * `instructions` - Instructions that will be executed atomically.
    ///
    /// # Panics
    ///
    /// Panics when signing fails.
    pub fn new_with_compiled_instructions<T: Signers>(
        from_keypairs: &T,
        keys: &[Pubkey],
        recent_blockhash: Hash,
        program_ids: Vec<Pubkey>,
        instructions: Vec<CompiledInstruction>,
    ) -> Self {
        // Account key order: signing keys first, then the other keys, then program ids.
        let mut account_keys = from_keypairs.pubkeys();
        let from_keypairs_len = account_keys.len();
        account_keys.extend_from_slice(keys);
        account_keys.extend(&program_ids);
        // NOTE(review): the message is built with `Hash::default()`; the real
        // `recent_blockhash` is applied by `Transaction::new` during signing.
        let message = Message::new_with_compiled_instructions(
            from_keypairs_len as u8,
            0,
            program_ids.len() as u8,
            account_keys,
            Hash::default(),
            instructions,
        );
        Transaction::new(from_keypairs, message, recent_blockhash)
    }
    /// Returns the instruction data of the instruction at `instruction_index`.
    pub fn data(&self, instruction_index: usize) -> &[u8] {
        &self.message.instructions[instruction_index].data
    }
    // Resolve (instruction index, account position) to an index into
    // `message.account_keys`, or None if either index is out of range.
    fn key_index(&self, instruction_index: usize, accounts_index: usize) -> Option<usize> {
        self.message
            .instructions
            .get(instruction_index)
            .and_then(|instruction| instruction.accounts.get(accounts_index))
            .map(|&account_keys_index| account_keys_index as usize)
    }
    /// Returns the pubkey referenced by the given instruction/account position, if any.
    pub fn key(&self, instruction_index: usize, accounts_index: usize) -> Option<&Pubkey> {
        self.key_index(instruction_index, accounts_index)
            .and_then(|account_keys_index| self.message.account_keys.get(account_keys_index))
    }
pub fn signer_key(&self, instruction_index: usize, accounts_index: usize) -> Option<&Pubkey> {
match self.key_index(instruction_index, accounts_index) {
None => None,
Some(signature_index) => {
if signature_index >= self.signatures.len() {
return None;
}
self.message.account_keys.get(signature_index)
}
}
}
    /// Return a message containing all data that should be signed.
    pub fn message(&self) -> &Message {
        &self.message
    }
    /// Return the serialized message data to sign.
    pub fn message_data(&self) -> Vec<u8> {
        self.message().serialize()
    }
    /// Check keys and keypair lengths, then sign this transaction.
    ///
    /// # Panics
    ///
    /// Panics when signing fails, use [`Transaction::try_sign`] to handle the error.
    pub fn sign<T: Signers>(&mut self, keypairs: &T, recent_blockhash: Hash) {
        // Thin panicking wrapper over `try_sign` for callers that treat a
        // signing failure as fatal.
        if let Err(e) = self.try_sign(keypairs, recent_blockhash) {
            panic!("Transaction::sign failed with error {:?}", e);
        }
    }
    /// Sign using some subset of required keys
    /// if recent_blockhash is not the same as currently in the transaction,
    /// clear any prior signatures and update recent_blockhash
    ///
    /// # Panics
    ///
    /// Panics when signing fails, use [`Transaction::try_partial_sign`] to handle the error.
    pub fn partial_sign<T: Signers>(&mut self, keypairs: &T, recent_blockhash: Hash) {
        // Thin panicking wrapper over `try_partial_sign`.
        if let Err(e) = self.try_partial_sign(keypairs, recent_blockhash) {
            panic!("Transaction::partial_sign failed with error {:?}", e);
        }
    }
    /// Sign the transaction and place the signatures in their associated positions in `signatures`
    /// without checking that the positions are correct.
    ///
    /// # Panics
    ///
    /// Panics when signing fails, use [`Transaction::try_partial_sign_unchecked`] to handle the error.
    pub fn partial_sign_unchecked<T: Signers>(
        &mut self,
        keypairs: &T,
        positions: Vec<usize>,
        recent_blockhash: Hash,
    ) {
        // Thin panicking wrapper over `try_partial_sign_unchecked`.
        if let Err(e) = self.try_partial_sign_unchecked(keypairs, positions, recent_blockhash) {
            panic!(
                "Transaction::partial_sign_unchecked failed with error {:?}",
                e
            );
        }
    }
/// Check keys and keypair lengths, then sign this transaction, returning any signing errors
/// encountered
pub fn try_sign<T: Signers>(
&mut self,
keypairs: &T,
recent_blockhash: Hash,
) -> result::Result<(), SignerError> {
self.try_partial_sign(keypairs, recent_blockhash)?;
if !self.is_signed() {
Err(SignerError::NotEnoughSigners)
} else {
Ok(())
}
}
/// Sign using some subset of required keys, returning any signing errors encountered. If
/// recent_blockhash is not the same as currently in the transaction, clear any prior
/// signatures and update recent_blockhash
pub fn try_partial_sign<T: Signers>(
&mut self,
keypairs: &T,
recent_blockhash: Hash,
) -> result::Result<(), SignerError> {
let positions = self.get_signing_keypair_positions(&keypairs.pubkeys())?;
if positions.iter().any(|pos| pos.is_none()) {
return Err(SignerError::KeypairPubkeyMismatch);
}
let positions: Vec<usize> = positions.iter().map(|pos| pos.unwrap()).collect();
self.try_partial_sign_unchecked(keypairs, positions, recent_blockhash)
}
/// Sign the transaction, returning any signing errors encountered, and place the
/// signatures in their associated positions in `signatures` without checking that the
/// positions are correct.
pub fn try_partial_sign_unchecked<T: Signers>(
&mut self,
keypairs: &T,
positions: Vec<usize>,
recent_blockhash: Hash,
) -> result::Result<(), SignerError> {
// if you change the blockhash, you're re-signing...
if recent_blockhash != self.message.recent_blockhash {
self.message.recent_blockhash = recent_blockhash;
self.signatures
.iter_mut()
.for_each(|signature| *signature = Signature::default());
}
let signatures = keypairs.try_sign_message(&self.message_data())?;
for i in 0..positions.len() {
self.signatures[positions[i]] = signatures[i];
}
Ok(())
}
/// Verify the transaction
pub fn verify(&self) -> Result<()> {
let message_bytes = self.message_data();
if !self
._verify_with_results(&message_bytes)
.iter()
.all(|verify_result| *verify_result)
{
Err(TransactionError::SignatureFailure)
} else {
Ok(())
}
}
pub fn get_invalid_signature() -> Signature {
Signature::default()
}
/// Verify the transaction and hash its message
pub fn verify_and_hash_message(&self) -> Result<Hash> {
let message_bytes = self.message_data();
if !self
._verify_with_results(&message_bytes)
.iter()
.all(|verify_result| *verify_result)
{
Err(TransactionError::SignatureFailure)
} else {
Ok(Message::hash_raw_message(&message_bytes))
}
}
pub fn verify_with_results(&self) -> Vec<bool> {
self._verify_with_results(&self.message_data())
}
pub(crate) fn _verify_with_results(&self, message_bytes: &[u8]) -> Vec<bool> {
self.signatures
.iter()
.zip(&self.message.account_keys)
.map(|(signature, pubkey)| signature.verify(pubkey.as_ref(), message_bytes))
.collect()
}
/// Verify the precompiled programs in this transaction
pub fn verify_precompiles(&self, feature_set: &Arc<feature_set::FeatureSet>) -> Result<()> {
for instruction in &self.message().instructions {
// The Transaction may not be sanitized at this point
if instruction.program_id_index as usize >= self.message().account_keys.len() {
return Err(TransactionError::AccountNotFound);
}
let program_id = &self.message().account_keys[instruction.program_id_index as usize];
verify_if_precompile(
program_id,
instruction,
&self.message().instructions,
feature_set,
)
.map_err(|_| TransactionError::InvalidAccountIndex)?;
}
Ok(())
}
/// Get the positions of the pubkeys in `account_keys` associated with signing keypairs
pub fn get_signing_keypair_positions(&self, pubkeys: &[Pubkey]) -> Result<Vec<Option<usize>>> {
if self.message.account_keys.len() < self.message.header.num_required_signatures as usize {
return Err(TransactionError::InvalidAccountIndex);
}
let signed_keys =
&self.message.account_keys[0..self.message.header.num_required_signatures as usize];
Ok(pubkeys
.iter()
.map(|pubkey| signed_keys.iter().position(|x| x == pubkey))
.collect())
}
/// Replace all the signatures and pubkeys
pub fn replace_signatures(&mut self, signers: &[(Pubkey, Signature)]) -> Result<()> {
let num_required_signatures = self.message.header.num_required_signatures as usize;
if signers.len() != num_required_signatures
|| self.signatures.len() != num_required_signatures
|| self.message.account_keys.len() < num_required_signatures
{
return Err(TransactionError::InvalidAccountIndex);
}
signers
.iter()
.enumerate()
.for_each(|(i, (pubkey, signature))| {
self.signatures[i] = *signature;
self.message.account_keys[i] = *pubkey;
});
self.verify()
}
pub fn is_signed(&self) -> bool {
self.signatures
.iter()
.all(|signature| *signature != Signature::default())
}
}
pub fn uses_durable_nonce(tx: &Transaction) -> Option<&CompiledInstruction> {
let message = tx.message();
message
.instructions
.get(NONCED_TX_MARKER_IX_INDEX as usize)
.filter(|instruction| {
// Is system program
matches!(
message.account_keys.get(instruction.program_id_index as usize),
Some(program_id) if system_program::check_id(program_id)
)
// Is a nonce advance instruction
&& matches!(
limited_deserialize(&instruction.data),
Ok(SystemInstruction::AdvanceNonceAccount)
)
// Nonce account is writable
&& matches!(
instruction.accounts.get(0),
Some(index) if message.is_writable(*index as usize, true)
)
})
}
#[deprecated]
pub fn get_nonce_pubkey_from_instruction<'a>(
ix: &CompiledInstruction,
tx: &'a Transaction,
) -> Option<&'a Pubkey> {
ix.accounts.get(0).and_then(|idx| {
let idx = *idx as usize;
tx.message().account_keys.get(idx)
})
}
#[cfg(test)]
mod tests {
#![allow(deprecated)]
use {
super::*,
crate::{
hash::hash,
instruction::AccountMeta,
signature::{Keypair, Presigner, Signer},
system_instruction, sysvar,
},
bincode::{deserialize, serialize, serialized_size},
std::mem::size_of,
};
fn get_program_id(tx: &Transaction, instruction_index: usize) -> &Pubkey {
let message = tx.message();
let instruction = &message.instructions[instruction_index];
instruction.program_id(&message.account_keys)
}
#[test]
fn test_refs() {
let key = Keypair::new();
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let prog1 = solana_sdk::pubkey::new_rand();
let prog2 = solana_sdk::pubkey::new_rand();
let instructions = vec![
CompiledInstruction::new(3, &(), vec![0, 1]),
CompiledInstruction::new(4, &(), vec![0, 2]),
];
let tx = Transaction::new_with_compiled_instructions(
&[&key],
&[key1, key2],
Hash::default(),
vec![prog1, prog2],
instructions,
);
assert!(tx.sanitize().is_ok());
assert_eq!(tx.key(0, 0), Some(&key.pubkey()));
assert_eq!(tx.signer_key(0, 0), Some(&key.pubkey()));
assert_eq!(tx.key(1, 0), Some(&key.pubkey()));
assert_eq!(tx.signer_key(1, 0), Some(&key.pubkey()));
assert_eq!(tx.key(0, 1), Some(&key1));
assert_eq!(tx.signer_key(0, 1), None);
assert_eq!(tx.key(1, 1), Some(&key2));
assert_eq!(tx.signer_key(1, 1), None);
assert_eq!(tx.key(2, 0), None);
assert_eq!(tx.signer_key(2, 0), None);
assert_eq!(tx.key(0, 2), None);
assert_eq!(tx.signer_key(0, 2), None);
assert_eq!(*get_program_id(&tx, 0), prog1);
assert_eq!(*get_program_id(&tx, 1), prog2);
}
#[test]
fn test_refs_invalid_program_id() {
let key = Keypair::new();
let instructions = vec![CompiledInstruction::new(1, &(), vec![])];
let tx = Transaction::new_with_compiled_instructions(
&[&key],
&[],
Hash::default(),
vec![],
instructions,
);
assert_eq!(tx.sanitize(), Err(SanitizeError::IndexOutOfBounds));
}
#[test]
fn test_refs_invalid_account() {
let key = Keypair::new();
let instructions = vec![CompiledInstruction::new(1, &(), vec![2])];
let tx = Transaction::new_with_compiled_instructions(
&[&key],
&[],
Hash::default(),
vec![Pubkey::default()],
instructions,
);
assert_eq!(*get_program_id(&tx, 0), Pubkey::default());
assert_eq!(tx.sanitize(), Err(SanitizeError::IndexOutOfBounds));
}
#[test]
fn test_sanitize_txs() {
let key = Keypair::new();
let id0 = Pubkey::default();
let program_id = solana_sdk::pubkey::new_rand();
let ix = Instruction::new_with_bincode(
program_id,
&0,
vec![
AccountMeta::new(key.pubkey(), true),
AccountMeta::new(id0, true),
],
);
let mut tx = Transaction::new_with_payer(&[ix], Some(&key.pubkey()));
let o = tx.clone();
assert_eq!(tx.sanitize(), Ok(()));
assert_eq!(tx.message.account_keys.len(), 3);
tx = o.clone();
tx.message.header.num_required_signatures = 3;
assert_eq!(tx.sanitize(), Err(SanitizeError::IndexOutOfBounds));
tx = o.clone();
tx.message.header.num_readonly_signed_accounts = 4;
tx.message.header.num_readonly_unsigned_accounts = 0;
assert_eq!(tx.sanitize(), Err(SanitizeError::IndexOutOfBounds));
tx = o.clone();
tx.message.header.num_readonly_signed_accounts = 2;
tx.message.header.num_readonly_unsigned_accounts = 2;
assert_eq!(tx.sanitize(), Err(SanitizeError::IndexOutOfBounds));
tx = o.clone();
tx.message.header.num_readonly_signed_accounts = 0;
tx.message.header.num_readonly_unsigned_accounts = 4;
assert_eq!(tx.sanitize(), Err(SanitizeError::IndexOutOfBounds));
tx = o.clone();
tx.message.instructions[0].program_id_index = 3;
assert_eq!(tx.sanitize(), Err(SanitizeError::IndexOutOfBounds));
tx = o.clone();
tx.message.instructions[0].accounts[0] = 3;
assert_eq!(tx.sanitize(), Err(SanitizeError::IndexOutOfBounds));
tx = o.clone();
tx.message.instructions[0].program_id_index = 0;
assert_eq!(tx.sanitize(), Err(SanitizeError::IndexOutOfBounds));
tx = o.clone();
tx.message.header.num_readonly_signed_accounts = 2;
tx.message.header.num_readonly_unsigned_accounts = 3;
tx.message.account_keys.resize(4, Pubkey::default());
assert_eq!(tx.sanitize(), Err(SanitizeError::IndexOutOfBounds));
tx = o;
tx.message.header.num_readonly_signed_accounts = 2;
tx.message.header.num_required_signatures = 1;
assert_eq!(tx.sanitize(), Err(SanitizeError::IndexOutOfBounds));
}
fn create_sample_transaction() -> Transaction {
let keypair = Keypair::from_bytes(&[
48, 83, 2, 1, 1, 48, 5, 6, 3, 43, 101, 112, 4, 34, 4, 32, 255, 101, 36, 24, 124, 23,
167, 21, 132, 204, 155, 5, 185, 58, 121, 75, 156, 227, 116, 193, 215, 38, 142, 22, 8,
14, 229, 239, 119, 93, 5, 218, 161, 35, 3, 33, 0, 36, 100, 158, 252, 33, 161, 97, 185,
62, 89, 99,
])
.unwrap();
let to = Pubkey::new(&[
1, 1, 1, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 7, 6, 5, 4,
1, 1, 1,
]);
let program_id = Pubkey::new(&[
2, 2, 2, 4, 5, 6, 7, 8, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 8, 7, 6, 5, 4,
2, 2, 2,
]);
let account_metas = vec![
AccountMeta::new(keypair.pubkey(), true),
AccountMeta::new(to, false),
];
let instruction =
Instruction::new_with_bincode(program_id, &(1u8, 2u8, 3u8), account_metas);
let message = Message::new(&[instruction], Some(&keypair.pubkey()));
Transaction::new(&[&keypair], message, Hash::default())
}
#[test]
fn test_transaction_serialize() {
let tx = create_sample_transaction();
let ser = serialize(&tx).unwrap();
let deser = deserialize(&ser).unwrap();
assert_eq!(tx, deser);
}
/// Detect changes to the serialized size of payment transactions, which affects TPS.
#[test]
fn test_transaction_minimum_serialized_size() {
let alice_keypair = Keypair::new();
let alice_pubkey = alice_keypair.pubkey();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let ix = system_instruction::transfer(&alice_pubkey, &bob_pubkey, 42);
let expected_data_size = size_of::<u32>() + size_of::<u64>();
assert_eq!(expected_data_size, 12);
assert_eq!(
ix.data.len(),
expected_data_size,
"unexpected system instruction size"
);
let expected_instruction_size = 1 + 1 + ix.accounts.len() + 1 + expected_data_size;
assert_eq!(expected_instruction_size, 17);
let message = Message::new(&[ix], Some(&alice_pubkey));
assert_eq!(
serialized_size(&message.instructions[0]).unwrap() as usize,
expected_instruction_size,
"unexpected Instruction::serialized_size"
);
let tx = Transaction::new(&[&alice_keypair], message, Hash::default());
let len_size = 1;
let num_required_sigs_size = 1;
let num_readonly_accounts_size = 2;
let blockhash_size = size_of::<Hash>();
let expected_transaction_size = len_size
+ (tx.signatures.len() * size_of::<Signature>())
+ num_required_sigs_size
+ num_readonly_accounts_size
+ len_size
+ (tx.message.account_keys.len() * size_of::<Pubkey>())
+ blockhash_size
+ len_size
+ expected_instruction_size;
assert_eq!(expected_transaction_size, 215);
assert_eq!(
serialized_size(&tx).unwrap() as usize,
expected_transaction_size,
"unexpected serialized transaction size"
);
}
/// Detect binary changes in the serialized transaction data, which could have a downstream
/// affect on SDKs and applications
#[test]
fn test_sdk_serialize() {
assert_eq!(
serialize(&create_sample_transaction()).unwrap(),
vec![
1, 71, 59, 9, 187, 190, 129, 150, 165, 21, 33, 158, 72, 87, 110, 144, 120, 79, 238,
132, 134, 105, 39, 102, 116, 209, 29, 229, 154, 36, 105, 44, 172, 118, 131, 22,
124, 131, 179, 142, 176, 27, 117, 160, 89, 102, 224, 204, 1, 252, 141, 2, 136, 0,
37, 218, 225, 129, 92, 154, 250, 59, 97, 178, 10, 1, 0, 1, 3, 156, 227, 116, 193,
215, 38, 142, 22, 8, 14, 229, 239, 119, 93, 5, 218, 161, 35, 3, 33, 0, 36, 100,
158, 252, 33, 161, 97, 185, 62, 89, 99, 1, 1, 1, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 7, 6, 5, 4, 1, 1, 1, 2, 2, 2, 4, 5, 6, 7, 8, 9, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 8, 7, 6, 5, 4, 2, 2, 2, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2,
2, 0, 1, 3, 1, 2, 3
]
);
}
#[test]
#[should_panic]
fn test_transaction_missing_key() {
let keypair = Keypair::new();
let message = Message::new(&[], None);
Transaction::new_unsigned(message).sign(&[&keypair], Hash::default());
}
#[test]
#[should_panic]
fn test_partial_sign_mismatched_key() {
let keypair = Keypair::new();
let fee_payer = solana_sdk::pubkey::new_rand();
let ix = Instruction::new_with_bincode(
Pubkey::default(),
&0,
vec![AccountMeta::new(fee_payer, true)],
);
let message = Message::new(&[ix], Some(&fee_payer));
Transaction::new_unsigned(message).partial_sign(&[&keypair], Hash::default());
}
#[test]
fn test_partial_sign() {
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let ix = Instruction::new_with_bincode(
Pubkey::default(),
&0,
vec![
AccountMeta::new(keypair0.pubkey(), true),
AccountMeta::new(keypair1.pubkey(), true),
AccountMeta::new(keypair2.pubkey(), true),
],
);
let message = Message::new(&[ix], Some(&keypair0.pubkey()));
let mut tx = Transaction::new_unsigned(message);
tx.partial_sign(&[&keypair0, &keypair2], Hash::default());
assert!(!tx.is_signed());
tx.partial_sign(&[&keypair1], Hash::default());
assert!(tx.is_signed());
let hash = hash(&[1]);
tx.partial_sign(&[&keypair1], hash);
assert!(!tx.is_signed());
tx.partial_sign(&[&keypair0, &keypair2], hash);
assert!(tx.is_signed());
}
#[test]
#[should_panic]
fn test_transaction_missing_keypair() {
let program_id = Pubkey::default();
let keypair0 = Keypair::new();
let id0 = keypair0.pubkey();
let ix = Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, true)]);
let message = Message::new(&[ix], Some(&id0));
Transaction::new_unsigned(message).sign(&Vec::<&Keypair>::new(), Hash::default());
}
#[test]
#[should_panic]
fn test_transaction_wrong_key() {
let program_id = Pubkey::default();
let keypair0 = Keypair::new();
let wrong_id = Pubkey::default();
let ix =
Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(wrong_id, true)]);
let message = Message::new(&[ix], Some(&wrong_id));
Transaction::new_unsigned(message).sign(&[&keypair0], Hash::default());
}
#[test]
fn test_transaction_correct_key() {
let program_id = Pubkey::default();
let keypair0 = Keypair::new();
let id0 = keypair0.pubkey();
let ix = Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, true)]);
let message = Message::new(&[ix], Some(&id0));
let mut tx = Transaction::new_unsigned(message);
tx.sign(&[&keypair0], Hash::default());
assert_eq!(
tx.message.instructions[0],
CompiledInstruction::new(1, &0, vec![0])
);
assert!(tx.is_signed());
}
#[test]
fn test_transaction_instruction_with_duplicate_keys() {
let program_id = Pubkey::default();
let keypair0 = Keypair::new();
let id0 = keypair0.pubkey();
let id1 = solana_sdk::pubkey::new_rand();
let ix = Instruction::new_with_bincode(
program_id,
&0,
vec![
AccountMeta::new(id0, true),
AccountMeta::new(id1, false),
AccountMeta::new(id0, false),
AccountMeta::new(id1, false),
],
);
let message = Message::new(&[ix], Some(&id0));
let mut tx = Transaction::new_unsigned(message);
tx.sign(&[&keypair0], Hash::default());
assert_eq!(
tx.message.instructions[0],
CompiledInstruction::new(2, &0, vec![0, 1, 0, 1])
);
assert!(tx.is_signed());
}
#[test]
fn test_try_sign_dyn_keypairs() {
let program_id = Pubkey::default();
let keypair = Keypair::new();
let pubkey = keypair.pubkey();
let presigner_keypair = Keypair::new();
let presigner_pubkey = presigner_keypair.pubkey();
let ix = Instruction::new_with_bincode(
program_id,
&0,
vec![
AccountMeta::new(pubkey, true),
AccountMeta::new(presigner_pubkey, true),
],
);
let message = Message::new(&[ix], Some(&pubkey));
let mut tx = Transaction::new_unsigned(message);
let presigner_sig = presigner_keypair.sign_message(&tx.message_data());
let presigner = Presigner::new(&presigner_pubkey, &presigner_sig);
let signers: Vec<&dyn Signer> = vec![&keypair, &presigner];
let res = tx.try_sign(&signers, Hash::default());
assert_eq!(res, Ok(()));
assert_eq!(tx.signatures[0], keypair.sign_message(&tx.message_data()));
assert_eq!(tx.signatures[1], presigner_sig);
// Wrong key should error, not panic
let another_pubkey = solana_sdk::pubkey::new_rand();
let ix = Instruction::new_with_bincode(
program_id,
&0,
vec![
AccountMeta::new(another_pubkey, true),
AccountMeta::new(presigner_pubkey, true),
],
);
let message = Message::new(&[ix], Some(&another_pubkey));
let mut tx = Transaction::new_unsigned(message);
let res = tx.try_sign(&signers, Hash::default());
assert!(res.is_err());
assert_eq!(
tx.signatures,
vec![Signature::default(), Signature::default()]
);
}
fn nonced_transfer_tx() -> (Pubkey, Pubkey, Transaction) {
let from_keypair = Keypair::new();
let from_pubkey = from_keypair.pubkey();
let nonce_keypair = Keypair::new();
let nonce_pubkey = nonce_keypair.pubkey();
let instructions = [
system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey),
system_instruction::transfer(&from_pubkey, &nonce_pubkey, 42),
];
let message = Message::new(&instructions, Some(&nonce_pubkey));
let tx = Transaction::new(&[&from_keypair, &nonce_keypair], message, Hash::default());
(from_pubkey, nonce_pubkey, tx)
}
#[test]
fn tx_uses_nonce_ok() {
let (_, _, tx) = nonced_transfer_tx();
assert!(uses_durable_nonce(&tx).is_some());
}
#[test]
fn tx_uses_nonce_empty_ix_fail() {
assert!(uses_durable_nonce(&Transaction::default()).is_none());
}
#[test]
fn tx_uses_nonce_bad_prog_id_idx_fail() {
let (_, _, mut tx) = nonced_transfer_tx();
tx.message.instructions.get_mut(0).unwrap().program_id_index = 255u8;
assert!(uses_durable_nonce(&tx).is_none());
}
#[test]
fn tx_uses_nonce_first_prog_id_not_nonce_fail() {
let from_keypair = Keypair::new();
let from_pubkey = from_keypair.pubkey();
let nonce_keypair = Keypair::new();
let nonce_pubkey = nonce_keypair.pubkey();
let instructions = [
system_instruction::transfer(&from_pubkey, &nonce_pubkey, 42),
system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey),
];
let message = Message::new(&instructions, Some(&from_pubkey));
let tx = Transaction::new(&[&from_keypair, &nonce_keypair], message, Hash::default());
assert!(uses_durable_nonce(&tx).is_none());
}
#[test]
fn tx_uses_ro_nonce_account() {
let from_keypair = Keypair::new();
let from_pubkey = from_keypair.pubkey();
let nonce_keypair = Keypair::new();
let nonce_pubkey = nonce_keypair.pubkey();
let account_metas = vec![
AccountMeta::new_readonly(nonce_pubkey, false),
#[allow(deprecated)]
AccountMeta::new_readonly(sysvar::recent_blockhashes::id(), false),
AccountMeta::new_readonly(nonce_pubkey, true),
];
let nonce_instruction = Instruction::new_with_bincode(
system_program::id(),
&system_instruction::SystemInstruction::AdvanceNonceAccount,
account_metas,
);
let tx = Transaction::new_signed_with_payer(
&[nonce_instruction],
Some(&from_pubkey),
&[&from_keypair, &nonce_keypair],
Hash::default(),
);
assert!(uses_durable_nonce(&tx).is_none());
}
#[test]
fn tx_uses_nonce_wrong_first_nonce_ix_fail() {
let from_keypair = Keypair::new();
let from_pubkey = from_keypair.pubkey();
let nonce_keypair = Keypair::new();
let nonce_pubkey = nonce_keypair.pubkey();
let instructions = [
system_instruction::withdraw_nonce_account(
&nonce_pubkey,
&nonce_pubkey,
&from_pubkey,
42,
),
system_instruction::transfer(&from_pubkey, &nonce_pubkey, 42),
];
let message = Message::new(&instructions, Some(&nonce_pubkey));
let tx = Transaction::new(&[&from_keypair, &nonce_keypair], message, Hash::default());
assert!(uses_durable_nonce(&tx).is_none());
}
#[test]
fn get_nonce_pub_from_ix_ok() {
let (_, nonce_pubkey, tx) = nonced_transfer_tx();
let nonce_ix = uses_durable_nonce(&tx).unwrap();
assert_eq!(
get_nonce_pubkey_from_instruction(nonce_ix, &tx),
Some(&nonce_pubkey),
);
}
#[test]
fn get_nonce_pub_from_ix_no_accounts_fail() {
let (_, _, tx) = nonced_transfer_tx();
let nonce_ix = uses_durable_nonce(&tx).unwrap();
let mut nonce_ix = nonce_ix.clone();
nonce_ix.accounts.clear();
assert_eq!(get_nonce_pubkey_from_instruction(&nonce_ix, &tx), None,);
}
#[test]
fn get_nonce_pub_from_ix_bad_acc_idx_fail() {
let (_, _, tx) = nonced_transfer_tx();
let nonce_ix = uses_durable_nonce(&tx).unwrap();
let mut nonce_ix = nonce_ix.clone();
nonce_ix.accounts[0] = 255u8;
assert_eq!(get_nonce_pubkey_from_instruction(&nonce_ix, &tx), None,);
}
#[test]
fn tx_keypair_pubkey_mismatch() {
let from_keypair = Keypair::new();
let from_pubkey = from_keypair.pubkey();
let to_pubkey = Pubkey::new_unique();
let instructions = [system_instruction::transfer(&from_pubkey, &to_pubkey, 42)];
let mut tx = Transaction::new_with_payer(&instructions, Some(&from_pubkey));
let unused_keypair = Keypair::new();
let err = tx
.try_partial_sign(&[&from_keypair, &unused_keypair], Hash::default())
.unwrap_err();
assert_eq!(err, SignerError::KeypairPubkeyMismatch);
}
}
| 36.019211 | 103 | 0.583527 |
7a8ff3860a9bdab5ecbdd194f70af60425863085
| 1,824 |
use gemlab::shapes::{GeoKind, Scratchpad};
use gemlab::StrError;
use russell_chk::assert_vec_approx_eq;
use russell_lab::Vector;
fn main() -> Result<(), StrError> {
// 3-------------2 ξ₀ ξ₁
// | ξ₁ | node r s
// | | | 0 -1.0 -1.0
// h | +--ξ₀ | 1 1.0 -1.0
// | | 2 1.0 1.0
// | | 3 -1.0 1.0
// 0-------------1
// (x0,y0) w
// constants
let (x0, y0) = (3.0, 4.0);
let (w, h) = (2.0, 1.0);
// scratchpad
let space_ndim = 2;
let mut pad = Scratchpad::new(space_ndim, GeoKind::Qua4)?;
pad.set_xx(0, 0, x0);
pad.set_xx(0, 1, y0);
pad.set_xx(1, 0, x0 + w);
pad.set_xx(1, 1, y0);
pad.set_xx(2, 0, x0 + w);
pad.set_xx(2, 1, y0 + h);
pad.set_xx(3, 0, x0);
pad.set_xx(3, 1, y0 + h);
// perform interpolation
//
// Any coordinate within the element is calculated
// by the following "isoparametric" formula:
//
// → → → →
// x(ξ) = Σ Nᵐ(ξ) xᵐ
// m
//
// Let us calculate the coordinates, at the middle of the element
// with ξ = [0.0, 0.0]
// compute interpolation functions @ ksi_middle
let ksi_middle = &[0.0, 0.0];
(pad.fn_interp)(&mut pad.interp, ksi_middle);
// perform summation
let nnode = pad.kind.nnode();
let mut x_interpolated = Vector::new(space_ndim);
for m in 0..nnode {
for j in 0..space_ndim {
x_interpolated[j] += pad.interp[m] * pad.xxt[j][m];
}
}
// check
let xm = x0 + w / 2.0;
let ym = y0 + h / 2.0;
println!("xm = {}, ym = {}", xm, ym);
println!("x_interpolated =\n{}", x_interpolated);
assert_vec_approx_eq!(x_interpolated.as_data(), &[xm, ym], 1e-15);
Ok(())
}
| 28.061538 | 70 | 0.48739 |
64109d68eb912a1cf1606f8cce8e69463d118c2a
| 123,448 |
// DO NOT EDIT !
// This file was generated automatically from 'src/mako/cli/main.rs.mako'
// DO NOT EDIT !
#![allow(unused_variables, unused_imports, dead_code, unused_mut)]
extern crate tokio;
#[macro_use]
extern crate clap;
use std::env;
use std::io::{self, Write};
use clap::{App, SubCommand, Arg};
use google_file1::{api, Error, oauth2};
mod client;
use client::{InvalidOptionsError, CLIError, arg_from_str, writer_from_opts, parse_kv_arg,
input_file_from_opts, input_mime_from_opts, FieldCursor, FieldError, CallType, UploadProtocol,
calltype_from_str, remove_json_null_values, ComplexType, JsonType, JsonTypeInfo};
use std::default::Default;
use std::str::FromStr;
use serde_json as json;
use clap::ArgMatches;
enum DoitError {
IoError(String, io::Error),
ApiError(Error),
}
struct Engine<'n> {
opt: ArgMatches<'n>,
hub: api::CloudFilestore,
gp: Vec<&'static str>,
gpm: Vec<(&'static str, &'static str)>,
}
impl<'n> Engine<'n> {
async fn _projects_locations_backups_create(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut field_cursor = FieldCursor::default();
let mut object = json::value::Value::Object(Default::default());
for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let last_errc = err.issues.len();
let (key, value) = parse_kv_arg(&*kvarg, err, false);
let mut temp_cursor = field_cursor.clone();
if let Err(field_err) = temp_cursor.set(&*key) {
err.issues.push(field_err);
}
if value.is_none() {
field_cursor = temp_cursor.clone();
if err.issues.len() > last_errc {
err.issues.remove(last_errc);
}
continue;
}
let type_info: Option<(&'static str, JsonTypeInfo)> =
match &temp_cursor.to_string()[..] {
"capacity-gb" => Some(("capacityGb", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"create-time" => Some(("createTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"description" => Some(("description", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"download-bytes" => Some(("downloadBytes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"labels" => Some(("labels", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Map })),
"name" => Some(("name", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"satisfies-pzs" => Some(("satisfiesPzs", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
"source-file-share" => Some(("sourceFileShare", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"source-instance" => Some(("sourceInstance", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"source-instance-tier" => Some(("sourceInstanceTier", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"state" => Some(("state", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"storage-bytes" => Some(("storageBytes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
_ => {
let suggestion = FieldCursor::did_you_mean(key, &vec!["capacity-gb", "create-time", "description", "download-bytes", "labels", "name", "satisfies-pzs", "source-file-share", "source-instance", "source-instance-tier", "state", "storage-bytes"]);
err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
None
}
};
if let Some((field_cursor_str, type_info)) = type_info {
FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
}
}
let mut request: api::Backup = json::value::from_value(object).unwrap();
let mut call = self.hub.projects().locations_backups_create(request, opt.value_of("parent").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
"backup-id" => {
call = call.backup_id(value.unwrap_or(""));
},
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v.extend(["backup-id"].iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
async fn _projects_locations_backups_delete(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut call = self.hub.projects().locations_backups_delete(opt.value_of("name").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
async fn _projects_locations_backups_get(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut call = self.hub.projects().locations_backups_get(opt.value_of("name").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
async fn _projects_locations_backups_list(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut call = self.hub.projects().locations_backups_list(opt.value_of("parent").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
"page-token" => {
call = call.page_token(value.unwrap_or(""));
},
"page-size" => {
call = call.page_size(arg_from_str(value.unwrap_or("-0"), err, "page-size", "integer"));
},
"order-by" => {
call = call.order_by(value.unwrap_or(""));
},
"filter" => {
call = call.filter(value.unwrap_or(""));
},
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v.extend(["filter", "order-by", "page-size", "page-token"].iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
    /// `projects locations-backups-patch`: assembles an `api::Backup` request
    /// body from repeated `kv` arguments, applies `-v` call parameters
    /// (notably `update-mask`), and executes the patch unless `dry_run`.
    ///
    /// Argument errors accumulate in `err`; the call only runs once no
    /// issues were recorded.
    async fn _projects_locations_backups_patch(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
                                                    -> Result<(), DoitError> {
        // Cursor tracking the current position inside the nested request object.
        let mut field_cursor = FieldCursor::default();
        // The request body is built as a JSON object and deserialized at the end.
        let mut object = json::value::Value::Object(Default::default());
        for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let last_errc = err.issues.len();
            let (key, value) = parse_kv_arg(&*kvarg, err, false);
            let mut temp_cursor = field_cursor.clone();
            if let Err(field_err) = temp_cursor.set(&*key) {
                err.issues.push(field_err);
            }
            if value.is_none() {
                // A key with no value only moves the cursor; drop the error that
                // parse_kv_arg may have just recorded for the missing value.
                field_cursor = temp_cursor.clone();
                if err.issues.len() > last_errc {
                    err.issues.remove(last_errc);
                }
                continue;
            }
            // Map the CLI field spelling to its JSON name and type descriptor.
            let type_info: Option<(&'static str, JsonTypeInfo)> =
                match &temp_cursor.to_string()[..] {
                    "capacity-gb" => Some(("capacityGb", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "create-time" => Some(("createTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "description" => Some(("description", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "download-bytes" => Some(("downloadBytes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "labels" => Some(("labels", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Map })),
                    "name" => Some(("name", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "satisfies-pzs" => Some(("satisfiesPzs", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "source-file-share" => Some(("sourceFileShare", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "source-instance" => Some(("sourceInstance", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "source-instance-tier" => Some(("sourceInstanceTier", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "state" => Some(("state", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "storage-bytes" => Some(("storageBytes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    _ => {
                        // Unknown field: record it together with a spelling suggestion.
                        let suggestion = FieldCursor::did_you_mean(key, &vec!["capacity-gb", "create-time", "description", "download-bytes", "labels", "name", "satisfies-pzs", "source-file-share", "source-instance", "source-instance-tier", "state", "storage-bytes"]);
                        err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
                        None
                    }
                };
            if let Some((field_cursor_str, type_info)) = type_info {
                FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
            }
        }
        // NOTE(review): unwrap assumes the assembled JSON matches the schema;
        // malformed fields should already have been reported into `err` above.
        let mut request: api::Backup = json::value::from_value(object).unwrap();
        let mut call = self.hub.projects().locations_backups_patch(request, opt.value_of("name").unwrap_or(""));
        // Apply "-v" call parameters: update-mask first, then global parameters.
        for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let (key, value) = parse_kv_arg(&*parg, err, false);
            match key {
                "update-mask" => {
                    call = call.update_mask(value.unwrap_or(""));
                },
                _ => {
                    let mut found = false;
                    for param in &self.gp {
                        if key == *param {
                            found = true;
                            call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
                            break;
                        }
                    }
                    if !found {
                        err.issues.push(CLIError::UnknownParameter(key.to_string(),
                                                                  {let mut v = Vec::new();
                                                                           v.extend(self.gp.iter().map(|v|*v));
                                                                           v.extend(["update-mask"].iter().map(|v|*v));
                                                                           v } ));
                    }
                }
            }
        }
        let protocol = CallType::Standard;
        if dry_run {
            Ok(())
        } else {
            assert!(err.issues.len() == 0);
            // Extra OAuth scopes requested on the command line.
            for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
                call = call.add_scope(scope);
            }
            let mut ostream = match writer_from_opts(opt.value_of("out")) {
                Ok(mut f) => f,
                Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
            };
            match match protocol {
                CallType::Standard => call.doit().await,
                _ => unreachable!()
            } {
                Err(api_err) => Err(DoitError::ApiError(api_err)),
                Ok((mut response, output_schema)) => {
                    // Pretty-print the decoded response, omitting null fields.
                    let mut value = json::value::to_value(&output_schema).expect("serde to work");
                    remove_json_null_values(&mut value);
                    json::to_writer_pretty(&mut ostream, &value).unwrap();
                    ostream.flush().unwrap();
                    Ok(())
                }
            }
        }
    }
async fn _projects_locations_get(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut call = self.hub.projects().locations_get(opt.value_of("name").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
    /// `projects locations-instances-create`: assembles an `api::Instance`
    /// request body from repeated `kv` arguments, applies `-v` call parameters
    /// (notably `instance-id`), and executes the create unless `dry_run`.
    ///
    /// Argument errors accumulate in `err`; the call only runs once no
    /// issues were recorded.
    async fn _projects_locations_instances_create(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
                                                    -> Result<(), DoitError> {
        // Cursor tracking the current position inside the nested request object.
        let mut field_cursor = FieldCursor::default();
        // The request body is built as a JSON object and deserialized at the end.
        let mut object = json::value::Value::Object(Default::default());
        for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let last_errc = err.issues.len();
            let (key, value) = parse_kv_arg(&*kvarg, err, false);
            let mut temp_cursor = field_cursor.clone();
            if let Err(field_err) = temp_cursor.set(&*key) {
                err.issues.push(field_err);
            }
            if value.is_none() {
                // A key with no value only moves the cursor; drop the error that
                // parse_kv_arg may have just recorded for the missing value.
                field_cursor = temp_cursor.clone();
                if err.issues.len() > last_errc {
                    err.issues.remove(last_errc);
                }
                continue;
            }
            // Map the CLI field spelling to its JSON name and type descriptor.
            let type_info: Option<(&'static str, JsonTypeInfo)> =
                match &temp_cursor.to_string()[..] {
                    "create-time" => Some(("createTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "description" => Some(("description", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "etag" => Some(("etag", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "kms-key-name" => Some(("kmsKeyName", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "labels" => Some(("labels", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Map })),
                    "name" => Some(("name", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "satisfies-pzs" => Some(("satisfiesPzs", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "state" => Some(("state", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "status-message" => Some(("statusMessage", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "suspension-reasons" => Some(("suspensionReasons", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "tier" => Some(("tier", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    _ => {
                        // Unknown field: record it together with a spelling suggestion.
                        let suggestion = FieldCursor::did_you_mean(key, &vec!["create-time", "description", "etag", "kms-key-name", "labels", "name", "satisfies-pzs", "state", "status-message", "suspension-reasons", "tier"]);
                        err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
                        None
                    }
                };
            if let Some((field_cursor_str, type_info)) = type_info {
                FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
            }
        }
        // NOTE(review): unwrap assumes the assembled JSON matches the schema;
        // malformed fields should already have been reported into `err` above.
        let mut request: api::Instance = json::value::from_value(object).unwrap();
        let mut call = self.hub.projects().locations_instances_create(request, opt.value_of("parent").unwrap_or(""));
        // Apply "-v" call parameters: instance-id first, then global parameters.
        for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let (key, value) = parse_kv_arg(&*parg, err, false);
            match key {
                "instance-id" => {
                    call = call.instance_id(value.unwrap_or(""));
                },
                _ => {
                    let mut found = false;
                    for param in &self.gp {
                        if key == *param {
                            found = true;
                            call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
                            break;
                        }
                    }
                    if !found {
                        err.issues.push(CLIError::UnknownParameter(key.to_string(),
                                                                  {let mut v = Vec::new();
                                                                           v.extend(self.gp.iter().map(|v|*v));
                                                                           v.extend(["instance-id"].iter().map(|v|*v));
                                                                           v } ));
                    }
                }
            }
        }
        let protocol = CallType::Standard;
        if dry_run {
            Ok(())
        } else {
            assert!(err.issues.len() == 0);
            // Extra OAuth scopes requested on the command line.
            for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
                call = call.add_scope(scope);
            }
            let mut ostream = match writer_from_opts(opt.value_of("out")) {
                Ok(mut f) => f,
                Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
            };
            match match protocol {
                CallType::Standard => call.doit().await,
                _ => unreachable!()
            } {
                Err(api_err) => Err(DoitError::ApiError(api_err)),
                Ok((mut response, output_schema)) => {
                    // Pretty-print the decoded response, omitting null fields.
                    let mut value = json::value::to_value(&output_schema).expect("serde to work");
                    remove_json_null_values(&mut value);
                    json::to_writer_pretty(&mut ostream, &value).unwrap();
                    ostream.flush().unwrap();
                    Ok(())
                }
            }
        }
    }
async fn _projects_locations_instances_delete(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut call = self.hub.projects().locations_instances_delete(opt.value_of("name").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
"force" => {
call = call.force(arg_from_str(value.unwrap_or("false"), err, "force", "boolean"));
},
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v.extend(["force"].iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
async fn _projects_locations_instances_get(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut call = self.hub.projects().locations_instances_get(opt.value_of("name").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
async fn _projects_locations_instances_list(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut call = self.hub.projects().locations_instances_list(opt.value_of("parent").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
"page-token" => {
call = call.page_token(value.unwrap_or(""));
},
"page-size" => {
call = call.page_size(arg_from_str(value.unwrap_or("-0"), err, "page-size", "integer"));
},
"order-by" => {
call = call.order_by(value.unwrap_or(""));
},
"filter" => {
call = call.filter(value.unwrap_or(""));
},
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v.extend(["filter", "order-by", "page-size", "page-token"].iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
    /// `projects locations-instances-patch`: assembles an `api::Instance`
    /// request body from repeated `kv` arguments, applies `-v` call parameters
    /// (notably `update-mask`), and executes the patch unless `dry_run`.
    ///
    /// Argument errors accumulate in `err`; the call only runs once no
    /// issues were recorded.
    async fn _projects_locations_instances_patch(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
                                                    -> Result<(), DoitError> {
        // Cursor tracking the current position inside the nested request object.
        let mut field_cursor = FieldCursor::default();
        // The request body is built as a JSON object and deserialized at the end.
        let mut object = json::value::Value::Object(Default::default());
        for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let last_errc = err.issues.len();
            let (key, value) = parse_kv_arg(&*kvarg, err, false);
            let mut temp_cursor = field_cursor.clone();
            if let Err(field_err) = temp_cursor.set(&*key) {
                err.issues.push(field_err);
            }
            if value.is_none() {
                // A key with no value only moves the cursor; drop the error that
                // parse_kv_arg may have just recorded for the missing value.
                field_cursor = temp_cursor.clone();
                if err.issues.len() > last_errc {
                    err.issues.remove(last_errc);
                }
                continue;
            }
            // Map the CLI field spelling to its JSON name and type descriptor.
            let type_info: Option<(&'static str, JsonTypeInfo)> =
                match &temp_cursor.to_string()[..] {
                    "create-time" => Some(("createTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "description" => Some(("description", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "etag" => Some(("etag", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "kms-key-name" => Some(("kmsKeyName", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "labels" => Some(("labels", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Map })),
                    "name" => Some(("name", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "satisfies-pzs" => Some(("satisfiesPzs", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "state" => Some(("state", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "status-message" => Some(("statusMessage", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "suspension-reasons" => Some(("suspensionReasons", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "tier" => Some(("tier", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    _ => {
                        // Unknown field: record it together with a spelling suggestion.
                        let suggestion = FieldCursor::did_you_mean(key, &vec!["create-time", "description", "etag", "kms-key-name", "labels", "name", "satisfies-pzs", "state", "status-message", "suspension-reasons", "tier"]);
                        err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
                        None
                    }
                };
            if let Some((field_cursor_str, type_info)) = type_info {
                FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
            }
        }
        // NOTE(review): unwrap assumes the assembled JSON matches the schema;
        // malformed fields should already have been reported into `err` above.
        let mut request: api::Instance = json::value::from_value(object).unwrap();
        let mut call = self.hub.projects().locations_instances_patch(request, opt.value_of("name").unwrap_or(""));
        // Apply "-v" call parameters: update-mask first, then global parameters.
        for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let (key, value) = parse_kv_arg(&*parg, err, false);
            match key {
                "update-mask" => {
                    call = call.update_mask(value.unwrap_or(""));
                },
                _ => {
                    let mut found = false;
                    for param in &self.gp {
                        if key == *param {
                            found = true;
                            call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
                            break;
                        }
                    }
                    if !found {
                        err.issues.push(CLIError::UnknownParameter(key.to_string(),
                                                                  {let mut v = Vec::new();
                                                                           v.extend(self.gp.iter().map(|v|*v));
                                                                           v.extend(["update-mask"].iter().map(|v|*v));
                                                                           v } ));
                    }
                }
            }
        }
        let protocol = CallType::Standard;
        if dry_run {
            Ok(())
        } else {
            assert!(err.issues.len() == 0);
            // Extra OAuth scopes requested on the command line.
            for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
                call = call.add_scope(scope);
            }
            let mut ostream = match writer_from_opts(opt.value_of("out")) {
                Ok(mut f) => f,
                Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
            };
            match match protocol {
                CallType::Standard => call.doit().await,
                _ => unreachable!()
            } {
                Err(api_err) => Err(DoitError::ApiError(api_err)),
                Ok((mut response, output_schema)) => {
                    // Pretty-print the decoded response, omitting null fields.
                    let mut value = json::value::to_value(&output_schema).expect("serde to work");
                    remove_json_null_values(&mut value);
                    json::to_writer_pretty(&mut ostream, &value).unwrap();
                    ostream.flush().unwrap();
                    Ok(())
                }
            }
        }
    }
    /// `projects locations-instances-restore`: assembles an
    /// `api::RestoreInstanceRequest` from repeated `kv` arguments, applies
    /// `-v` global parameters, and executes the restore unless `dry_run`.
    ///
    /// Argument errors accumulate in `err`; the call only runs once no
    /// issues were recorded.
    async fn _projects_locations_instances_restore(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
                                                    -> Result<(), DoitError> {
        // Cursor tracking the current position inside the nested request object.
        let mut field_cursor = FieldCursor::default();
        // The request body is built as a JSON object and deserialized at the end.
        let mut object = json::value::Value::Object(Default::default())
        ;
        for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let last_errc = err.issues.len();
            let (key, value) = parse_kv_arg(&*kvarg, err, false);
            let mut temp_cursor = field_cursor.clone();
            if let Err(field_err) = temp_cursor.set(&*key) {
                err.issues.push(field_err);
            }
            if value.is_none() {
                // A key with no value only moves the cursor; drop the error that
                // parse_kv_arg may have just recorded for the missing value.
                field_cursor = temp_cursor.clone();
                if err.issues.len() > last_errc {
                    err.issues.remove(last_errc);
                }
                continue;
            }
            // Map the CLI field spelling to its JSON name and type descriptor.
            let type_info: Option<(&'static str, JsonTypeInfo)> =
                match &temp_cursor.to_string()[..] {
                    "file-share" => Some(("fileShare", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "source-backup" => Some(("sourceBackup", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    _ => {
                        // Unknown field: record it together with a spelling suggestion.
                        let suggestion = FieldCursor::did_you_mean(key, &vec!["file-share", "source-backup"]);
                        err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
                        None
                    }
                };
            if let Some((field_cursor_str, type_info)) = type_info {
                FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
            }
        }
        // NOTE(review): unwrap assumes the assembled JSON matches the schema;
        // malformed fields should already have been reported into `err` above.
        let mut request: api::RestoreInstanceRequest = json::value::from_value(object).unwrap();
        let mut call = self.hub.projects().locations_instances_restore(request, opt.value_of("name").unwrap_or(""));
        // Apply "-v" call parameters; only global parameters are valid here.
        for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let (key, value) = parse_kv_arg(&*parg, err, false);
            match key {
                _ => {
                    let mut found = false;
                    for param in &self.gp {
                        if key == *param {
                            found = true;
                            call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
                            break;
                        }
                    }
                    if !found {
                        err.issues.push(CLIError::UnknownParameter(key.to_string(),
                                                                  {let mut v = Vec::new();
                                                                           v.extend(self.gp.iter().map(|v|*v));
                                                                           v } ));
                    }
                }
            }
        }
        let protocol = CallType::Standard;
        if dry_run {
            Ok(())
        } else {
            assert!(err.issues.len() == 0);
            // Extra OAuth scopes requested on the command line.
            for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
                call = call.add_scope(scope);
            }
            let mut ostream = match writer_from_opts(opt.value_of("out")) {
                Ok(mut f) => f,
                Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
            };
            match match protocol {
                CallType::Standard => call.doit().await,
                _ => unreachable!()
            } {
                Err(api_err) => Err(DoitError::ApiError(api_err)),
                Ok((mut response, output_schema)) => {
                    // Pretty-print the decoded response, omitting null fields.
                    let mut value = json::value::to_value(&output_schema).expect("serde to work");
                    remove_json_null_values(&mut value);
                    json::to_writer_pretty(&mut ostream, &value).unwrap();
                    ostream.flush().unwrap();
                    Ok(())
                }
            }
        }
    }
    /// `projects locations-instances-snapshots-create`: assembles an
    /// `api::Snapshot` request body from repeated `kv` arguments, applies `-v`
    /// call parameters (notably `snapshot-id`), and executes the create
    /// unless `dry_run`.
    ///
    /// Argument errors accumulate in `err`; the call only runs once no
    /// issues were recorded.
    async fn _projects_locations_instances_snapshots_create(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
                                                    -> Result<(), DoitError> {
        // Cursor tracking the current position inside the nested request object.
        let mut field_cursor = FieldCursor::default();
        // The request body is built as a JSON object and deserialized at the end.
        let mut object = json::value::Value::Object(Default::default());
        for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let last_errc = err.issues.len();
            let (key, value) = parse_kv_arg(&*kvarg, err, false);
            let mut temp_cursor = field_cursor.clone();
            if let Err(field_err) = temp_cursor.set(&*key) {
                err.issues.push(field_err);
            }
            if value.is_none() {
                // A key with no value only moves the cursor; drop the error that
                // parse_kv_arg may have just recorded for the missing value.
                field_cursor = temp_cursor.clone();
                if err.issues.len() > last_errc {
                    err.issues.remove(last_errc);
                }
                continue;
            }
            // Map the CLI field spelling to its JSON name and type descriptor.
            let type_info: Option<(&'static str, JsonTypeInfo)> =
                match &temp_cursor.to_string()[..] {
                    "create-time" => Some(("createTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "description" => Some(("description", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "filesystem-used-bytes" => Some(("filesystemUsedBytes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "labels" => Some(("labels", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Map })),
                    "name" => Some(("name", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "state" => Some(("state", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    _ => {
                        // Unknown field: record it together with a spelling suggestion.
                        let suggestion = FieldCursor::did_you_mean(key, &vec!["create-time", "description", "filesystem-used-bytes", "labels", "name", "state"]);
                        err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
                        None
                    }
                };
            if let Some((field_cursor_str, type_info)) = type_info {
                FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
            }
        }
        // NOTE(review): unwrap assumes the assembled JSON matches the schema;
        // malformed fields should already have been reported into `err` above.
        let mut request: api::Snapshot = json::value::from_value(object).unwrap();
        let mut call = self.hub.projects().locations_instances_snapshots_create(request, opt.value_of("parent").unwrap_or(""));
        // Apply "-v" call parameters: snapshot-id first, then global parameters.
        for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let (key, value) = parse_kv_arg(&*parg, err, false);
            match key {
                "snapshot-id" => {
                    call = call.snapshot_id(value.unwrap_or(""));
                },
                _ => {
                    let mut found = false;
                    for param in &self.gp {
                        if key == *param {
                            found = true;
                            call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
                            break;
                        }
                    }
                    if !found {
                        err.issues.push(CLIError::UnknownParameter(key.to_string(),
                                                                  {let mut v = Vec::new();
                                                                           v.extend(self.gp.iter().map(|v|*v));
                                                                           v.extend(["snapshot-id"].iter().map(|v|*v));
                                                                           v } ));
                    }
                }
            }
        }
        let protocol = CallType::Standard;
        if dry_run {
            Ok(())
        } else {
            assert!(err.issues.len() == 0);
            // Extra OAuth scopes requested on the command line.
            for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
                call = call.add_scope(scope);
            }
            let mut ostream = match writer_from_opts(opt.value_of("out")) {
                Ok(mut f) => f,
                Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
            };
            match match protocol {
                CallType::Standard => call.doit().await,
                _ => unreachable!()
            } {
                Err(api_err) => Err(DoitError::ApiError(api_err)),
                Ok((mut response, output_schema)) => {
                    // Pretty-print the decoded response, omitting null fields.
                    let mut value = json::value::to_value(&output_schema).expect("serde to work");
                    remove_json_null_values(&mut value);
                    json::to_writer_pretty(&mut ostream, &value).unwrap();
                    ostream.flush().unwrap();
                    Ok(())
                }
            }
        }
    }
async fn _projects_locations_instances_snapshots_delete(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut call = self.hub.projects().locations_instances_snapshots_delete(opt.value_of("name").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
async fn _projects_locations_instances_snapshots_get(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut call = self.hub.projects().locations_instances_snapshots_get(opt.value_of("name").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
/// Handles the `locations-instances-snapshots-list` subcommand: lists all
/// snapshots under the given parent instance and pretty-prints the response.
///
/// Supports the method-specific query parameters `page-token`, `page-size`,
/// `order-by` and `filter` in addition to the global ones. When `dry_run`
/// is true only argument validation is performed.
async fn _projects_locations_instances_snapshots_list(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
// Build the hub call from the required positional `parent` argument.
let mut call = self.hub.projects().locations_instances_snapshots_list(opt.value_of("parent").unwrap_or(""));
// Apply each repeated `-v key=value` optional parameter.
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
"page-token" => {
call = call.page_token(value.unwrap_or(""));
},
"page-size" => {
// Parse errors are pushed into `err`; "-0" marks the missing-value case.
call = call.page_size(arg_from_str(value.unwrap_or("-0"), err, "page-size", "integer"));
},
"order-by" => {
call = call.order_by(value.unwrap_or(""));
},
"filter" => {
call = call.filter(value.unwrap_or(""));
},
_ => {
// Fall back to the global parameters shared by all calls.
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
// Translate the CLI spelling into the wire name via `gpm`.
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
// Unknown key: report it along with every accepted parameter name.
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v.extend(["filter", "order-by", "page-size", "page-token"].iter().map(|v|*v));
v } ));
}
}
}
}
// Only the standard JSON protocol is used by this method.
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
// The dry-run pass in `_doit(true)` already surfaced any issues.
assert!(err.issues.len() == 0);
// Extra OAuth scopes requested via the global `url` option.
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
// Resolve `--out` into a writer (stdout when absent or "-").
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
// Execute the call, strip JSON nulls, and pretty-print the result.
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
/// Handles the `locations-instances-snapshots-patch` subcommand: builds a
/// `Snapshot` request body from repeated `-r key=value` arguments, then
/// updates the named snapshot and pretty-prints the response.
///
/// Supports the method-specific `update-mask` query parameter. When
/// `dry_run` is true only argument validation is performed.
async fn _projects_locations_instances_snapshots_patch(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
// The cursor tracks the current position inside the (possibly nested)
// request object while successive `-r` arguments are applied.
let mut field_cursor = FieldCursor::default();
let mut object = json::value::Value::Object(Default::default());
for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let last_errc = err.issues.len();
let (key, value) = parse_kv_arg(&*kvarg, err, false);
let mut temp_cursor = field_cursor.clone();
if let Err(field_err) = temp_cursor.set(&*key) {
err.issues.push(field_err);
}
if value.is_none() {
// A bare key (no '=') moves the cursor instead of setting a value;
// a cursor error produced just above is then retracted.
field_cursor = temp_cursor.clone();
if err.issues.len() > last_errc {
err.issues.remove(last_errc);
}
continue;
}
// Map kebab-case CLI field names onto camelCase JSON fields plus the
// JSON/complex type needed to coerce the string value.
let type_info: Option<(&'static str, JsonTypeInfo)> =
match &temp_cursor.to_string()[..] {
"create-time" => Some(("createTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"description" => Some(("description", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"filesystem-used-bytes" => Some(("filesystemUsedBytes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"labels" => Some(("labels", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Map })),
"name" => Some(("name", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"state" => Some(("state", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
_ => {
// Unknown field: record it together with a did-you-mean suggestion.
let suggestion = FieldCursor::did_you_mean(key, &vec!["create-time", "description", "filesystem-used-bytes", "labels", "name", "state"]);
err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
None
}
};
if let Some((field_cursor_str, type_info)) = type_info {
FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
}
}
// Deserialize the assembled JSON object into the typed request.
// NOTE(review): generator pattern — `unwrap()` panics if the object does
// not fit `api::Snapshot`; field coercion above is expected to prevent that.
let mut request: api::Snapshot = json::value::from_value(object).unwrap();
let mut call = self.hub.projects().locations_instances_snapshots_patch(request, opt.value_of("name").unwrap_or(""));
// Apply each repeated `-v key=value` optional parameter.
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
"update-mask" => {
call = call.update_mask(value.unwrap_or(""));
},
_ => {
// Fall back to the global parameters shared by all calls.
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v.extend(["update-mask"].iter().map(|v|*v));
v } ));
}
}
}
}
// Only the standard JSON protocol is used by this method.
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
// The dry-run pass in `_doit(true)` already surfaced any issues.
assert!(err.issues.len() == 0);
// Extra OAuth scopes requested via the global `url` option.
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
// Resolve `--out` into a writer (stdout when absent or "-").
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
// Execute the call, strip JSON nulls, and pretty-print the result.
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
/// Handles the `locations-list` subcommand: lists locations under the given
/// resource name and pretty-prints the JSON response.
///
/// Supports the method-specific query parameters `page-token`, `page-size`,
/// `include-unrevealed-locations` and `filter`. When `dry_run` is true only
/// argument validation is performed.
async fn _projects_locations_list(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
// Build the hub call from the required positional `name` argument.
let mut call = self.hub.projects().locations_list(opt.value_of("name").unwrap_or(""));
// Apply each repeated `-v key=value` optional parameter.
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
"page-token" => {
call = call.page_token(value.unwrap_or(""));
},
"page-size" => {
// Parse errors are pushed into `err`; "-0" marks the missing-value case.
call = call.page_size(arg_from_str(value.unwrap_or("-0"), err, "page-size", "integer"));
},
"include-unrevealed-locations" => {
call = call.include_unrevealed_locations(arg_from_str(value.unwrap_or("false"), err, "include-unrevealed-locations", "boolean"));
},
"filter" => {
call = call.filter(value.unwrap_or(""));
},
_ => {
// Fall back to the global parameters shared by all calls.
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
// Translate the CLI spelling into the wire name via `gpm`.
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
// Unknown key: report it along with every accepted parameter name.
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v.extend(["filter", "include-unrevealed-locations", "page-size", "page-token"].iter().map(|v|*v));
v } ));
}
}
}
}
// Only the standard JSON protocol is used by this method.
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
// The dry-run pass in `_doit(true)` already surfaced any issues.
assert!(err.issues.len() == 0);
// Extra OAuth scopes requested via the global `url` option.
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
// Resolve `--out` into a writer (stdout when absent or "-").
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
// Execute the call, strip JSON nulls, and pretty-print the result.
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
/// Handles the `locations-operations-cancel` subcommand: builds an (empty)
/// `CancelOperationRequest` body from `-r` arguments, cancels the named
/// operation, and pretty-prints the response.
///
/// When `dry_run` is true only argument validation is performed.
async fn _projects_locations_operations_cancel(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
// The cursor tracks the current position inside the request object while
// successive `-r` arguments are applied.
let mut field_cursor = FieldCursor::default();
let mut object = json::value::Value::Object(Default::default());
for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let last_errc = err.issues.len();
let (key, value) = parse_kv_arg(&*kvarg, err, false);
let mut temp_cursor = field_cursor.clone();
if let Err(field_err) = temp_cursor.set(&*key) {
err.issues.push(field_err);
}
if value.is_none() {
// A bare key (no '=') moves the cursor instead of setting a value;
// a cursor error produced just above is then retracted.
field_cursor = temp_cursor.clone();
if err.issues.len() > last_errc {
err.issues.remove(last_errc);
}
continue;
}
// `CancelOperationRequest` has no settable fields, so every key=value
// pair is reported as unknown.
let type_info: Option<(&'static str, JsonTypeInfo)> =
match &temp_cursor.to_string()[..] {
_ => {
let suggestion = FieldCursor::did_you_mean(key, &vec![]);
err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
None
}
};
if let Some((field_cursor_str, type_info)) = type_info {
FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
}
}
// Deserialize the (empty) JSON object into the typed request.
let mut request: api::CancelOperationRequest = json::value::from_value(object).unwrap();
let mut call = self.hub.projects().locations_operations_cancel(request, opt.value_of("name").unwrap_or(""));
// Apply each repeated `-v key=value` optional parameter.
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
// Only the global parameters shared by all calls are accepted here.
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
// Only the standard JSON protocol is used by this method.
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
// The dry-run pass in `_doit(true)` already surfaced any issues.
assert!(err.issues.len() == 0);
// Extra OAuth scopes requested via the global `url` option.
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
// Resolve `--out` into a writer (stdout when absent or "-").
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
// Execute the call, strip JSON nulls, and pretty-print the result.
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
/// Handles the `locations-operations-delete` subcommand: deletes the named
/// long-running operation and pretty-prints the JSON response.
///
/// When `dry_run` is true only argument validation is performed.
async fn _projects_locations_operations_delete(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
// Build the hub call from the required positional `name` argument.
let mut call = self.hub.projects().locations_operations_delete(opt.value_of("name").unwrap_or(""));
// Apply each repeated `-v key=value` optional parameter.
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
// Only the global parameters shared by all calls are accepted here.
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
// Translate the CLI spelling into the wire name via `gpm`.
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
// Only the standard JSON protocol is used by this method.
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
// The dry-run pass in `_doit(true)` already surfaced any issues.
assert!(err.issues.len() == 0);
// Extra OAuth scopes requested via the global `url` option.
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
// Resolve `--out` into a writer (stdout when absent or "-").
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
// Execute the call, strip JSON nulls, and pretty-print the result.
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
/// Handles the `locations-operations-get` subcommand: fetches the latest
/// state of the named long-running operation and pretty-prints the response.
///
/// When `dry_run` is true only argument validation is performed.
async fn _projects_locations_operations_get(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
// Build the hub call from the required positional `name` argument.
let mut call = self.hub.projects().locations_operations_get(opt.value_of("name").unwrap_or(""));
// Apply each repeated `-v key=value` optional parameter.
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
// Only the global parameters shared by all calls are accepted here.
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
// Translate the CLI spelling into the wire name via `gpm`.
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
// Only the standard JSON protocol is used by this method.
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
// The dry-run pass in `_doit(true)` already surfaced any issues.
assert!(err.issues.len() == 0);
// Extra OAuth scopes requested via the global `url` option.
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
// Resolve `--out` into a writer (stdout when absent or "-").
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
// Execute the call, strip JSON nulls, and pretty-print the result.
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
/// Handles the `locations-operations-list` subcommand: lists operations that
/// match the optional filter under the given name and pretty-prints them.
///
/// Supports the method-specific query parameters `page-token`, `page-size`
/// and `filter`. When `dry_run` is true only argument validation is performed.
async fn _projects_locations_operations_list(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
// Build the hub call from the required positional `name` argument.
let mut call = self.hub.projects().locations_operations_list(opt.value_of("name").unwrap_or(""));
// Apply each repeated `-v key=value` optional parameter.
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
"page-token" => {
call = call.page_token(value.unwrap_or(""));
},
"page-size" => {
// Parse errors are pushed into `err`; "-0" marks the missing-value case.
call = call.page_size(arg_from_str(value.unwrap_or("-0"), err, "page-size", "integer"));
},
"filter" => {
call = call.filter(value.unwrap_or(""));
},
_ => {
// Fall back to the global parameters shared by all calls.
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
// Translate the CLI spelling into the wire name via `gpm`.
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
// Unknown key: report it along with every accepted parameter name.
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v.extend(["filter", "page-size", "page-token"].iter().map(|v|*v));
v } ));
}
}
}
}
// Only the standard JSON protocol is used by this method.
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
// The dry-run pass in `_doit(true)` already surfaced any issues.
assert!(err.issues.len() == 0);
// Extra OAuth scopes requested via the global `url` option.
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
// Resolve `--out` into a writer (stdout when absent or "-").
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
// Execute the call, strip JSON nulls, and pretty-print the result.
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
/// Dispatches the parsed command line to the matching handler method.
///
/// With `dry_run` set, handlers only validate their arguments; this method
/// then returns `Err(Some(errors))` when validation found issues and
/// `Err(None)` when everything parsed cleanly. With `dry_run` unset it
/// returns `Ok(result_of_the_actual_call)`.
async fn _doit(&self, dry_run: bool) -> Result<Result<(), DoitError>, Option<InvalidOptionsError>> {
let mut err = InvalidOptionsError::new();
let mut call_result: Result<(), DoitError> = Ok(());
let mut err_opt: Option<InvalidOptionsError> = None;
// Route on resource ("projects") and then on the method subcommand.
match self.opt.subcommand() {
("projects", Some(opt)) => {
match opt.subcommand() {
("locations-backups-create", Some(opt)) => {
call_result = self._projects_locations_backups_create(opt, dry_run, &mut err).await;
},
("locations-backups-delete", Some(opt)) => {
call_result = self._projects_locations_backups_delete(opt, dry_run, &mut err).await;
},
("locations-backups-get", Some(opt)) => {
call_result = self._projects_locations_backups_get(opt, dry_run, &mut err).await;
},
("locations-backups-list", Some(opt)) => {
call_result = self._projects_locations_backups_list(opt, dry_run, &mut err).await;
},
("locations-backups-patch", Some(opt)) => {
call_result = self._projects_locations_backups_patch(opt, dry_run, &mut err).await;
},
("locations-get", Some(opt)) => {
call_result = self._projects_locations_get(opt, dry_run, &mut err).await;
},
("locations-instances-create", Some(opt)) => {
call_result = self._projects_locations_instances_create(opt, dry_run, &mut err).await;
},
("locations-instances-delete", Some(opt)) => {
call_result = self._projects_locations_instances_delete(opt, dry_run, &mut err).await;
},
("locations-instances-get", Some(opt)) => {
call_result = self._projects_locations_instances_get(opt, dry_run, &mut err).await;
},
("locations-instances-list", Some(opt)) => {
call_result = self._projects_locations_instances_list(opt, dry_run, &mut err).await;
},
("locations-instances-patch", Some(opt)) => {
call_result = self._projects_locations_instances_patch(opt, dry_run, &mut err).await;
},
("locations-instances-restore", Some(opt)) => {
call_result = self._projects_locations_instances_restore(opt, dry_run, &mut err).await;
},
("locations-instances-snapshots-create", Some(opt)) => {
call_result = self._projects_locations_instances_snapshots_create(opt, dry_run, &mut err).await;
},
("locations-instances-snapshots-delete", Some(opt)) => {
call_result = self._projects_locations_instances_snapshots_delete(opt, dry_run, &mut err).await;
},
("locations-instances-snapshots-get", Some(opt)) => {
call_result = self._projects_locations_instances_snapshots_get(opt, dry_run, &mut err).await;
},
("locations-instances-snapshots-list", Some(opt)) => {
call_result = self._projects_locations_instances_snapshots_list(opt, dry_run, &mut err).await;
},
("locations-instances-snapshots-patch", Some(opt)) => {
call_result = self._projects_locations_instances_snapshots_patch(opt, dry_run, &mut err).await;
},
("locations-list", Some(opt)) => {
call_result = self._projects_locations_list(opt, dry_run, &mut err).await;
},
("locations-operations-cancel", Some(opt)) => {
call_result = self._projects_locations_operations_cancel(opt, dry_run, &mut err).await;
},
("locations-operations-delete", Some(opt)) => {
call_result = self._projects_locations_operations_delete(opt, dry_run, &mut err).await;
},
("locations-operations-get", Some(opt)) => {
call_result = self._projects_locations_operations_get(opt, dry_run, &mut err).await;
},
("locations-operations-list", Some(opt)) => {
call_result = self._projects_locations_operations_list(opt, dry_run, &mut err).await;
},
_ => {
// Known resource but no recognized method: record it and show usage.
err.issues.push(CLIError::MissingMethodError("projects".to_string()));
writeln!(io::stderr(), "{}\n", opt.usage()).ok();
}
}
},
_ => {
// No recognized resource subcommand at all.
err.issues.push(CLIError::MissingCommandError);
writeln!(io::stderr(), "{}\n", self.opt.usage()).ok();
}
}
if dry_run {
if err.issues.len() > 0 {
err_opt = Some(err);
}
Err(err_opt)
} else {
Ok(call_result)
}
}
// Please note that this call will fail if any part of the opt can't be handled
/// Builds an `Engine` from parsed CLI matches: resolves the config directory,
/// loads (or falls back to the embedded) application secret, sets up the
/// installed-flow authenticator with on-disk token caching, constructs the
/// API hub, and finally validates all arguments via a dry run.
///
/// Returns `Err` when the config dir or secret cannot be obtained, or when
/// the dry run reports invalid options.
async fn new(opt: ArgMatches<'n>) -> Result<Engine<'n>, InvalidOptionsError> {
let (config_dir, secret) = {
// Default config dir is ~/.google-service-cli unless overridden via `folder`.
let config_dir = match client::assure_config_dir_exists(opt.value_of("folder").unwrap_or("~/.google-service-cli")) {
Err(e) => return Err(InvalidOptionsError::single(e, 3)),
Ok(p) => p,
};
// Read file1-secret.json from the config dir, falling back to the
// embedded installed-app secret below.
match client::application_secret_from_directory(&config_dir, "file1-secret.json",
"{\"installed\":{\"auth_uri\":\"https://accounts.google.com/o/oauth2/auth\",\"client_secret\":\"hCsslbCUyfehWMmbkG8vTYxG\",\"token_uri\":\"https://accounts.google.com/o/oauth2/token\",\"client_email\":\"\",\"redirect_uris\":[\"urn:ietf:wg:oauth:2.0:oob\",\"oob\"],\"client_x509_cert_url\":\"\",\"client_id\":\"620010449518-9ngf7o4dhs0dka470npqvor6dc5lqb9b.apps.googleusercontent.com\",\"auth_provider_x509_cert_url\":\"https://www.googleapis.com/oauth2/v1/certs\"}}") {
Ok(secret) => (config_dir, secret),
Err(e) => return Err(InvalidOptionsError::single(e, 4))
}
};
// Installed-app OAuth flow; tokens are cached in `<config_dir>/file1`.
let auth = oauth2::InstalledFlowAuthenticator::builder(
secret,
oauth2::InstalledFlowReturnMethod::HTTPRedirect,
).persist_tokens_to_disk(format!("{}/file1", config_dir)).build().await.unwrap();
// HTTPS-capable hyper client (HTTP/1 and HTTP/2, native roots).
let client = hyper::Client::builder().build(
hyper_rustls::HttpsConnectorBuilder::new().with_native_roots()
.https_or_http()
.enable_http1()
.enable_http2()
.build()
);
let engine = Engine {
opt: opt,
hub: api::CloudFilestore::new(client, auth),
// Global parameters accepted by every method (CLI spellings) ...
gp: vec!["$-xgafv", "access-token", "alt", "callback", "fields", "key", "oauth-token", "pretty-print", "quota-user", "upload-type", "upload-protocol"],
// ... and the mapping from CLI spelling to the wire parameter name.
gpm: vec![
("$-xgafv", "$.xgafv"),
("access-token", "access_token"),
("oauth-token", "oauth_token"),
("pretty-print", "prettyPrint"),
("quota-user", "quotaUser"),
("upload-type", "uploadType"),
("upload-protocol", "upload_protocol"),
]
};
// Dry run validates the full command line: Err(None) means "all good".
match engine._doit(true).await {
Err(Some(err)) => Err(err),
Err(None) => Ok(engine),
Ok(_) => unreachable!(),
}
}
/// Executes the previously validated command for real.
///
/// `_doit(false)` only ever uses the `Ok` arm of its outer `Result`
/// (validation errors are produced solely on the dry-run path), so the
/// `Err` case here is genuinely impossible.
async fn doit(&self) -> Result<(), DoitError> {
self._doit(false)
.await
.unwrap_or_else(|_| unreachable!())
}
}
#[tokio::main]
async fn main() {
let mut exit_status = 0i32;
let arg_data = [
("projects", "methods: 'locations-backups-create', 'locations-backups-delete', 'locations-backups-get', 'locations-backups-list', 'locations-backups-patch', 'locations-get', 'locations-instances-create', 'locations-instances-delete', 'locations-instances-get', 'locations-instances-list', 'locations-instances-patch', 'locations-instances-restore', 'locations-instances-snapshots-create', 'locations-instances-snapshots-delete', 'locations-instances-snapshots-get', 'locations-instances-snapshots-list', 'locations-instances-snapshots-patch', 'locations-list', 'locations-operations-cancel', 'locations-operations-delete', 'locations-operations-get' and 'locations-operations-list'", vec![
("locations-backups-create",
Some(r##"Creates a backup."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-backups-create",
vec![
(Some(r##"parent"##),
None,
Some(r##"Required. The backup's project and location, in the format `projects/{project_number}/locations/{location}`. In Cloud Filestore, backup locations map to GCP regions, for example **us-west1**."##),
Some(true),
Some(false)),
(Some(r##"kv"##),
Some(r##"r"##),
Some(r##"Set various fields of the request structure, matching the key=value form"##),
Some(true),
Some(true)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-backups-delete",
Some(r##"Deletes a backup."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-backups-delete",
vec![
(Some(r##"name"##),
None,
Some(r##"Required. The backup resource name, in the format `projects/{project_number}/locations/{location}/backups/{backup_id}`"##),
Some(true),
Some(false)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-backups-get",
Some(r##"Gets the details of a specific backup."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-backups-get",
vec![
(Some(r##"name"##),
None,
Some(r##"Required. The backup resource name, in the format `projects/{project_number}/locations/{location}/backups/{backup_id}`."##),
Some(true),
Some(false)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-backups-list",
Some(r##"Lists all backups in a project for either a specified location or for all locations."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-backups-list",
vec![
(Some(r##"parent"##),
None,
Some(r##"Required. The project and location for which to retrieve backup information, in the format `projects/{project_number}/locations/{location}`. In Cloud Filestore, backup locations map to GCP regions, for example **us-west1**. To retrieve backup information for all locations, use "-" for the `{location}` value."##),
Some(true),
Some(false)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-backups-patch",
Some(r##"Updates the settings of a specific backup."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-backups-patch",
vec![
(Some(r##"name"##),
None,
Some(r##"Output only. The resource name of the backup, in the format `projects/{project_number}/locations/{location_id}/backups/{backup_id}`."##),
Some(true),
Some(false)),
(Some(r##"kv"##),
Some(r##"r"##),
Some(r##"Set various fields of the request structure, matching the key=value form"##),
Some(true),
Some(true)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-get",
Some(r##"Gets information about a location."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-get",
vec![
(Some(r##"name"##),
None,
Some(r##"Resource name for the location."##),
Some(true),
Some(false)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-instances-create",
Some(r##"Creates an instance. When creating from a backup, the capacity of the new instance needs to be equal to or larger than the capacity of the backup (and also equal to or larger than the minimum capacity of the tier)."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-instances-create",
vec![
(Some(r##"parent"##),
None,
Some(r##"Required. The instance's project and location, in the format `projects/{project_id}/locations/{location}`. In Cloud Filestore, locations map to GCP zones, for example **us-west1-b**."##),
Some(true),
Some(false)),
(Some(r##"kv"##),
Some(r##"r"##),
Some(r##"Set various fields of the request structure, matching the key=value form"##),
Some(true),
Some(true)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-instances-delete",
Some(r##"Deletes an instance."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-instances-delete",
vec![
(Some(r##"name"##),
None,
Some(r##"Required. The instance resource name, in the format `projects/{project_id}/locations/{location}/instances/{instance_id}`"##),
Some(true),
Some(false)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-instances-get",
Some(r##"Gets the details of a specific instance."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-instances-get",
vec![
(Some(r##"name"##),
None,
Some(r##"Required. The instance resource name, in the format `projects/{project_id}/locations/{location}/instances/{instance_id}`."##),
Some(true),
Some(false)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-instances-list",
Some(r##"Lists all instances in a project for either a specified location or for all locations."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-instances-list",
vec![
(Some(r##"parent"##),
None,
Some(r##"Required. The project and location for which to retrieve instance information, in the format `projects/{project_id}/locations/{location}`. In Cloud Filestore, locations map to GCP zones, for example **us-west1-b**. To retrieve instance information for all locations, use "-" for the `{location}` value."##),
Some(true),
Some(false)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-instances-patch",
Some(r##"Updates the settings of a specific instance."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-instances-patch",
vec![
(Some(r##"name"##),
None,
Some(r##"Output only. The resource name of the instance, in the format `projects/{project}/locations/{location}/instances/{instance}`."##),
Some(true),
Some(false)),
(Some(r##"kv"##),
Some(r##"r"##),
Some(r##"Set various fields of the request structure, matching the key=value form"##),
Some(true),
Some(true)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-instances-restore",
Some(r##"Restores an existing instance's file share from a backup. The capacity of the instance needs to be equal to or larger than the capacity of the backup (and also equal to or larger than the minimum capacity of the tier)."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-instances-restore",
vec![
(Some(r##"name"##),
None,
Some(r##"Required. The resource name of the instance, in the format `projects/{project_number}/locations/{location_id}/instances/{instance_id}`."##),
Some(true),
Some(false)),
(Some(r##"kv"##),
Some(r##"r"##),
Some(r##"Set various fields of the request structure, matching the key=value form"##),
Some(true),
Some(true)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-instances-snapshots-create",
Some(r##"Creates a snapshot."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-instances-snapshots-create",
vec![
(Some(r##"parent"##),
None,
Some(r##"Required. The Filestore Instance to create the snapshots of, in the format `projects/{project_id}/locations/{location}/instances/{instance_id}`"##),
Some(true),
Some(false)),
(Some(r##"kv"##),
Some(r##"r"##),
Some(r##"Set various fields of the request structure, matching the key=value form"##),
Some(true),
Some(true)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-instances-snapshots-delete",
Some(r##"Deletes a snapshot."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-instances-snapshots-delete",
vec![
(Some(r##"name"##),
None,
Some(r##"Required. The snapshot resource name, in the format `projects/{project_id}/locations/{location}/instances/{instance_id}/snapshots/{snapshot_id}`"##),
Some(true),
Some(false)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-instances-snapshots-get",
Some(r##"Gets the details of a specific snapshot."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-instances-snapshots-get",
vec![
(Some(r##"name"##),
None,
Some(r##"Required. The snapshot resource name, in the format `projects/{project_id}/locations/{location}/instances/{instance_id}/snapshots/{snapshot_id}`"##),
Some(true),
Some(false)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-instances-snapshots-list",
Some(r##"Lists all snapshots in a project for either a specified location or for all locations."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-instances-snapshots-list",
vec![
(Some(r##"parent"##),
None,
Some(r##"Required. The instance for which to retrieve snapshot information, in the format `projects/{project_id}/locations/{location}/instances/{instance_id}`."##),
Some(true),
Some(false)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-instances-snapshots-patch",
Some(r##"Updates the settings of a specific snapshot."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-instances-snapshots-patch",
vec![
(Some(r##"name"##),
None,
Some(r##"Output only. The resource name of the snapshot, in the format `projects/{project_id}/locations/{location_id}/instances/{instance_id}/snapshots/{snapshot_id}`."##),
Some(true),
Some(false)),
(Some(r##"kv"##),
Some(r##"r"##),
Some(r##"Set various fields of the request structure, matching the key=value form"##),
Some(true),
Some(true)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-list",
Some(r##"Lists information about the supported locations for this service."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-list",
vec![
(Some(r##"name"##),
None,
Some(r##"The resource that owns the locations collection, if applicable."##),
Some(true),
Some(false)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-operations-cancel",
Some(r##"Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-operations-cancel",
vec![
(Some(r##"name"##),
None,
Some(r##"The name of the operation resource to be cancelled."##),
Some(true),
Some(false)),
(Some(r##"kv"##),
Some(r##"r"##),
Some(r##"Set various fields of the request structure, matching the key=value form"##),
Some(true),
Some(true)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-operations-delete",
Some(r##"Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-operations-delete",
vec![
(Some(r##"name"##),
None,
Some(r##"The name of the operation resource to be deleted."##),
Some(true),
Some(false)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-operations-get",
Some(r##"Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-operations-get",
vec![
(Some(r##"name"##),
None,
Some(r##"The name of the operation resource."##),
Some(true),
Some(false)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
("locations-operations-list",
Some(r##"Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `"/v1/{name=users/*}/operations"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id."##),
"Details at http://byron.github.io/google-apis-rs/google_file1_cli/projects_locations-operations-list",
vec![
(Some(r##"name"##),
None,
Some(r##"The name of the operation's parent resource."##),
Some(true),
Some(false)),
(Some(r##"v"##),
Some(r##"p"##),
Some(r##"Set various optional parameters, matching the key=value form"##),
Some(false),
Some(true)),
(Some(r##"out"##),
Some(r##"o"##),
Some(r##"Specify the file into which to write the program's output"##),
Some(false),
Some(false)),
]),
]),
];
let mut app = App::new("file1")
.author("Sebastian Thiel <[email protected]>")
.version("3.1.0+20220214")
.about("The Cloud Filestore API is used for creating and managing cloud file servers.")
.after_help("All documentation details can be found at http://byron.github.io/google-apis-rs/google_file1_cli")
.arg(Arg::with_name("url")
.long("scope")
.help("Specify the authentication a method should be executed in. Each scope requires the user to grant this application permission to use it.If unset, it defaults to the shortest scope url for a particular method.")
.multiple(true)
.takes_value(true))
.arg(Arg::with_name("folder")
.long("config-dir")
.help("A directory into which we will store our persistent data. Defaults to a user-writable directory that we will create during the first invocation.[default: ~/.google-service-cli")
.multiple(false)
.takes_value(true))
.arg(Arg::with_name("debug")
.long("debug")
.help("Debug print all errors")
.multiple(false)
.takes_value(false));
for &(main_command_name, about, ref subcommands) in arg_data.iter() {
let mut mcmd = SubCommand::with_name(main_command_name).about(about);
for &(sub_command_name, ref desc, url_info, ref args) in subcommands {
let mut scmd = SubCommand::with_name(sub_command_name);
if let &Some(desc) = desc {
scmd = scmd.about(desc);
}
scmd = scmd.after_help(url_info);
for &(ref arg_name, ref flag, ref desc, ref required, ref multi) in args {
let arg_name_str =
match (arg_name, flag) {
(&Some(an), _ ) => an,
(_ , &Some(f)) => f,
_ => unreachable!(),
};
let mut arg = Arg::with_name(arg_name_str)
.empty_values(false);
if let &Some(short_flag) = flag {
arg = arg.short(short_flag);
}
if let &Some(desc) = desc {
arg = arg.help(desc);
}
if arg_name.is_some() && flag.is_some() {
arg = arg.takes_value(true);
}
if let &Some(required) = required {
arg = arg.required(required);
}
if let &Some(multi) = multi {
arg = arg.multiple(multi);
}
scmd = scmd.arg(arg);
}
mcmd = mcmd.subcommand(scmd);
}
app = app.subcommand(mcmd);
}
let matches = app.get_matches();
let debug = matches.is_present("debug");
match Engine::new(matches).await {
Err(err) => {
exit_status = err.exit_code;
writeln!(io::stderr(), "{}", err).ok();
},
Ok(engine) => {
if let Err(doit_err) = engine.doit().await {
exit_status = 1;
match doit_err {
DoitError::IoError(path, err) => {
writeln!(io::stderr(), "Failed to open output file '{}': {}", path, err).ok();
},
DoitError::ApiError(err) => {
if debug {
writeln!(io::stderr(), "{:#?}", err).ok();
} else {
writeln!(io::stderr(), "{}", err).ok();
}
}
}
}
}
}
std::process::exit(exit_status);
}
| 51.934371 | 697 | 0.449388 |
287e2be3ad3bf97d853c5b7975cc1b05889f863f
| 2,676 |
use crate::lex::group::Group;
/// How many times a wrapped group is allowed / required to match,
/// mirroring the regex quantifiers `x`, `x*`, `x+` and `x?`.
///
/// Derives `Clone`/`Copy` so the variant can be passed by value instead
/// of forcing `&MatchRepeats` on every helper; this is backward
/// compatible with all existing by-reference callers.
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
pub enum MatchRepeats {
    /// Exactly one match (no quantifier).
    Once,
    /// `*` — any number of matches, including none.
    ZeroOrMore,
    /// `+` — at least one match.
    OneOrMore,
    /// `?` — at most one match.
    ZeroOrOne,
}
/// Minimum number of times the wrapped group must match for the
/// repetition to succeed: 1 for `Once` and `OneOrMore`, 0 for the
/// "zero"-prefixed quantifiers.
pub fn min_matches(match_repeats: &MatchRepeats) -> i32 {
    // A match expression replaces the original mutable-sentinel pattern;
    // the mapping itself is unchanged.
    match match_repeats {
        MatchRepeats::Once | MatchRepeats::OneOrMore => 1,
        MatchRepeats::ZeroOrMore | MatchRepeats::ZeroOrOne => 0,
    }
}
/// A `Group` combinator that applies a repetition quantifier to an inner
/// group, like the regex forms `x`, `x*`, `x+` and `x?`.
pub struct GroupRepeats {
    // How often the inner group may / must match.
    pub match_repeats: MatchRepeats,
    // The group being repeated.
    pub group: Box<dyn Group>,
}
impl Group for GroupRepeats {
    /// Matches the inner group at `offset` as many times as the quantifier
    /// allows, returning the total matched length (in chars), or `None`
    /// when the quantifier's minimum cannot be met.
    fn match_with(&self, text: &[char], offset: usize) -> Option<u32> {
        let first = self.group.match_with(text, offset);
        match self.match_repeats {
            // No quantifier: exactly the inner group's result.
            MatchRepeats::Once => first,
            // `?`: a failed inner match still succeeds with length zero.
            MatchRepeats::ZeroOrOne => first.or(Some(0)),
            MatchRepeats::ZeroOrMore | MatchRepeats::OneOrMore => {
                let mut total = match first {
                    Some(len) => len,
                    // `*` tolerates zero matches, `+` does not.
                    None => {
                        return if self.match_repeats == MatchRepeats::ZeroOrMore {
                            Some(0)
                        } else {
                            None
                        };
                    }
                };
                // Keep consuming further repeats. Stop on a failed match or a
                // zero-width match — the latter would otherwise loop forever.
                if total > 0 {
                    while let Some(next) = self.group.match_with(text, offset + total as usize) {
                        if next == 0 {
                            break;
                        }
                        total += next;
                    }
                }
                Some(total)
            }
        }
    }
    fn contains_any(&self) -> bool {
        // i think this is the correct behavior
        // contains any is used to help the AndGroup decide if it needs to look ahead
        self.group.contains_any()
    }
    fn name(&self) -> String {
        "MatchRepeats".to_string()
    }
    fn min_matches(&self) -> usize {
        // A quantifier with a zero minimum contributes nothing, regardless
        // of what the inner group would require.
        match min_matches(&self.match_repeats) {
            0 => 0,
            n => n as usize * self.group.min_matches(),
        }
    }
    fn render(&self) -> String {
        // Regex-style suffix for the quantifier, appended to the inner render.
        let suffix = match self.match_repeats {
            MatchRepeats::Once => "",
            MatchRepeats::ZeroOrMore => "*",
            MatchRepeats::OneOrMore => "+",
            MatchRepeats::ZeroOrOne => "?",
        };
        let mut rendered = self.group.render();
        rendered.push_str(suffix);
        rendered
    }
}
| 28.468085 | 103 | 0.523543 |
218cf65c6a4943edcbb5298191502849f2927a05
| 1,082 |
use core::f64;
use std::u64;
/// Entry point: prints a header and a small population report for Rustville.
fn main() {
    let city = "Rustville";
    println!("The city of {}:\n", city);
    print_population(1_324_578, 114_293, 108_097);
}
/// Prints a population report: the totals, a breakdown, the
/// buildings-per-person ratio, and whether buildings must be shared.
fn print_population(adults: u64, kids: u32, buildings: u32) {
    // Widen `kids` losslessly with `u64::from` instead of an `as` cast,
    // which would silently truncate if the argument type ever changed.
    let population = adults + u64::from(kids);
    // `f64::from` converts the u32 exactly; `population` (u64) has no
    // lossless From conversion to f64, but these magnitudes are well
    // within f64's exact-integer range.
    let buildings_per_person = f64::from(buildings) / (population as f64);
    println!("    Population: {}", population);
    println!("    Adults: {}", adults);
    println!("    Kids: {}", kids);
    println!("    Buildings: {}", buildings);
    println!("    Buildings per person: {}\n", buildings_per_person);
    if buildings_per_person >= 1.0 {
        println!("Everyone can have their own building!");
    } else {
        println!("Buildings must be shared!");
    }
}
| 30.914286 | 89 | 0.624769 |
1c5028fbba1b1f187888aa1a1c0d36b8f7b66470
| 3,635 |
use chain_crypto::{bech32::Bech32 as _, Ed25519, PublicKey};
use chain_impl_mockchain::{config::ConfigParam, leadership::bft::LeaderId};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::{convert::TryFrom, fmt};
/// Newtype around a BFT `LeaderId`, adding bech32-based serde
/// (de)serialization and conversions to/from `ConfigParam::AddBftLeader`.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ConsensusLeaderId(pub LeaderId);
// Error returned when a `ConfigParam` other than `AddBftLeader` is
// converted into a `ConsensusLeaderId` (see the TryFrom impl below).
custom_error! { pub TryFromConsensusLeaderIdError
    Incompatible = "Incompatible Config param, expected Add BFT Leader",
}
impl TryFrom<ConfigParam> for ConsensusLeaderId {
    type Error = TryFromConsensusLeaderIdError;

    /// Succeeds only for `ConfigParam::AddBftLeader`; every other variant
    /// yields `Incompatible`.
    fn try_from(config_param: ConfigParam) -> Result<Self, Self::Error> {
        if let ConfigParam::AddBftLeader(leader_id) = config_param {
            Ok(ConsensusLeaderId(leader_id))
        } else {
            Err(TryFromConsensusLeaderIdError::Incompatible)
        }
    }
}
impl From<ConsensusLeaderId> for ConfigParam {
fn from(consensus_leader_id: ConsensusLeaderId) -> Self {
ConfigParam::AddBftLeader(consensus_leader_id.0)
}
}
impl Serialize for ConsensusLeaderId {
    /// Serializes as the bech32 string form of the underlying public key.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let encoded = self.0.as_public_key().to_bech32_str();
        encoded.serialize(serializer)
    }
}
impl<'de> Deserialize<'de> for ConsensusLeaderId {
    // Deserializes from the bech32 string produced by the Serialize impl.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        use serde::de::{self, Visitor};
        // One-shot visitor that accepts only string input.
        struct ConsensusLeaderIdVisitor;
        impl<'de> Visitor<'de> for ConsensusLeaderIdVisitor {
            type Value = ConsensusLeaderId;
            // Describes the expected input in error messages, including the
            // ed25519 bech32 human-readable prefix.
            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                use chain_crypto::AsymmetricPublicKey as _;
                write!(
                    formatter,
                    "bech32 encoding of the leader id's public key ({})",
                    Ed25519::PUBLIC_BECH32_HRP
                )
            }
            fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                // Decode the bech32 text into a public key, then wrap it as a
                // leader id; decoding failures become serde custom errors.
                PublicKey::try_from_bech32_str(s)
                    .map(|pk| ConsensusLeaderId(pk.into()))
                    .map_err(E::custom)
            }
        }
        deserializer.deserialize_str(ConsensusLeaderIdVisitor)
    }
}
// Unit and property tests for ConsensusLeaderId (de)serialization and
// ConfigParam round-trips.
#[cfg(test)]
mod test {
    use super::*;
    use quickcheck::{Arbitrary, Gen};
    // Generates a ConsensusLeaderId from a random Ed25519 key pair so the
    // quickcheck properties below can exercise arbitrary leader ids.
    impl Arbitrary for ConsensusLeaderId {
        fn arbitrary<G: Gen>(g: &mut G) -> Self {
            use crate::crypto::key::KeyPair;
            let kp: KeyPair<Ed25519> = KeyPair::arbitrary(g);
            let public_key = kp.identifier().into_public_key();
            ConsensusLeaderId(LeaderId::from(public_key))
        }
    }
    // A fixed YAML document (bech32 public key) must deserialize cleanly.
    #[test]
    fn deserialize_from_str() {
        const STR: &'static str =
            "---\n\"ed25519_pk1evu9kfx9tztez708nac569hcp0xwkvekxpwc7m8ztxu44tmq4gws3yayej\"";
        let _: ConsensusLeaderId = serde_yaml::from_str(&STR).unwrap();
    }
    quickcheck! {
        // serialize -> deserialize must be the identity.
        fn serde_encode_decode(consensus_leader_id: ConsensusLeaderId) -> bool {
            let s = serde_yaml::to_string(&consensus_leader_id).unwrap();
            let consensus_leader_id_dec: ConsensusLeaderId = serde_yaml::from_str(&s).unwrap();
            consensus_leader_id == consensus_leader_id_dec
        }
        // Into ConfigParam and back must be the identity.
        fn convert_from_to_config_param(consensus_leader_id: ConsensusLeaderId) -> bool {
            let cp = ConfigParam::from(consensus_leader_id.clone());
            let consensus_leader_id_dec = ConsensusLeaderId::try_from(cp).unwrap();
            consensus_leader_id == consensus_leader_id_dec
        }
    }
}
| 33.348624 | 95 | 0.624484 |
22c96844980011eb98fe75ae2112bf05412d3a1a
| 60,431 |
#[doc = r" Value read from the register"]
pub struct R {
    // Raw 32-bit snapshot of the register taken at read time.
    bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
    // Raw 32-bit value staged to be written back to the register.
    bits: u32,
}
impl super::IOCFG0 {
    #[doc = r" Modifies the contents of the register"]
    #[inline]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        // Read-modify-write: seed both proxies from the current value so
        // fields the closure does not touch are preserved.
        let bits = self.register.get();
        let r = R { bits: bits };
        let mut w = W { bits: bits };
        f(&r, &mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Reads the contents of the register"]
    #[inline]
    pub fn read(&self) -> R {
        R { bits: self.register.get() }
    }
    #[doc = r" Writes to the register"]
    #[inline]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Unlike modify(), write() starts from the reset value (defined
        // elsewhere in this module), so unspecified fields get defaults.
        let mut w = W::reset_value();
        f(&mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Writes the reset value to the register"]
    #[inline]
    pub fn reset(&self) {
        self.write(|w| w)
    }
}
/// Value of the field
pub struct RESERVED31R {
    bits: bool,
}
impl RESERVED31R {
    /// Raw bit value of the field.
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    /// `true` when the bit reads as clear (0).
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    /// `true` when the bit reads as set (1).
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
/// Value of the field
pub struct HYST_ENR {
    bits: bool,
}
impl HYST_ENR {
    /// Raw bit value of the field.
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    /// `true` when the bit reads as clear (0).
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    /// `true` when the bit reads as set (1).
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
/// Value of the field
pub struct IER {
    bits: bool,
}
impl IER {
    /// Raw bit value of the field.
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    /// `true` when the bit reads as clear (0).
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    /// `true` when the bit reads as set (1).
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
/// Value of the field
pub struct WU_CFGR {
    bits: u8,
}
impl WU_CFGR {
    /// Raw bits of the field.
    #[inline]
    pub fn bits(&self) -> u8 {
        self.bits
    }
}
/// Possible values of the field `IOMODE`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum IOMODER {
    /// Open Source
    /// Inverted input/output
    OPENSRC_INV,
    /// Open Source
    /// Normal input / output
    OPENSRC,
    /// Open Drain
    /// Inverted input / output
    OPENDR_INV,
    /// Open Drain,
    /// Normal input / output
    OPENDR,
    /// Inverted input / output
    INV,
    /// Normal input / output
    NORMAL,
    /// Reserved
    _Reserved(u8),
}
impl IOMODER {
    /// Raw bits of the field.
    #[inline]
    pub fn bits(&self) -> u8 {
        match self {
            Self::OPENSRC_INV => 7,
            Self::OPENSRC => 6,
            Self::OPENDR_INV => 5,
            Self::OPENDR => 4,
            Self::INV => 1,
            Self::NORMAL => 0,
            Self::_Reserved(bits) => *bits,
        }
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _from(value: u8) -> IOMODER {
        match value {
            0 => Self::NORMAL,
            1 => Self::INV,
            4 => Self::OPENDR,
            5 => Self::OPENDR_INV,
            6 => Self::OPENSRC,
            7 => Self::OPENSRC_INV,
            i => Self::_Reserved(i),
        }
    }
    /// Checks if the value of the field is `OPENSRC_INV`
    #[inline]
    pub fn is_opensrc_inv(&self) -> bool {
        *self == Self::OPENSRC_INV
    }
    /// Checks if the value of the field is `OPENSRC`
    #[inline]
    pub fn is_opensrc(&self) -> bool {
        *self == Self::OPENSRC
    }
    /// Checks if the value of the field is `OPENDR_INV`
    #[inline]
    pub fn is_opendr_inv(&self) -> bool {
        *self == Self::OPENDR_INV
    }
    /// Checks if the value of the field is `OPENDR`
    #[inline]
    pub fn is_opendr(&self) -> bool {
        *self == Self::OPENDR
    }
    /// Checks if the value of the field is `INV`
    #[inline]
    pub fn is_inv(&self) -> bool {
        *self == Self::INV
    }
    /// Checks if the value of the field is `NORMAL`
    #[inline]
    pub fn is_normal(&self) -> bool {
        *self == Self::NORMAL
    }
}
/// Value of the field
pub struct RESERVED19R {
    bits: u8,
}
impl RESERVED19R {
    /// Raw bits of the field.
    #[inline]
    pub fn bits(&self) -> u8 {
        self.bits
    }
}
/// Value of the field
pub struct EDGE_IRQ_ENR {
    bits: bool,
}
impl EDGE_IRQ_ENR {
    /// Raw bit value of the field.
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    /// `true` when the bit reads as clear (0).
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    /// `true` when the bit reads as set (1).
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
/// Possible values of the field `EDGE_DET`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EDGE_DETR {
    /// Positive and negative edge detection
    BOTH,
    /// Positive edge detection
    POS,
    /// Negative edge detection
    NEG,
    /// No edge detection
    NONE,
}
impl EDGE_DETR {
    /// Raw bits of the field.
    #[inline]
    pub fn bits(&self) -> u8 {
        match self {
            Self::BOTH => 3,
            Self::POS => 2,
            Self::NEG => 1,
            Self::NONE => 0,
        }
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _from(value: u8) -> EDGE_DETR {
        match value {
            0 => Self::NONE,
            1 => Self::NEG,
            2 => Self::POS,
            3 => Self::BOTH,
            _ => unreachable!(),
        }
    }
    /// Checks if the value of the field is `BOTH`
    #[inline]
    pub fn is_both(&self) -> bool {
        *self == Self::BOTH
    }
    /// Checks if the value of the field is `POS`
    #[inline]
    pub fn is_pos(&self) -> bool {
        *self == Self::POS
    }
    /// Checks if the value of the field is `NEG`
    #[inline]
    pub fn is_neg(&self) -> bool {
        *self == Self::NEG
    }
    /// Checks if the value of the field is `NONE`
    #[inline]
    pub fn is_none(&self) -> bool {
        *self == Self::NONE
    }
}
/// Value of the field
pub struct RESERVED15R {
    bits: bool,
}
impl RESERVED15R {
    /// Raw bit value of the field.
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    /// `true` when the bit reads as clear (0).
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    /// `true` when the bit reads as set (1).
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
/// Possible values of the field `PULL_CTL`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PULL_CTLR {
    /// No pull
    DIS,
    /// Pull up
    UP,
    /// Pull down
    DWN,
    /// Reserved
    _Reserved(u8),
}
impl PULL_CTLR {
    /// Raw bits of the field.
    #[inline]
    pub fn bits(&self) -> u8 {
        match self {
            Self::DIS => 3,
            Self::UP => 2,
            Self::DWN => 1,
            Self::_Reserved(bits) => *bits,
        }
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _from(value: u8) -> PULL_CTLR {
        match value {
            1 => Self::DWN,
            2 => Self::UP,
            3 => Self::DIS,
            i => Self::_Reserved(i),
        }
    }
    /// Checks if the value of the field is `DIS`
    #[inline]
    pub fn is_dis(&self) -> bool {
        *self == Self::DIS
    }
    /// Checks if the value of the field is `UP`
    #[inline]
    pub fn is_up(&self) -> bool {
        *self == Self::UP
    }
    /// Checks if the value of the field is `DWN`
    #[inline]
    pub fn is_dwn(&self) -> bool {
        *self == Self::DWN
    }
}
/// Value of the field
pub struct SLEW_REDR {
    bits: bool,
}
impl SLEW_REDR {
    /// Raw bit value of the field.
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    /// `true` when the bit reads as clear (0).
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    /// `true` when the bit reads as set (1).
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
/// Possible values of the field `IOCURR`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum IOCURRR {
    /// Extended-Current (EC) mode: Min 8 mA for double drive strength IOs
    /// (min 4 mA for normal IOs) when IOSTR is set to AUTO
    _4_8MA,
    /// High-Current (HC) mode: Min 4 mA when IOSTR is set to AUTO
    _4MA,
    /// Low-Current (LC) mode: Min 2 mA when IOSTR is set to AUTO
    _2MA,
    /// Reserved
    _Reserved(u8),
}
impl IOCURRR {
    /// Raw bits of the field.
    #[inline]
    pub fn bits(&self) -> u8 {
        match self {
            Self::_4_8MA => 2,
            Self::_4MA => 1,
            Self::_2MA => 0,
            Self::_Reserved(bits) => *bits,
        }
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _from(value: u8) -> IOCURRR {
        match value {
            0 => Self::_2MA,
            1 => Self::_4MA,
            2 => Self::_4_8MA,
            i => Self::_Reserved(i),
        }
    }
    /// Checks if the value of the field is `_4_8MA`
    #[inline]
    pub fn is_4_8ma(&self) -> bool {
        *self == Self::_4_8MA
    }
    /// Checks if the value of the field is `_4MA`
    #[inline]
    pub fn is_4ma(&self) -> bool {
        *self == Self::_4MA
    }
    /// Checks if the value of the field is `_2MA`
    #[inline]
    pub fn is_2ma(&self) -> bool {
        *self == Self::_2MA
    }
}
/// Possible values of the field `IOSTR`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum IOSTRR {
    /// Maximum drive strength, controlled by AON_IOC:IOSTRMAX
    /// (min 2 mA @1.8V with default values)
    MAX,
    /// Medium drive strength, controlled by AON_IOC:IOSTRMED
    /// (min 2 mA @2.5V with default values)
    MED,
    /// Minimum drive strength, controlled by AON_IOC:IOSTRMIN
    /// (min 2 mA @3.3V with default values)
    MIN,
    /// Automatic drive strength, controlled by AON BATMON based on
    /// battery voltage. (min 2 mA @VDDS)
    AUTO,
}
impl IOSTRR {
    /// Raw bits of the field.
    #[inline]
    pub fn bits(&self) -> u8 {
        match self {
            Self::MAX => 3,
            Self::MED => 2,
            Self::MIN => 1,
            Self::AUTO => 0,
        }
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _from(value: u8) -> IOSTRR {
        match value {
            0 => Self::AUTO,
            1 => Self::MIN,
            2 => Self::MED,
            3 => Self::MAX,
            _ => unreachable!(),
        }
    }
    /// Checks if the value of the field is `MAX`
    #[inline]
    pub fn is_max(&self) -> bool {
        *self == Self::MAX
    }
    /// Checks if the value of the field is `MED`
    #[inline]
    pub fn is_med(&self) -> bool {
        *self == Self::MED
    }
    /// Checks if the value of the field is `MIN`
    #[inline]
    pub fn is_min(&self) -> bool {
        *self == Self::MIN
    }
    /// Checks if the value of the field is `AUTO`
    #[inline]
    pub fn is_auto(&self) -> bool {
        *self == Self::AUTO
    }
}
/// Value of the field
pub struct RESERVED6R {
    bits: u8,
}
impl RESERVED6R {
    /// Raw bits of the field.
    #[inline]
    pub fn bits(&self) -> u8 {
        self.bits
    }
}
// NOTE(review): looks like machine-generated register-description code
// (svd2rust style) — prefer regenerating over hand-editing. Each variant
// names a peripheral function that can be multiplexed onto this IO pin.
#[doc = "Possible values of the field `PORT_ID`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PORT_IDR {
    #[doc = "RF Core SMI Command Link In"]
    RFC_SMI_CL_IN,
    #[doc = "RF Core SMI Command Link Out"]
    RFC_SMI_CL_OUT,
    #[doc = "RF Core SMI Data Link In"]
    RFC_SMI_DL_IN,
    #[doc = "RF Core SMI Data Link Out"]
    RFC_SMI_DL_OUT,
    #[doc = "RF Core Data In 1"]
    RFC_GPI1,
    #[doc = "RF Core Data In 0"]
    RFC_GPI0,
    #[doc = "RF Core Data Out 3"]
    RFC_GPO3,
    #[doc = "RF Core Data Out 2"]
    RFC_GPO2,
    #[doc = "RF Core Data Out 1"]
    RFC_GPO1,
    #[doc = "RF Core Data Out 0"]
    RFC_GPO0,
    #[doc = "RF Core Trace"]
    RFC_TRC,
    #[doc = "I2S MCLK "]
    I2S_MCLK,
    #[doc = "I2S BCLK "]
    I2S_BCLK,
    #[doc = "I2S WCLK "]
    I2S_WCLK,
    #[doc = "I2S Data 1"]
    I2S_AD1,
    #[doc = "I2S Data 0"]
    I2S_AD0,
    #[doc = "SSI1 CLK"]
    SSI1_CLK,
    #[doc = "SSI1 FSS "]
    SSI1_FSS,
    #[doc = "SSI1 TX "]
    SSI1_TX,
    #[doc = "SSI1 RX "]
    SSI1_RX,
    #[doc = "CPU SWV "]
    CPU_SWV,
    #[doc = "PORT EVENT 7\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT7,
    #[doc = "PORT EVENT 6\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT6,
    #[doc = "PORT EVENT 5\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT5,
    #[doc = "PORT EVENT 4\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT4,
    #[doc = "PORT EVENT 3\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT3,
    #[doc = "PORT EVENT 2\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT2,
    #[doc = "PORT EVENT 1\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT1,
    #[doc = "PORT EVENT 0\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT0,
    #[doc = "UART0 RTS "]
    UART0_RTS,
    #[doc = "UART0 CTS "]
    UART0_CTS,
    #[doc = "UART0 TX "]
    UART0_TX,
    #[doc = "UART0 RX "]
    UART0_RX,
    #[doc = "I2C Clock"]
    I2C_MSSCL,
    #[doc = "I2C Data"]
    I2C_MSSDA,
    #[doc = "SSI0 CLK"]
    SSI0_CLK,
    #[doc = "SSI0 FSS "]
    SSI0_FSS,
    #[doc = "SSI0 TX "]
    SSI0_TX,
    #[doc = "SSI0 RX "]
    SSI0_RX,
    #[doc = "AUX IO "]
    AUX_IO,
    #[doc = "AON 32 KHz clock (SCLK_LF)"]
    AON_CLK32K,
    #[doc = "General Purpose IO "]
    GPIO,
    #[doc = r" Reserved"]
    _Reserved(u8),
}
// Read view of the 6-bit PORT_ID field: identifies which peripheral
// signal is currently routed to this DIO. Raw encodings with no named
// signal (1-6, 19-22, 31, 42-45, 57-63) surface as `_Reserved(raw)`.
// NOTE(review): this looks like svd2rust-generated output — prefer
// regenerating from the SVD over hand-editing.
impl PORT_IDR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u8 {
        match *self {
            PORT_IDR::RFC_SMI_CL_IN => 56,
            PORT_IDR::RFC_SMI_CL_OUT => 55,
            PORT_IDR::RFC_SMI_DL_IN => 54,
            PORT_IDR::RFC_SMI_DL_OUT => 53,
            PORT_IDR::RFC_GPI1 => 52,
            PORT_IDR::RFC_GPI0 => 51,
            PORT_IDR::RFC_GPO3 => 50,
            PORT_IDR::RFC_GPO2 => 49,
            PORT_IDR::RFC_GPO1 => 48,
            PORT_IDR::RFC_GPO0 => 47,
            PORT_IDR::RFC_TRC => 46,
            PORT_IDR::I2S_MCLK => 41,
            PORT_IDR::I2S_BCLK => 40,
            PORT_IDR::I2S_WCLK => 39,
            PORT_IDR::I2S_AD1 => 38,
            PORT_IDR::I2S_AD0 => 37,
            PORT_IDR::SSI1_CLK => 36,
            PORT_IDR::SSI1_FSS => 35,
            PORT_IDR::SSI1_TX => 34,
            PORT_IDR::SSI1_RX => 33,
            PORT_IDR::CPU_SWV => 32,
            PORT_IDR::PORT_EVENT7 => 30,
            PORT_IDR::PORT_EVENT6 => 29,
            PORT_IDR::PORT_EVENT5 => 28,
            PORT_IDR::PORT_EVENT4 => 27,
            PORT_IDR::PORT_EVENT3 => 26,
            PORT_IDR::PORT_EVENT2 => 25,
            PORT_IDR::PORT_EVENT1 => 24,
            PORT_IDR::PORT_EVENT0 => 23,
            PORT_IDR::UART0_RTS => 18,
            PORT_IDR::UART0_CTS => 17,
            PORT_IDR::UART0_TX => 16,
            PORT_IDR::UART0_RX => 15,
            PORT_IDR::I2C_MSSCL => 14,
            PORT_IDR::I2C_MSSDA => 13,
            PORT_IDR::SSI0_CLK => 12,
            PORT_IDR::SSI0_FSS => 11,
            PORT_IDR::SSI0_TX => 10,
            PORT_IDR::SSI0_RX => 9,
            PORT_IDR::AUX_IO => 8,
            PORT_IDR::AON_CLK32K => 7,
            PORT_IDR::GPIO => 0,
            PORT_IDR::_Reserved(bits) => bits,
        }
    }
    // Inverse of `bits()`: any raw value without a named variant falls
    // through to `_Reserved`, so the conversion is total over u8.
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _from(value: u8) -> PORT_IDR {
        match value {
            56 => PORT_IDR::RFC_SMI_CL_IN,
            55 => PORT_IDR::RFC_SMI_CL_OUT,
            54 => PORT_IDR::RFC_SMI_DL_IN,
            53 => PORT_IDR::RFC_SMI_DL_OUT,
            52 => PORT_IDR::RFC_GPI1,
            51 => PORT_IDR::RFC_GPI0,
            50 => PORT_IDR::RFC_GPO3,
            49 => PORT_IDR::RFC_GPO2,
            48 => PORT_IDR::RFC_GPO1,
            47 => PORT_IDR::RFC_GPO0,
            46 => PORT_IDR::RFC_TRC,
            41 => PORT_IDR::I2S_MCLK,
            40 => PORT_IDR::I2S_BCLK,
            39 => PORT_IDR::I2S_WCLK,
            38 => PORT_IDR::I2S_AD1,
            37 => PORT_IDR::I2S_AD0,
            36 => PORT_IDR::SSI1_CLK,
            35 => PORT_IDR::SSI1_FSS,
            34 => PORT_IDR::SSI1_TX,
            33 => PORT_IDR::SSI1_RX,
            32 => PORT_IDR::CPU_SWV,
            30 => PORT_IDR::PORT_EVENT7,
            29 => PORT_IDR::PORT_EVENT6,
            28 => PORT_IDR::PORT_EVENT5,
            27 => PORT_IDR::PORT_EVENT4,
            26 => PORT_IDR::PORT_EVENT3,
            25 => PORT_IDR::PORT_EVENT2,
            24 => PORT_IDR::PORT_EVENT1,
            23 => PORT_IDR::PORT_EVENT0,
            18 => PORT_IDR::UART0_RTS,
            17 => PORT_IDR::UART0_CTS,
            16 => PORT_IDR::UART0_TX,
            15 => PORT_IDR::UART0_RX,
            14 => PORT_IDR::I2C_MSSCL,
            13 => PORT_IDR::I2C_MSSDA,
            12 => PORT_IDR::SSI0_CLK,
            11 => PORT_IDR::SSI0_FSS,
            10 => PORT_IDR::SSI0_TX,
            9 => PORT_IDR::SSI0_RX,
            8 => PORT_IDR::AUX_IO,
            7 => PORT_IDR::AON_CLK32K,
            0 => PORT_IDR::GPIO,
            i => PORT_IDR::_Reserved(i),
        }
    }
    // One generated equality predicate per named variant follows.
    #[doc = "Checks if the value of the field is `RFC_SMI_CL_IN`"]
    #[inline]
    pub fn is_rfc_smi_cl_in(&self) -> bool {
        *self == PORT_IDR::RFC_SMI_CL_IN
    }
    #[doc = "Checks if the value of the field is `RFC_SMI_CL_OUT`"]
    #[inline]
    pub fn is_rfc_smi_cl_out(&self) -> bool {
        *self == PORT_IDR::RFC_SMI_CL_OUT
    }
    #[doc = "Checks if the value of the field is `RFC_SMI_DL_IN`"]
    #[inline]
    pub fn is_rfc_smi_dl_in(&self) -> bool {
        *self == PORT_IDR::RFC_SMI_DL_IN
    }
    #[doc = "Checks if the value of the field is `RFC_SMI_DL_OUT`"]
    #[inline]
    pub fn is_rfc_smi_dl_out(&self) -> bool {
        *self == PORT_IDR::RFC_SMI_DL_OUT
    }
    #[doc = "Checks if the value of the field is `RFC_GPI1`"]
    #[inline]
    pub fn is_rfc_gpi1(&self) -> bool {
        *self == PORT_IDR::RFC_GPI1
    }
    #[doc = "Checks if the value of the field is `RFC_GPI0`"]
    #[inline]
    pub fn is_rfc_gpi0(&self) -> bool {
        *self == PORT_IDR::RFC_GPI0
    }
    #[doc = "Checks if the value of the field is `RFC_GPO3`"]
    #[inline]
    pub fn is_rfc_gpo3(&self) -> bool {
        *self == PORT_IDR::RFC_GPO3
    }
    #[doc = "Checks if the value of the field is `RFC_GPO2`"]
    #[inline]
    pub fn is_rfc_gpo2(&self) -> bool {
        *self == PORT_IDR::RFC_GPO2
    }
    #[doc = "Checks if the value of the field is `RFC_GPO1`"]
    #[inline]
    pub fn is_rfc_gpo1(&self) -> bool {
        *self == PORT_IDR::RFC_GPO1
    }
    #[doc = "Checks if the value of the field is `RFC_GPO0`"]
    #[inline]
    pub fn is_rfc_gpo0(&self) -> bool {
        *self == PORT_IDR::RFC_GPO0
    }
    #[doc = "Checks if the value of the field is `RFC_TRC`"]
    #[inline]
    pub fn is_rfc_trc(&self) -> bool {
        *self == PORT_IDR::RFC_TRC
    }
    #[doc = "Checks if the value of the field is `I2S_MCLK`"]
    #[inline]
    pub fn is_i2s_mclk(&self) -> bool {
        *self == PORT_IDR::I2S_MCLK
    }
    #[doc = "Checks if the value of the field is `I2S_BCLK`"]
    #[inline]
    pub fn is_i2s_bclk(&self) -> bool {
        *self == PORT_IDR::I2S_BCLK
    }
    #[doc = "Checks if the value of the field is `I2S_WCLK`"]
    #[inline]
    pub fn is_i2s_wclk(&self) -> bool {
        *self == PORT_IDR::I2S_WCLK
    }
    #[doc = "Checks if the value of the field is `I2S_AD1`"]
    #[inline]
    pub fn is_i2s_ad1(&self) -> bool {
        *self == PORT_IDR::I2S_AD1
    }
    #[doc = "Checks if the value of the field is `I2S_AD0`"]
    #[inline]
    pub fn is_i2s_ad0(&self) -> bool {
        *self == PORT_IDR::I2S_AD0
    }
    #[doc = "Checks if the value of the field is `SSI1_CLK`"]
    #[inline]
    pub fn is_ssi1_clk(&self) -> bool {
        *self == PORT_IDR::SSI1_CLK
    }
    #[doc = "Checks if the value of the field is `SSI1_FSS`"]
    #[inline]
    pub fn is_ssi1_fss(&self) -> bool {
        *self == PORT_IDR::SSI1_FSS
    }
    #[doc = "Checks if the value of the field is `SSI1_TX`"]
    #[inline]
    pub fn is_ssi1_tx(&self) -> bool {
        *self == PORT_IDR::SSI1_TX
    }
    #[doc = "Checks if the value of the field is `SSI1_RX`"]
    #[inline]
    pub fn is_ssi1_rx(&self) -> bool {
        *self == PORT_IDR::SSI1_RX
    }
    #[doc = "Checks if the value of the field is `CPU_SWV`"]
    #[inline]
    pub fn is_cpu_swv(&self) -> bool {
        *self == PORT_IDR::CPU_SWV
    }
    #[doc = "Checks if the value of the field is `PORT_EVENT7`"]
    #[inline]
    pub fn is_port_event7(&self) -> bool {
        *self == PORT_IDR::PORT_EVENT7
    }
    #[doc = "Checks if the value of the field is `PORT_EVENT6`"]
    #[inline]
    pub fn is_port_event6(&self) -> bool {
        *self == PORT_IDR::PORT_EVENT6
    }
    #[doc = "Checks if the value of the field is `PORT_EVENT5`"]
    #[inline]
    pub fn is_port_event5(&self) -> bool {
        *self == PORT_IDR::PORT_EVENT5
    }
    #[doc = "Checks if the value of the field is `PORT_EVENT4`"]
    #[inline]
    pub fn is_port_event4(&self) -> bool {
        *self == PORT_IDR::PORT_EVENT4
    }
    #[doc = "Checks if the value of the field is `PORT_EVENT3`"]
    #[inline]
    pub fn is_port_event3(&self) -> bool {
        *self == PORT_IDR::PORT_EVENT3
    }
    #[doc = "Checks if the value of the field is `PORT_EVENT2`"]
    #[inline]
    pub fn is_port_event2(&self) -> bool {
        *self == PORT_IDR::PORT_EVENT2
    }
    #[doc = "Checks if the value of the field is `PORT_EVENT1`"]
    #[inline]
    pub fn is_port_event1(&self) -> bool {
        *self == PORT_IDR::PORT_EVENT1
    }
    #[doc = "Checks if the value of the field is `PORT_EVENT0`"]
    #[inline]
    pub fn is_port_event0(&self) -> bool {
        *self == PORT_IDR::PORT_EVENT0
    }
    #[doc = "Checks if the value of the field is `UART0_RTS`"]
    #[inline]
    pub fn is_uart0_rts(&self) -> bool {
        *self == PORT_IDR::UART0_RTS
    }
    #[doc = "Checks if the value of the field is `UART0_CTS`"]
    #[inline]
    pub fn is_uart0_cts(&self) -> bool {
        *self == PORT_IDR::UART0_CTS
    }
    #[doc = "Checks if the value of the field is `UART0_TX`"]
    #[inline]
    pub fn is_uart0_tx(&self) -> bool {
        *self == PORT_IDR::UART0_TX
    }
    #[doc = "Checks if the value of the field is `UART0_RX`"]
    #[inline]
    pub fn is_uart0_rx(&self) -> bool {
        *self == PORT_IDR::UART0_RX
    }
    #[doc = "Checks if the value of the field is `I2C_MSSCL`"]
    #[inline]
    pub fn is_i2c_msscl(&self) -> bool {
        *self == PORT_IDR::I2C_MSSCL
    }
    #[doc = "Checks if the value of the field is `I2C_MSSDA`"]
    #[inline]
    pub fn is_i2c_mssda(&self) -> bool {
        *self == PORT_IDR::I2C_MSSDA
    }
    #[doc = "Checks if the value of the field is `SSI0_CLK`"]
    #[inline]
    pub fn is_ssi0_clk(&self) -> bool {
        *self == PORT_IDR::SSI0_CLK
    }
    #[doc = "Checks if the value of the field is `SSI0_FSS`"]
    #[inline]
    pub fn is_ssi0_fss(&self) -> bool {
        *self == PORT_IDR::SSI0_FSS
    }
    #[doc = "Checks if the value of the field is `SSI0_TX`"]
    #[inline]
    pub fn is_ssi0_tx(&self) -> bool {
        *self == PORT_IDR::SSI0_TX
    }
    #[doc = "Checks if the value of the field is `SSI0_RX`"]
    #[inline]
    pub fn is_ssi0_rx(&self) -> bool {
        *self == PORT_IDR::SSI0_RX
    }
    #[doc = "Checks if the value of the field is `AUX_IO`"]
    #[inline]
    pub fn is_aux_io(&self) -> bool {
        *self == PORT_IDR::AUX_IO
    }
    #[doc = "Checks if the value of the field is `AON_CLK32K`"]
    #[inline]
    pub fn is_aon_clk32k(&self) -> bool {
        *self == PORT_IDR::AON_CLK32K
    }
    #[doc = "Checks if the value of the field is `GPIO`"]
    #[inline]
    pub fn is_gpio(&self) -> bool {
        *self == PORT_IDR::GPIO
    }
}
/// Proxy
pub struct _HYST_ENW<'a> {
    w: &'a mut W,
}
impl<'a> _HYST_ENW<'a> {
    /// Sets the field bit
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        // HYST_EN is the single bit at position 30: clear the slot,
        // then splice in the requested value in one read-modify-write.
        const OFFSET: u8 = 30;
        let cleared = self.w.bits & !(1u32 << OFFSET);
        self.w.bits = cleared | ((value as u32) << OFFSET);
        self.w
    }
}
/// Proxy
pub struct _IEW<'a> {
    w: &'a mut W,
}
impl<'a> _IEW<'a> {
    /// Sets the field bit
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        // IE is the single bit at position 29: clear the slot, then
        // splice in the requested value in one read-modify-write.
        const OFFSET: u8 = 29;
        let cleared = self.w.bits & !(1u32 << OFFSET);
        self.w.bits = cleared | ((value as u32) << OFFSET);
        self.w
    }
}
/// Proxy
pub struct _WU_CFGW<'a> {
    w: &'a mut W,
}
impl<'a> _WU_CFGW<'a> {
    /// Writes raw bits to the field
    #[inline]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // WU_CFG is the 2-bit field at bits 27:28.
        const MASK: u8 = 3;
        const OFFSET: u8 = 27;
        let field = ((value & MASK) as u32) << OFFSET;
        self.w.bits = (self.w.bits & !((MASK as u32) << OFFSET)) | field;
        self.w
    }
}
/// Values that can be written to the field `IOMODE`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum IOMODEW {
    /// Open source, inverted input/output
    OPENSRC_INV,
    /// Open source, normal input/output
    OPENSRC,
    /// Open drain, inverted input/output
    OPENDR_INV,
    /// Open drain, normal input/output
    OPENDR,
    /// Inverted input/output
    INV,
    /// Normal input/output
    NORMAL,
}
impl IOMODEW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> u8 {
        // Raw 3-bit encodings, listed ascending; encodings 2 and 3 are
        // reserved by the hardware and have no variant.
        match *self {
            IOMODEW::NORMAL => 0,
            IOMODEW::INV => 1,
            IOMODEW::OPENDR => 4,
            IOMODEW::OPENDR_INV => 5,
            IOMODEW::OPENSRC => 6,
            IOMODEW::OPENSRC_INV => 7,
        }
    }
}
/// Proxy
pub struct _IOMODEW<'a> {
    w: &'a mut W,
}
impl<'a> _IOMODEW<'a> {
    /// Writes `variant` to the field
    #[inline]
    pub fn variant(self, variant: IOMODEW) -> &'a mut W {
        // Every IOMODEW variant encodes an in-range value, so the raw
        // write is sound here.
        unsafe { self.bits(variant._bits()) }
    }
    /// Open source, inverted input/output
    #[inline]
    pub fn opensrc_inv(self) -> &'a mut W {
        self.variant(IOMODEW::OPENSRC_INV)
    }
    /// Open source, normal input/output
    #[inline]
    pub fn opensrc(self) -> &'a mut W {
        self.variant(IOMODEW::OPENSRC)
    }
    /// Open drain, inverted input/output
    #[inline]
    pub fn opendr_inv(self) -> &'a mut W {
        self.variant(IOMODEW::OPENDR_INV)
    }
    /// Open drain, normal input/output
    #[inline]
    pub fn opendr(self) -> &'a mut W {
        self.variant(IOMODEW::OPENDR)
    }
    /// Inverted input/output
    #[inline]
    pub fn inv(self) -> &'a mut W {
        self.variant(IOMODEW::INV)
    }
    /// Normal input/output
    #[inline]
    pub fn normal(self) -> &'a mut W {
        self.variant(IOMODEW::NORMAL)
    }
    /// Writes raw bits to the field
    #[inline]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // IOMODE is the 3-bit field at bits 24:26.
        const MASK: u8 = 7;
        const OFFSET: u8 = 24;
        let field = ((value & MASK) as u32) << OFFSET;
        self.w.bits = (self.w.bits & !((MASK as u32) << OFFSET)) | field;
        self.w
    }
}
/// Proxy
pub struct _RESERVED19W<'a> {
    w: &'a mut W,
}
impl<'a> _RESERVED19W<'a> {
    /// Writes raw bits to the field
    #[inline]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Reserved 5-bit field at bits 19:23.
        const MASK: u8 = 31;
        const OFFSET: u8 = 19;
        let field = ((value & MASK) as u32) << OFFSET;
        self.w.bits = (self.w.bits & !((MASK as u32) << OFFSET)) | field;
        self.w
    }
}
/// Proxy
pub struct _EDGE_IRQ_ENW<'a> {
    w: &'a mut W,
}
impl<'a> _EDGE_IRQ_ENW<'a> {
    /// Sets the field bit
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        // EDGE_IRQ_EN is the single bit at position 18: clear the slot,
        // then splice in the requested value in one read-modify-write.
        const OFFSET: u8 = 18;
        let cleared = self.w.bits & !(1u32 << OFFSET);
        self.w.bits = cleared | ((value as u32) << OFFSET);
        self.w
    }
}
/// Values that can be written to the field `EDGE_DET`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EDGE_DETW {
    /// Positive and negative edge detection
    BOTH,
    /// Positive edge detection
    POS,
    /// Negative edge detection
    NEG,
    /// No edge detection
    NONE,
}
impl EDGE_DETW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> u8 {
        // All four 2-bit encodings are covered, listed ascending.
        match *self {
            EDGE_DETW::NONE => 0,
            EDGE_DETW::NEG => 1,
            EDGE_DETW::POS => 2,
            EDGE_DETW::BOTH => 3,
        }
    }
}
/// Proxy
pub struct _EDGE_DETW<'a> {
    w: &'a mut W,
}
impl<'a> _EDGE_DETW<'a> {
    /// Writes `variant` to the field
    #[inline]
    pub fn variant(self, variant: EDGE_DETW) -> &'a mut W {
        self.bits(variant._bits())
    }
    /// Positive and negative edge detection
    #[inline]
    pub fn both(self) -> &'a mut W {
        self.variant(EDGE_DETW::BOTH)
    }
    /// Positive edge detection
    #[inline]
    pub fn pos(self) -> &'a mut W {
        self.variant(EDGE_DETW::POS)
    }
    /// Negative edge detection
    #[inline]
    pub fn neg(self) -> &'a mut W {
        self.variant(EDGE_DETW::NEG)
    }
    /// No edge detection
    #[inline]
    pub fn none(self) -> &'a mut W {
        self.variant(EDGE_DETW::NONE)
    }
    /// Writes raw bits to the field
    #[inline]
    pub fn bits(self, value: u8) -> &'a mut W {
        // EDGE_DET is the 2-bit field at bits 16:17; every encoding is
        // a valid variant, so this raw setter can be safe.
        const MASK: u8 = 3;
        const OFFSET: u8 = 16;
        let field = ((value & MASK) as u32) << OFFSET;
        self.w.bits = (self.w.bits & !((MASK as u32) << OFFSET)) | field;
        self.w
    }
}
/// Values that can be written to the field `PULL_CTL`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PULL_CTLW {
    /// No pull
    DIS,
    /// Pull up
    UP,
    /// Pull down
    DWN,
}
impl PULL_CTLW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> u8 {
        // Raw 2-bit encodings, listed ascending; encoding 0 has no variant.
        match *self {
            PULL_CTLW::DWN => 1,
            PULL_CTLW::UP => 2,
            PULL_CTLW::DIS => 3,
        }
    }
}
/// Proxy
pub struct _PULL_CTLW<'a> {
    w: &'a mut W,
}
impl<'a> _PULL_CTLW<'a> {
    /// Writes `variant` to the field
    #[inline]
    pub fn variant(self, variant: PULL_CTLW) -> &'a mut W {
        // Every PULL_CTLW variant encodes an in-range value, so the raw
        // write is sound here.
        unsafe { self.bits(variant._bits()) }
    }
    /// No pull
    #[inline]
    pub fn dis(self) -> &'a mut W {
        self.variant(PULL_CTLW::DIS)
    }
    /// Pull up
    #[inline]
    pub fn up(self) -> &'a mut W {
        self.variant(PULL_CTLW::UP)
    }
    /// Pull down
    #[inline]
    pub fn dwn(self) -> &'a mut W {
        self.variant(PULL_CTLW::DWN)
    }
    /// Writes raw bits to the field
    #[inline]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // PULL_CTL is the 2-bit field at bits 13:14.
        const MASK: u8 = 3;
        const OFFSET: u8 = 13;
        let field = ((value & MASK) as u32) << OFFSET;
        self.w.bits = (self.w.bits & !((MASK as u32) << OFFSET)) | field;
        self.w
    }
}
/// Proxy
pub struct _SLEW_REDW<'a> {
    w: &'a mut W,
}
impl<'a> _SLEW_REDW<'a> {
    /// Sets the field bit
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        // SLEW_RED is the single bit at position 12: clear the slot,
        // then splice in the requested value in one read-modify-write.
        const OFFSET: u8 = 12;
        let cleared = self.w.bits & !(1u32 << OFFSET);
        self.w.bits = cleared | ((value as u32) << OFFSET);
        self.w
    }
}
/// Values that can be written to the field `IOCURR`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum IOCURRW {
    /// Extended-Current (EC) mode: min 8 mA for double drive strength
    /// IOs (min 4 mA for normal IOs) when IOSTR is set to AUTO
    _4_8MA,
    /// High-Current (HC) mode: min 4 mA when IOSTR is set to AUTO
    _4MA,
    /// Low-Current (LC) mode: min 2 mA when IOSTR is set to AUTO
    _2MA,
}
impl IOCURRW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> u8 {
        // Raw 2-bit encodings, listed ascending; encoding 3 has no variant.
        match *self {
            IOCURRW::_2MA => 0,
            IOCURRW::_4MA => 1,
            IOCURRW::_4_8MA => 2,
        }
    }
}
/// Proxy
pub struct _IOCURRW<'a> {
    w: &'a mut W,
}
impl<'a> _IOCURRW<'a> {
    /// Writes `variant` to the field
    #[inline]
    pub fn variant(self, variant: IOCURRW) -> &'a mut W {
        // Every IOCURRW variant encodes an in-range value, so the raw
        // write is sound here.
        unsafe { self.bits(variant._bits()) }
    }
    /// Extended-Current (EC) mode: min 8 mA for double drive strength
    /// IOs (min 4 mA for normal IOs) when IOSTR is set to AUTO
    #[inline]
    pub fn _4_8ma(self) -> &'a mut W {
        self.variant(IOCURRW::_4_8MA)
    }
    /// High-Current (HC) mode: min 4 mA when IOSTR is set to AUTO
    #[inline]
    pub fn _4ma(self) -> &'a mut W {
        self.variant(IOCURRW::_4MA)
    }
    /// Low-Current (LC) mode: min 2 mA when IOSTR is set to AUTO
    #[inline]
    pub fn _2ma(self) -> &'a mut W {
        self.variant(IOCURRW::_2MA)
    }
    /// Writes raw bits to the field
    #[inline]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // IOCURR is the 2-bit field at bits 10:11.
        const MASK: u8 = 3;
        const OFFSET: u8 = 10;
        let field = ((value & MASK) as u32) << OFFSET;
        self.w.bits = (self.w.bits & !((MASK as u32) << OFFSET)) | field;
        self.w
    }
}
/// Values that can be written to the field `IOSTR`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum IOSTRW {
    /// Maximum drive strength, controlled by AON_IOC:IOSTRMAX
    /// (min 2 mA @1.8V with default values)
    MAX,
    /// Medium drive strength, controlled by AON_IOC:IOSTRMED
    /// (min 2 mA @2.5V with default values)
    MED,
    /// Minimum drive strength, controlled by AON_IOC:IOSTRMIN
    /// (min 2 mA @3.3V with default values)
    MIN,
    /// Automatic drive strength, controlled by AON BATMON based on
    /// battery voltage. (min 2 mA @VDDS)
    AUTO,
}
impl IOSTRW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> u8 {
        // All four 2-bit encodings are covered, listed ascending.
        match *self {
            IOSTRW::AUTO => 0,
            IOSTRW::MIN => 1,
            IOSTRW::MED => 2,
            IOSTRW::MAX => 3,
        }
    }
}
/// Proxy
pub struct _IOSTRW<'a> {
    w: &'a mut W,
}
impl<'a> _IOSTRW<'a> {
    /// Writes `variant` to the field
    #[inline]
    pub fn variant(self, variant: IOSTRW) -> &'a mut W {
        self.bits(variant._bits())
    }
    /// Maximum drive strength, controlled by AON_IOC:IOSTRMAX
    /// (min 2 mA @1.8V with default values)
    #[inline]
    pub fn max(self) -> &'a mut W {
        self.variant(IOSTRW::MAX)
    }
    /// Medium drive strength, controlled by AON_IOC:IOSTRMED
    /// (min 2 mA @2.5V with default values)
    #[inline]
    pub fn med(self) -> &'a mut W {
        self.variant(IOSTRW::MED)
    }
    /// Minimum drive strength, controlled by AON_IOC:IOSTRMIN
    /// (min 2 mA @3.3V with default values)
    #[inline]
    pub fn min(self) -> &'a mut W {
        self.variant(IOSTRW::MIN)
    }
    /// Automatic drive strength, controlled by AON BATMON based on
    /// battery voltage. (min 2 mA @VDDS)
    #[inline]
    pub fn auto(self) -> &'a mut W {
        self.variant(IOSTRW::AUTO)
    }
    /// Writes raw bits to the field
    #[inline]
    pub fn bits(self, value: u8) -> &'a mut W {
        // IOSTR is the 2-bit field at bits 8:9; every encoding is a
        // valid variant, so this raw setter can be safe.
        const MASK: u8 = 3;
        const OFFSET: u8 = 8;
        let field = ((value & MASK) as u32) << OFFSET;
        self.w.bits = (self.w.bits & !((MASK as u32) << OFFSET)) | field;
        self.w
    }
}
// Write view of the 6-bit PORT_ID field: every peripheral signal that can
// be routed to this DIO. Unlike the read-side PORT_IDR there is no
// `_Reserved` variant, so only valid encodings can be written via
// `variant()`. NOTE(review): looks like svd2rust-generated output —
// prefer regenerating from the SVD over hand-editing.
#[doc = "Values that can be written to the field `PORT_ID`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PORT_IDW {
    #[doc = "RF Core SMI Command Link In"]
    RFC_SMI_CL_IN,
    #[doc = "RF Core SMI Command Link Out"]
    RFC_SMI_CL_OUT,
    #[doc = "RF Core SMI Data Link In"]
    RFC_SMI_DL_IN,
    #[doc = "RF Core SMI Data Link Out"]
    RFC_SMI_DL_OUT,
    #[doc = "RF Core Data In 1"]
    RFC_GPI1,
    #[doc = "RF Core Data In 0"]
    RFC_GPI0,
    #[doc = "RF Core Data Out 3"]
    RFC_GPO3,
    #[doc = "RF Core Data Out 2"]
    RFC_GPO2,
    #[doc = "RF Core Data Out 1"]
    RFC_GPO1,
    #[doc = "RF Core Data Out 0"]
    RFC_GPO0,
    #[doc = "RF Core Trace"]
    RFC_TRC,
    #[doc = "I2S MCLK "]
    I2S_MCLK,
    #[doc = "I2S BCLK "]
    I2S_BCLK,
    #[doc = "I2S WCLK "]
    I2S_WCLK,
    #[doc = "I2S Data 1"]
    I2S_AD1,
    #[doc = "I2S Data 0"]
    I2S_AD0,
    #[doc = "SSI1 CLK"]
    SSI1_CLK,
    #[doc = "SSI1 FSS "]
    SSI1_FSS,
    #[doc = "SSI1 TX "]
    SSI1_TX,
    #[doc = "SSI1 RX "]
    SSI1_RX,
    #[doc = "CPU SWV "]
    CPU_SWV,
    #[doc = "PORT EVENT 7\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT7,
    #[doc = "PORT EVENT 6\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT6,
    #[doc = "PORT EVENT 5\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT5,
    #[doc = "PORT EVENT 4\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT4,
    #[doc = "PORT EVENT 3\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT3,
    #[doc = "PORT EVENT 2\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT2,
    #[doc = "PORT EVENT 1\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT1,
    #[doc = "PORT EVENT 0\nCan be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    PORT_EVENT0,
    #[doc = "UART0 RTS "]
    UART0_RTS,
    #[doc = "UART0 CTS "]
    UART0_CTS,
    #[doc = "UART0 TX "]
    UART0_TX,
    #[doc = "UART0 RX "]
    UART0_RX,
    #[doc = "I2C Clock"]
    I2C_MSSCL,
    #[doc = "I2C Data"]
    I2C_MSSDA,
    #[doc = "SSI0 CLK"]
    SSI0_CLK,
    #[doc = "SSI0 FSS "]
    SSI0_FSS,
    #[doc = "SSI0 TX "]
    SSI0_TX,
    #[doc = "SSI0 RX "]
    SSI0_RX,
    #[doc = "AUX IO "]
    AUX_IO,
    #[doc = "AON 32 KHz clock (SCLK_LF)"]
    AON_CLK32K,
    #[doc = "General Purpose IO "]
    GPIO,
}
// Encoding table — mirrors PORT_IDR::bits() for the readable variants.
impl PORT_IDW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> u8 {
        match *self {
            PORT_IDW::RFC_SMI_CL_IN => 56,
            PORT_IDW::RFC_SMI_CL_OUT => 55,
            PORT_IDW::RFC_SMI_DL_IN => 54,
            PORT_IDW::RFC_SMI_DL_OUT => 53,
            PORT_IDW::RFC_GPI1 => 52,
            PORT_IDW::RFC_GPI0 => 51,
            PORT_IDW::RFC_GPO3 => 50,
            PORT_IDW::RFC_GPO2 => 49,
            PORT_IDW::RFC_GPO1 => 48,
            PORT_IDW::RFC_GPO0 => 47,
            PORT_IDW::RFC_TRC => 46,
            PORT_IDW::I2S_MCLK => 41,
            PORT_IDW::I2S_BCLK => 40,
            PORT_IDW::I2S_WCLK => 39,
            PORT_IDW::I2S_AD1 => 38,
            PORT_IDW::I2S_AD0 => 37,
            PORT_IDW::SSI1_CLK => 36,
            PORT_IDW::SSI1_FSS => 35,
            PORT_IDW::SSI1_TX => 34,
            PORT_IDW::SSI1_RX => 33,
            PORT_IDW::CPU_SWV => 32,
            PORT_IDW::PORT_EVENT7 => 30,
            PORT_IDW::PORT_EVENT6 => 29,
            PORT_IDW::PORT_EVENT5 => 28,
            PORT_IDW::PORT_EVENT4 => 27,
            PORT_IDW::PORT_EVENT3 => 26,
            PORT_IDW::PORT_EVENT2 => 25,
            PORT_IDW::PORT_EVENT1 => 24,
            PORT_IDW::PORT_EVENT0 => 23,
            PORT_IDW::UART0_RTS => 18,
            PORT_IDW::UART0_CTS => 17,
            PORT_IDW::UART0_TX => 16,
            PORT_IDW::UART0_RX => 15,
            PORT_IDW::I2C_MSSCL => 14,
            PORT_IDW::I2C_MSSDA => 13,
            PORT_IDW::SSI0_CLK => 12,
            PORT_IDW::SSI0_FSS => 11,
            PORT_IDW::SSI0_TX => 10,
            PORT_IDW::SSI0_RX => 9,
            PORT_IDW::AUX_IO => 8,
            PORT_IDW::AON_CLK32K => 7,
            PORT_IDW::GPIO => 0,
        }
    }
}
// Write proxy for the PORT_ID field: one convenience setter per routable
// signal, each delegating through variant() -> bits(). Generated
// delegation table; routing semantics are documented on PORT_IDW.
#[doc = r" Proxy"]
pub struct _PORT_IDW<'a> {
    w: &'a mut W,
}
impl<'a> _PORT_IDW<'a> {
    #[doc = r" Writes `variant` to the field"]
    #[inline]
    pub fn variant(self, variant: PORT_IDW) -> &'a mut W {
        // Sound: every PORT_IDW variant encodes a value within MASK.
        unsafe { self.bits(variant._bits()) }
    }
    #[doc = "RF Core SMI Command Link In"]
    #[inline]
    pub fn rfc_smi_cl_in(self) -> &'a mut W {
        self.variant(PORT_IDW::RFC_SMI_CL_IN)
    }
    #[doc = "RF Core SMI Command Link Out"]
    #[inline]
    pub fn rfc_smi_cl_out(self) -> &'a mut W {
        self.variant(PORT_IDW::RFC_SMI_CL_OUT)
    }
    #[doc = "RF Core SMI Data Link In"]
    #[inline]
    pub fn rfc_smi_dl_in(self) -> &'a mut W {
        self.variant(PORT_IDW::RFC_SMI_DL_IN)
    }
    #[doc = "RF Core SMI Data Link Out"]
    #[inline]
    pub fn rfc_smi_dl_out(self) -> &'a mut W {
        self.variant(PORT_IDW::RFC_SMI_DL_OUT)
    }
    #[doc = "RF Core Data In 1"]
    #[inline]
    pub fn rfc_gpi1(self) -> &'a mut W {
        self.variant(PORT_IDW::RFC_GPI1)
    }
    #[doc = "RF Core Data In 0"]
    #[inline]
    pub fn rfc_gpi0(self) -> &'a mut W {
        self.variant(PORT_IDW::RFC_GPI0)
    }
    #[doc = "RF Core Data Out 3"]
    #[inline]
    pub fn rfc_gpo3(self) -> &'a mut W {
        self.variant(PORT_IDW::RFC_GPO3)
    }
    #[doc = "RF Core Data Out 2"]
    #[inline]
    pub fn rfc_gpo2(self) -> &'a mut W {
        self.variant(PORT_IDW::RFC_GPO2)
    }
    #[doc = "RF Core Data Out 1"]
    #[inline]
    pub fn rfc_gpo1(self) -> &'a mut W {
        self.variant(PORT_IDW::RFC_GPO1)
    }
    #[doc = "RF Core Data Out 0"]
    #[inline]
    pub fn rfc_gpo0(self) -> &'a mut W {
        self.variant(PORT_IDW::RFC_GPO0)
    }
    #[doc = "RF Core Trace"]
    #[inline]
    pub fn rfc_trc(self) -> &'a mut W {
        self.variant(PORT_IDW::RFC_TRC)
    }
    #[doc = "I2S MCLK"]
    #[inline]
    pub fn i2s_mclk(self) -> &'a mut W {
        self.variant(PORT_IDW::I2S_MCLK)
    }
    #[doc = "I2S BCLK"]
    #[inline]
    pub fn i2s_bclk(self) -> &'a mut W {
        self.variant(PORT_IDW::I2S_BCLK)
    }
    #[doc = "I2S WCLK"]
    #[inline]
    pub fn i2s_wclk(self) -> &'a mut W {
        self.variant(PORT_IDW::I2S_WCLK)
    }
    #[doc = "I2S Data 1"]
    #[inline]
    pub fn i2s_ad1(self) -> &'a mut W {
        self.variant(PORT_IDW::I2S_AD1)
    }
    #[doc = "I2S Data 0"]
    #[inline]
    pub fn i2s_ad0(self) -> &'a mut W {
        self.variant(PORT_IDW::I2S_AD0)
    }
    #[doc = "SSI1 CLK"]
    #[inline]
    pub fn ssi1_clk(self) -> &'a mut W {
        self.variant(PORT_IDW::SSI1_CLK)
    }
    #[doc = "SSI1 FSS"]
    #[inline]
    pub fn ssi1_fss(self) -> &'a mut W {
        self.variant(PORT_IDW::SSI1_FSS)
    }
    #[doc = "SSI1 TX"]
    #[inline]
    pub fn ssi1_tx(self) -> &'a mut W {
        self.variant(PORT_IDW::SSI1_TX)
    }
    #[doc = "SSI1 RX"]
    #[inline]
    pub fn ssi1_rx(self) -> &'a mut W {
        self.variant(PORT_IDW::SSI1_RX)
    }
    #[doc = "CPU SWV"]
    #[inline]
    pub fn cpu_swv(self) -> &'a mut W {
        self.variant(PORT_IDW::CPU_SWV)
    }
    #[doc = "PORT EVENT 7 Can be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    #[inline]
    pub fn port_event7(self) -> &'a mut W {
        self.variant(PORT_IDW::PORT_EVENT7)
    }
    #[doc = "PORT EVENT 6 Can be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    #[inline]
    pub fn port_event6(self) -> &'a mut W {
        self.variant(PORT_IDW::PORT_EVENT6)
    }
    #[doc = "PORT EVENT 5 Can be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    #[inline]
    pub fn port_event5(self) -> &'a mut W {
        self.variant(PORT_IDW::PORT_EVENT5)
    }
    #[doc = "PORT EVENT 4 Can be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    #[inline]
    pub fn port_event4(self) -> &'a mut W {
        self.variant(PORT_IDW::PORT_EVENT4)
    }
    #[doc = "PORT EVENT 3 Can be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    #[inline]
    pub fn port_event3(self) -> &'a mut W {
        self.variant(PORT_IDW::PORT_EVENT3)
    }
    #[doc = "PORT EVENT 2 Can be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    #[inline]
    pub fn port_event2(self) -> &'a mut W {
        self.variant(PORT_IDW::PORT_EVENT2)
    }
    #[doc = "PORT EVENT 1 Can be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    #[inline]
    pub fn port_event1(self) -> &'a mut W {
        self.variant(PORT_IDW::PORT_EVENT1)
    }
    #[doc = "PORT EVENT 0 Can be used as a general purpose IO event by selecting it via registers in the EVENT module, e.g. EVENT:GPT0ACAPTSEL.EV, EVENT:UDMACH14BSEL.EV, etc"]
    #[inline]
    pub fn port_event0(self) -> &'a mut W {
        self.variant(PORT_IDW::PORT_EVENT0)
    }
    #[doc = "UART0 RTS"]
    #[inline]
    pub fn uart0_rts(self) -> &'a mut W {
        self.variant(PORT_IDW::UART0_RTS)
    }
    #[doc = "UART0 CTS"]
    #[inline]
    pub fn uart0_cts(self) -> &'a mut W {
        self.variant(PORT_IDW::UART0_CTS)
    }
    #[doc = "UART0 TX"]
    #[inline]
    pub fn uart0_tx(self) -> &'a mut W {
        self.variant(PORT_IDW::UART0_TX)
    }
    #[doc = "UART0 RX"]
    #[inline]
    pub fn uart0_rx(self) -> &'a mut W {
        self.variant(PORT_IDW::UART0_RX)
    }
    #[doc = "I2C Clock"]
    #[inline]
    pub fn i2c_msscl(self) -> &'a mut W {
        self.variant(PORT_IDW::I2C_MSSCL)
    }
    #[doc = "I2C Data"]
    #[inline]
    pub fn i2c_mssda(self) -> &'a mut W {
        self.variant(PORT_IDW::I2C_MSSDA)
    }
    #[doc = "SSI0 CLK"]
    #[inline]
    pub fn ssi0_clk(self) -> &'a mut W {
        self.variant(PORT_IDW::SSI0_CLK)
    }
    #[doc = "SSI0 FSS"]
    #[inline]
    pub fn ssi0_fss(self) -> &'a mut W {
        self.variant(PORT_IDW::SSI0_FSS)
    }
    #[doc = "SSI0 TX"]
    #[inline]
    pub fn ssi0_tx(self) -> &'a mut W {
        self.variant(PORT_IDW::SSI0_TX)
    }
    #[doc = "SSI0 RX"]
    #[inline]
    pub fn ssi0_rx(self) -> &'a mut W {
        self.variant(PORT_IDW::SSI0_RX)
    }
    #[doc = "AUX IO"]
    #[inline]
    pub fn aux_io(self) -> &'a mut W {
        self.variant(PORT_IDW::AUX_IO)
    }
    #[doc = "AON 32 KHz clock (SCLK_LF)"]
    #[inline]
    pub fn aon_clk32k(self) -> &'a mut W {
        self.variant(PORT_IDW::AON_CLK32K)
    }
    #[doc = "General Purpose IO"]
    #[inline]
    pub fn gpio(self) -> &'a mut W {
        self.variant(PORT_IDW::GPIO)
    }
    // PORT_ID is the 6-bit field at bits 0:5 (MASK = 0x3F).
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        const MASK: u8 = 63;
        const OFFSET: u8 = 0;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
// Read accessors for the IOCFG register value: `bits()` exposes the raw
// 32-bit word, and each field accessor extracts its mask/offset slice
// into the corresponding *R wrapper type (declared elsewhere in this
// file). Generated accessor table — each mask/offset pair must match
// the write-side proxies above.
impl R {
    #[doc = r" Value of the register as raw bits"]
    #[inline]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    #[doc = "Bit 31 - Software should not rely on the value of a reserved. Writing any other value than the reset value may result in undefined behavior."]
    #[inline]
    pub fn reserved31(&self) -> RESERVED31R {
        let bits = {
            const MASK: bool = true;
            const OFFSET: u8 = 31;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        };
        RESERVED31R { bits }
    }
    #[doc = "Bit 30 - 0: Input hysteresis disable 1: Input hysteresis enable"]
    #[inline]
    pub fn hyst_en(&self) -> HYST_ENR {
        let bits = {
            const MASK: bool = true;
            const OFFSET: u8 = 30;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        };
        HYST_ENR { bits }
    }
    #[doc = "Bit 29 - 0: Input disabled 1: Input enabled Note: If IO is configured for AUX ie. PORT_ID = 0x08, the enable will be ignored."]
    #[inline]
    pub fn ie(&self) -> IER {
        let bits = {
            const MASK: bool = true;
            const OFFSET: u8 = 29;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        };
        IER { bits }
    }
    #[doc = "Bits 27:28 - If DIO is configured GPIO or non-AON peripheral signals, i.e. PORT_ID 0x00 or >0x08: 00: No wake-up 01: No wake-up 10: Wakes up from shutdown if this pad is going low. 11: Wakes up from shutdown if this pad is going high. If IO is configured for AON peripheral signals or AUX ie. PORT_ID 0x01-0x08, this register only sets wakeup enable or not. 00, 01: Wakeup disabled 10, 11: Wakeup enabled Polarity is controlled from AON registers. Note:When the MSB is set, the IOC will deactivate the output enable for the DIO."]
    #[inline]
    pub fn wu_cfg(&self) -> WU_CFGR {
        let bits = {
            const MASK: u8 = 3;
            const OFFSET: u8 = 27;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        };
        WU_CFGR { bits }
    }
    #[doc = "Bits 24:26 - IO Mode N/A for IO configured for AON periph. signals and AUX ie. PORT_ID 0x01-0x08 AUX has its own open_source/drain configuration. 0x2: Reserved. Undefined behavior. 0x3: Reserved. Undefined behavior."]
    #[inline]
    pub fn iomode(&self) -> IOMODER {
        // Enumerated fields go through *R::_from rather than a plain
        // bits wrapper, so reserved encodings can be represented.
        IOMODER::_from({
            const MASK: u8 = 7;
            const OFFSET: u8 = 24;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        })
    }
    #[doc = "Bits 19:23 - Software should not rely on the value of a reserved. Writing any other value than the reset value may result in undefined behavior."]
    #[inline]
    pub fn reserved19(&self) -> RESERVED19R {
        let bits = {
            const MASK: u8 = 31;
            const OFFSET: u8 = 19;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        };
        RESERVED19R { bits }
    }
    #[doc = "Bit 18 - 0: No interrupt generation 1: Enable interrupt generation for this IO (Only effective if EDGE_DET is enabled)"]
    #[inline]
    pub fn edge_irq_en(&self) -> EDGE_IRQ_ENR {
        let bits = {
            const MASK: bool = true;
            const OFFSET: u8 = 18;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        };
        EDGE_IRQ_ENR { bits }
    }
    #[doc = "Bits 16:17 - Enable generation of edge detection events on this IO"]
    #[inline]
    pub fn edge_det(&self) -> EDGE_DETR {
        EDGE_DETR::_from({
            const MASK: u8 = 3;
            const OFFSET: u8 = 16;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        })
    }
    #[doc = "Bit 15 - Software should not rely on the value of a reserved. Writing any other value than the reset value may result in undefined behavior."]
    #[inline]
    pub fn reserved15(&self) -> RESERVED15R {
        let bits = {
            const MASK: bool = true;
            const OFFSET: u8 = 15;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        };
        RESERVED15R { bits }
    }
    #[doc = "Bits 13:14 - Pull control"]
    #[inline]
    pub fn pull_ctl(&self) -> PULL_CTLR {
        PULL_CTLR::_from({
            const MASK: u8 = 3;
            const OFFSET: u8 = 13;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        })
    }
    #[doc = "Bit 12 - 0: Normal slew rate 1: Enables reduced slew rate in output driver."]
    #[inline]
    pub fn slew_red(&self) -> SLEW_REDR {
        let bits = {
            const MASK: bool = true;
            const OFFSET: u8 = 12;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        };
        SLEW_REDR { bits }
    }
    #[doc = "Bits 10:11 - Selects IO current mode of this IO."]
    #[inline]
    pub fn iocurr(&self) -> IOCURRR {
        IOCURRR::_from({
            const MASK: u8 = 3;
            const OFFSET: u8 = 10;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        })
    }
    #[doc = "Bits 8:9 - Select source for drive strength control of this IO. This setting controls the drive strength of the Low-Current (LC) mode. Higher drive strength can be selected in IOCURR"]
    #[inline]
    pub fn iostr(&self) -> IOSTRR {
        IOSTRR::_from({
            const MASK: u8 = 3;
            const OFFSET: u8 = 8;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        })
    }
    #[doc = "Bits 6:7 - Software should not rely on the value of a reserved. Writing any other value than the reset value may result in undefined behavior."]
    #[inline]
    pub fn reserved6(&self) -> RESERVED6R {
        let bits = {
            const MASK: u8 = 3;
            const OFFSET: u8 = 6;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        };
        RESERVED6R { bits }
    }
    #[doc = "Bits 0:5 - Selects usage for DIO0"]
    #[inline]
    pub fn port_id(&self) -> PORT_IDR {
        PORT_IDR::_from({
            const MASK: u8 = 63;
            const OFFSET: u8 = 0;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        })
    }
}
impl W {
    // Writer proxy for the register: each field accessor below returns a
    // typed field-writer (`_*W`) that mutates `self.bits` in place.
    #[doc = r" Reset value of the register"]
    #[inline]
    pub fn reset_value() -> W {
        W { bits: 0 }
    }
    #[doc = r" Writes raw bits to the register"]
    #[inline]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
    #[doc = "Bit 30 - 0: Input hysteresis disable 1: Input hysteresis enable"]
    #[inline]
    pub fn hyst_en(&mut self) -> _HYST_ENW {
        _HYST_ENW { w: self }
    }
    #[doc = "Bit 29 - 0: Input disabled 1: Input enabled Note: If IO is configured for AUX ie. PORT_ID = 0x08, the enable will be ignored."]
    #[inline]
    pub fn ie(&mut self) -> _IEW {
        _IEW { w: self }
    }
    #[doc = "Bits 27:28 - If DIO is configured GPIO or non-AON peripheral signals, i.e. PORT_ID 0x00 or >0x08: 00: No wake-up 01: No wake-up 10: Wakes up from shutdown if this pad is going low. 11: Wakes up from shutdown if this pad is going high. If IO is configured for AON peripheral signals or AUX ie. PORT_ID 0x01-0x08, this register only sets wakeup enable or not. 00, 01: Wakeup disabled 10, 11: Wakeup enabled Polarity is controlled from AON registers. Note:When the MSB is set, the IOC will deactivate the output enable for the DIO."]
    #[inline]
    pub fn wu_cfg(&mut self) -> _WU_CFGW {
        _WU_CFGW { w: self }
    }
    #[doc = "Bits 24:26 - IO Mode N/A for IO configured for AON periph. signals and AUX ie. PORT_ID 0x01-0x08 AUX has its own open_source/drain configuration. 0x2: Reserved. Undefined behavior. 0x3: Reserved. Undefined behavior."]
    #[inline]
    pub fn iomode(&mut self) -> _IOMODEW {
        _IOMODEW { w: self }
    }
    #[doc = "Bits 19:23 - Software should not rely on the value of a reserved. Writing any other value than the reset value may result in undefined behavior."]
    #[inline]
    pub fn reserved19(&mut self) -> _RESERVED19W {
        _RESERVED19W { w: self }
    }
    #[doc = "Bit 18 - 0: No interrupt generation 1: Enable interrupt generation for this IO (Only effective if EDGE_DET is enabled)"]
    #[inline]
    pub fn edge_irq_en(&mut self) -> _EDGE_IRQ_ENW {
        _EDGE_IRQ_ENW { w: self }
    }
    #[doc = "Bits 16:17 - Enable generation of edge detection events on this IO"]
    #[inline]
    pub fn edge_det(&mut self) -> _EDGE_DETW {
        _EDGE_DETW { w: self }
    }
    #[doc = "Bits 13:14 - Pull control"]
    #[inline]
    pub fn pull_ctl(&mut self) -> _PULL_CTLW {
        _PULL_CTLW { w: self }
    }
    #[doc = "Bit 12 - 0: Normal slew rate 1: Enables reduced slew rate in output driver."]
    #[inline]
    pub fn slew_red(&mut self) -> _SLEW_REDW {
        _SLEW_REDW { w: self }
    }
    #[doc = "Bits 10:11 - Selects IO current mode of this IO."]
    #[inline]
    pub fn iocurr(&mut self) -> _IOCURRW {
        _IOCURRW { w: self }
    }
    #[doc = "Bits 8:9 - Select source for drive strength control of this IO. This setting controls the drive strength of the Low-Current (LC) mode. Higher drive strength can be selected in IOCURR"]
    #[inline]
    pub fn iostr(&mut self) -> _IOSTRW {
        _IOSTRW { w: self }
    }
    #[doc = "Bits 0:5 - Selects usage for DIO0"]
    #[inline]
    pub fn port_id(&mut self) -> _PORT_IDW {
        _PORT_IDW { w: self }
    }
}
| 31.21436 | 543 | 0.549635 |
28acb97f8e54324b84cc6571111ccdb645683c34
| 1,395 |
use clap::{App, Arg};
#[test]
fn two_conflicting_arguments() {
    // Two flags that each declare the other as a conflict.
    let app = App::new("two_conflicting_arguments")
        .arg(
            Arg::with_name("develop")
                .long("develop")
                .conflicts_with("production"),
        )
        .arg(
            Arg::with_name("production")
                .long("production")
                .conflicts_with("develop"),
        );
    let result = app.try_get_matches_from(vec!["", "--develop", "--production"]);
    assert!(result.is_err());
    let err = result.unwrap_err();
    assert_eq!(
        err.cause,
        "The argument '--develop' cannot be used with '--production'"
    );
}
#[test]
fn three_conflicting_arguments() {
    // Three flags, each conflicting with both of the others; the error
    // reports the first detected pair.
    let app = App::new("two_conflicting_arguments")
        .arg(
            Arg::with_name("one")
                .long("one")
                .conflicts_with_all(&["two", "three"]),
        )
        .arg(
            Arg::with_name("two")
                .long("two")
                .conflicts_with_all(&["one", "three"]),
        )
        .arg(
            Arg::with_name("three")
                .long("three")
                .conflicts_with_all(&["one", "two"]),
        );
    let result = app.try_get_matches_from(vec!["", "--one", "--two", "--three"]);
    assert!(result.is_err());
    let err = result.unwrap_err();
    assert_eq!(
        err.cause,
        "The argument '--one' cannot be used with '--two'"
    );
}
| 26.320755 | 73 | 0.470251 |
4a6c0aea1e5b7cb238c04702df3b7f8610b3590c
| 622 |
use super::base::state::*;
use components::module::_common::play::prelude::*;
use shared::domain::{
jig::JigId,
module::{
body::cover::{ModuleData as RawData, Step},
ModuleId,
},
};
use std::rc::Rc;
/// Player state for the cover module, specialized from the generic state.
pub type AppState = GenericState<RawData, (), Step, Base>;

/// Build the player state for the given jig/module, honoring debug overrides.
pub fn create_state(jig_id: JigId, module_id: ModuleId) -> Rc<AppState> {
    crate::debug::init(jig_id, module_id);
    let opts = {
        let mut opts = StateOpts::new(jig_id, module_id);
        // Debug settings may force raw module data and skip loading the jig.
        opts.force_raw = crate::debug::settings().data.clone();
        opts.skip_load_jig = crate::debug::settings().skip_load_jig;
        opts
    };
    AppState::new(opts, Base::new)
}
| 28.272727 | 73 | 0.652733 |
3970376a4f0a2731d86338df93b3545bed60ff6c
| 9,222 |
/*
* Onshape REST API
*
* The Onshape REST API consumed by all clients.
*
* The version of the OpenAPI document: 1.104
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
/// Document summary returned by the Onshape REST API (generated model).
/// Every field is `Option` and skipped when `None`, because the server may
/// omit any of them in a given response.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BtDocumentInfo {
    #[serde(rename = "treeHref", skip_serializing_if = "Option::is_none")]
    pub tree_href: Option<String>,
    #[serde(rename = "isMutable", skip_serializing_if = "Option::is_none")]
    pub is_mutable: Option<bool>,
    #[serde(rename = "resourceType", skip_serializing_if = "Option::is_none")]
    pub resource_type: Option<String>,
    #[serde(rename = "description", skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "modifiedAt", skip_serializing_if = "Option::is_none")]
    pub modified_at: Option<String>,
    #[serde(rename = "createdAt", skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,
    #[serde(rename = "createdBy", skip_serializing_if = "Option::is_none")]
    pub created_by: Option<crate::models::BtUserBasicSummaryInfo>,
    #[serde(rename = "modifiedBy", skip_serializing_if = "Option::is_none")]
    pub modified_by: Option<crate::models::BtUserBasicSummaryInfo>,
    #[serde(rename = "projectId", skip_serializing_if = "Option::is_none")]
    pub project_id: Option<String>,
    #[serde(rename = "canMove", skip_serializing_if = "Option::is_none")]
    pub can_move: Option<bool>,
    #[serde(rename = "isContainer", skip_serializing_if = "Option::is_none")]
    pub is_container: Option<bool>,
    #[serde(rename = "isEnterpriseOwned", skip_serializing_if = "Option::is_none")]
    pub is_enterprise_owned: Option<bool>,
    #[serde(rename = "hasPendingOwner", skip_serializing_if = "Option::is_none")]
    pub has_pending_owner: Option<bool>,
    #[serde(rename = "owner", skip_serializing_if = "Option::is_none")]
    pub owner: Option<crate::models::BtOwnerInfo>,
    #[serde(rename = "href", skip_serializing_if = "Option::is_none")]
    pub href: Option<String>,
    #[serde(rename = "viewRef", skip_serializing_if = "Option::is_none")]
    pub view_ref: Option<String>,
    #[serde(rename = "name", skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "id", skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "defaultElementId", skip_serializing_if = "Option::is_none")]
    pub default_element_id: Option<String>,
    #[serde(rename = "defaultWorkspace", skip_serializing_if = "Option::is_none")]
    pub default_workspace: Option<crate::models::BtBaseInfo>,
    #[serde(rename = "parentId", skip_serializing_if = "Option::is_none")]
    pub parent_id: Option<String>,
    #[serde(rename = "permissionSet", skip_serializing_if = "Option::is_none")]
    pub permission_set: Option<serde_json::Value>,
    #[serde(rename = "trash", skip_serializing_if = "Option::is_none")]
    pub trash: Option<bool>,
    #[serde(rename = "totalWorkspacesUpdating", skip_serializing_if = "Option::is_none")]
    pub total_workspaces_updating: Option<i32>,
    #[serde(rename = "totalWorkspacesScheduledForUpdate", skip_serializing_if = "Option::is_none")]
    pub total_workspaces_scheduled_for_update: Option<i32>,
    #[serde(rename = "canUnshare", skip_serializing_if = "Option::is_none")]
    pub can_unshare: Option<bool>,
    #[serde(rename = "thumbnail", skip_serializing_if = "Option::is_none")]
    pub thumbnail: Option<crate::models::BtThumbnailInfo>,
    #[serde(rename = "supportTeamUserAndShared", skip_serializing_if = "Option::is_none")]
    pub support_team_user_and_shared: Option<bool>,
    #[serde(rename = "likedByCurrentUser", skip_serializing_if = "Option::is_none")]
    pub liked_by_current_user: Option<bool>,
    #[serde(rename = "documentLabels", skip_serializing_if = "Option::is_none")]
    pub document_labels: Option<Vec<crate::models::BtDocumentLabelInfo>>,
    #[serde(rename = "numberOfTimesReferenced", skip_serializing_if = "Option::is_none")]
    pub number_of_times_referenced: Option<i64>,
    #[serde(rename = "numberOfTimesCopied", skip_serializing_if = "Option::is_none")]
    pub number_of_times_copied: Option<i64>,
    #[serde(rename = "likes", skip_serializing_if = "Option::is_none")]
    pub likes: Option<i64>,
    #[serde(rename = "recentVersion", skip_serializing_if = "Option::is_none")]
    pub recent_version: Option<crate::models::BtBaseInfo>,
    #[serde(rename = "hasRelevantInsertables", skip_serializing_if = "Option::is_none")]
    pub has_relevant_insertables: Option<bool>,
    #[serde(rename = "createdWithEducationPlan", skip_serializing_if = "Option::is_none")]
    pub created_with_education_plan: Option<bool>,
    #[serde(rename = "notRevisionManaged", skip_serializing_if = "Option::is_none")]
    pub not_revision_managed: Option<bool>,
    #[serde(rename = "anonymousAccessAllowed", skip_serializing_if = "Option::is_none")]
    pub anonymous_access_allowed: Option<bool>,
    #[serde(rename = "anonymousAllowsExport", skip_serializing_if = "Option::is_none")]
    pub anonymous_allows_export: Option<bool>,
    #[serde(rename = "tags", skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<String>>,
    #[serde(rename = "trashedAt", skip_serializing_if = "Option::is_none")]
    pub trashed_at: Option<String>,
    #[serde(rename = "isOrphaned", skip_serializing_if = "Option::is_none")]
    pub is_orphaned: Option<bool>,
    #[serde(rename = "public", skip_serializing_if = "Option::is_none")]
    pub public: Option<bool>,
    #[serde(rename = "userAccountLimitsBreached", skip_serializing_if = "Option::is_none")]
    pub user_account_limits_breached: Option<bool>,
    #[serde(rename = "isUsingManagedWorkflow", skip_serializing_if = "Option::is_none")]
    pub is_using_managed_workflow: Option<bool>,
    // Access level of the current user on this document; see `Permission`.
    #[serde(rename = "permission", skip_serializing_if = "Option::is_none")]
    pub permission: Option<Permission>,
    #[serde(rename = "hasReleaseRevisionableObjects", skip_serializing_if = "Option::is_none")]
    pub has_release_revisionable_objects: Option<bool>,
    #[serde(rename = "documentThumbnailElementId", skip_serializing_if = "Option::is_none")]
    pub document_thumbnail_element_id: Option<String>,
    #[serde(rename = "duplicateNameViolationError", skip_serializing_if = "Option::is_none")]
    pub duplicate_name_violation_error: Option<String>,
    #[serde(rename = "betaCapabilityIds", skip_serializing_if = "Option::is_none")]
    pub beta_capability_ids: Option<Vec<String>>,
    #[serde(rename = "isUpgradedToLatestVersion", skip_serializing_if = "Option::is_none")]
    pub is_upgraded_to_latest_version: Option<bool>,
}
impl BtDocumentInfo {
    /// Create an empty `BtDocumentInfo` with every field set to `None`.
    ///
    /// The `_json_type` argument is accepted for signature compatibility
    /// with the OpenAPI-generated call sites but is not stored anywhere —
    /// no corresponding field exists on the struct. The leading underscore
    /// silences the unused-variable warning without changing the
    /// (positional) call interface.
    pub fn new(_json_type: String) -> BtDocumentInfo {
        BtDocumentInfo {
            tree_href: None,
            is_mutable: None,
            resource_type: None,
            description: None,
            modified_at: None,
            created_at: None,
            created_by: None,
            modified_by: None,
            project_id: None,
            can_move: None,
            is_container: None,
            is_enterprise_owned: None,
            has_pending_owner: None,
            owner: None,
            href: None,
            view_ref: None,
            name: None,
            id: None,
            default_element_id: None,
            default_workspace: None,
            parent_id: None,
            permission_set: None,
            trash: None,
            total_workspaces_updating: None,
            total_workspaces_scheduled_for_update: None,
            can_unshare: None,
            thumbnail: None,
            support_team_user_and_shared: None,
            liked_by_current_user: None,
            document_labels: None,
            number_of_times_referenced: None,
            number_of_times_copied: None,
            likes: None,
            recent_version: None,
            has_relevant_insertables: None,
            created_with_education_plan: None,
            not_revision_managed: None,
            anonymous_access_allowed: None,
            anonymous_allows_export: None,
            tags: None,
            trashed_at: None,
            is_orphaned: None,
            public: None,
            user_account_limits_breached: None,
            is_using_managed_workflow: None,
            permission: None,
            has_release_revisionable_objects: None,
            document_thumbnail_element_id: None,
            duplicate_name_violation_error: None,
            beta_capability_ids: None,
            is_upgraded_to_latest_version: None,
        }
    }
}
/// Access level of a user on a document, in increasing order of privilege.
/// NOTE: this enum derives `Ord`/`PartialOrd`, so the declaration order is
/// behaviorally significant (NOACCESS < ... < OWNER) — do not reorder
/// variants.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Permission {
    #[serde(rename = "NOACCESS")]
    NOACCESS,
    #[serde(rename = "ANONYMOUS_ACCESS")]
    ANONYMOUSACCESS,
    #[serde(rename = "READ")]
    READ,
    #[serde(rename = "READ_COPY_EXPORT")]
    READCOPYEXPORT,
    #[serde(rename = "COMMENT")]
    COMMENT,
    #[serde(rename = "WRITE")]
    WRITE,
    #[serde(rename = "RESHARE")]
    RESHARE,
    #[serde(rename = "FULL")]
    FULL,
    #[serde(rename = "OWNER")]
    OWNER,
}
| 45.880597 | 99 | 0.674149 |
48874acc97d0e4af950fb5335a6f9093ea59746f
| 1,365 |
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
use std::env;
use containerd_shim::{ttrpc::context::Context, RemotePublisher};
use containerd_shim_client::events::task::TaskOOM;
/// Example: publish a `TaskOOM` event to containerd over TTRPC.
///
/// Usage: first CLI argument is containerd's TTRPC address
/// (must not start with `unix://`). Panics on missing argument,
/// connection failure, or publish failure — acceptable for an example
/// binary.
fn main() {
    let args: Vec<String> = env::args().collect();
    // Must not start with unix://
    // `.expect` replaces the former `.ok_or(msg).unwrap()` — same panic
    // semantics, idiomatic form.
    let address = args
        .get(1)
        .expect("First argument must be containerd's TTRPC address to publish events");

    println!("Connecting: {}", &address);
    let publisher = RemotePublisher::new(address).expect("Connect failed");

    // Minimal event payload; container id is a placeholder.
    let mut event = TaskOOM::new();
    event.set_container_id("123".into());

    let ctx = Context::default();

    println!("Sending event");
    publisher
        .publish(ctx, "/tasks/oom", "default", event)
        .expect("Publish failed");

    println!("Done");
}
| 29.042553 | 85 | 0.679121 |
e5a885897fbf4d60a6823c8761bbd9e204ff32ae
| 23,695 |
//! Vault management instructions.
use {
crate::pack::check_data_len,
arrayref::{array_mut_ref, array_ref, array_refs, mut_array_refs},
num_enum::TryFromPrimitive,
solana_program::program_error::ProgramError,
};
/// Instruction set accepted by the Vault program. Each variant corresponds
/// one-to-one (by name) with a `VaultInstructionType` wire tag used by
/// `pack`/`unpack`; numeric payloads are serialized little-endian.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum VaultInstruction {
    /// Initialize on-chain records for a new user
    /// # Account references are strategy specific,
    /// see particular Vault instructions handlers for more info
    UserInit,

    /// Add liquidity to the Vault
    /// # Account references are strategy specific,
    /// see particular Vault instructions handlers for more info
    AddLiquidity {
        max_token_a_amount: u64,
        max_token_b_amount: u64,
    },

    /// Lock liquidity in the Vault
    /// # Account references are strategy specific,
    /// see particular Vault instructions handlers for more info
    LockLiquidity { amount: u64 },

    /// Unlock liquidity in the Vault
    /// # Account references are strategy specific,
    /// see particular Vault instructions handlers for more info
    UnlockLiquidity { amount: u64 },

    /// Remove liquidity from the Vault
    /// # Account references are protocol specific,
    /// see particular Router instructions handlers for more info
    RemoveLiquidity { amount: u64 },

    /// Set minimum crank interval for the Vault
    /// # Account references are protocol specific,
    /// see particular Router instructions handlers for more info
    SetMinCrankInterval { min_crank_interval: u32 },

    /// Set fee for the Vault
    /// # Account references are protocol specific,
    /// see particular Router instructions handlers for more info
    SetFee { fee: f32 },

    /// Set underlying protocol fee for the Vault
    /// # Account references are protocol specific,
    /// see particular Router instructions handlers for more info
    SetExternalFee { external_fee: f32 },

    /// Disable new deposits to the Vault
    /// # Account references are protocol specific,
    /// see particular Router instructions handlers for more info
    DisableDeposit,

    /// Allow new deposits to the Vault
    /// # Account references are protocol specific,
    /// see particular Router instructions handlers for more info
    EnableDeposit,

    /// Disable withdrawals from the Vault
    /// # Account references are protocol specific,
    /// see particular Router instructions handlers for more info
    DisableWithdrawal,

    /// Allow withdrawals from the Vault
    /// # Account references are protocol specific,
    /// see particular Router instructions handlers for more info
    EnableWithdrawal,

    /// Run crank operation on the Vault
    /// # Account references are protocol specific,
    /// see particular Router instructions handlers for more info
    Crank { step: u64 },

    /// Initialize the Vault
    /// # Account references are protocol specific,
    /// see particular Router instructions handlers for more info
    Init { step: u64 },

    /// Shutdown the Vault
    /// # Account references are protocol specific,
    /// see particular Router instructions handlers for more info
    Shutdown,

    /// Withdraw collected fees
    /// # Account references are protocol specific,
    /// see particular Router instructions handlers for more info
    WithdrawFees { amount: u64 },
}
/// One-byte wire tag identifying each `VaultInstruction` in its serialized
/// form. `unpack` reads the first input byte and maps it back to a variant
/// via `TryFromPrimitive`, so with `repr(u8)` the declaration order defines
/// the on-wire discriminants — do not reorder or insert variants mid-list.
#[repr(u8)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, TryFromPrimitive)]
pub enum VaultInstructionType {
    UserInit,
    AddLiquidity,
    LockLiquidity,
    UnlockLiquidity,
    RemoveLiquidity,
    SetMinCrankInterval,
    SetFee,
    SetExternalFee,
    DisableDeposit,
    EnableDeposit,
    DisableWithdrawal,
    EnableWithdrawal,
    Crank,
    Init,
    Shutdown,
    WithdrawFees,
}
impl VaultInstruction {
pub const MAX_LEN: usize = 17;
pub const USER_INIT_LEN: usize = 1;
pub const ADD_LIQUIDITY_LEN: usize = 17;
pub const LOCK_LIQUIDITY_LEN: usize = 9;
pub const UNLOCK_LIQUIDITY_LEN: usize = 9;
pub const REMOVE_LIQUIDITY_LEN: usize = 9;
pub const SET_MIN_CRANK_INTERVAL_LEN: usize = 5;
pub const SET_FEE_LEN: usize = 5;
pub const SET_EXTERNAL_FEE_LEN: usize = 5;
pub const DISABLE_DEPOSIT_LEN: usize = 1;
pub const ENABLE_DEPOSIT_LEN: usize = 1;
pub const DISABLE_WITHDRAWAL_LEN: usize = 1;
pub const ENABLE_WITHDRAWAL_LEN: usize = 1;
pub const CRANK_LEN: usize = 9;
pub const INIT_LEN: usize = 9;
pub const SHUTDOWN_LEN: usize = 1;
pub const WITHDRAW_FEES_LEN: usize = 9;
    /// Serialize this instruction into `output`, dispatching to the
    /// per-variant packer. Returns the number of bytes written, or an
    /// error if `output` is too short for the variant's fixed length.
    pub fn pack(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        match self {
            Self::UserInit { .. } => self.pack_user_init(output),
            Self::AddLiquidity { .. } => self.pack_add_liquidity(output),
            Self::RemoveLiquidity { .. } => self.pack_remove_liquidity(output),
            Self::LockLiquidity { .. } => self.pack_lock_liquidity(output),
            Self::UnlockLiquidity { .. } => self.pack_unlock_liquidity(output),
            Self::SetMinCrankInterval { .. } => self.pack_set_min_crank_interval(output),
            Self::SetFee { .. } => self.pack_set_fee(output),
            Self::SetExternalFee { .. } => self.pack_set_external_fee(output),
            Self::DisableDeposit { .. } => self.pack_disable_deposit(output),
            Self::EnableDeposit { .. } => self.pack_enable_deposit(output),
            Self::DisableWithdrawal { .. } => self.pack_disable_withdrawal(output),
            Self::EnableWithdrawal { .. } => self.pack_enable_withdrawal(output),
            Self::Crank { .. } => self.pack_crank(output),
            Self::Init { .. } => self.pack_init(output),
            Self::Shutdown { .. } => self.pack_shutdown(output),
            Self::WithdrawFees { .. } => self.pack_withdraw_fees(output),
        }
    }
pub fn to_vec(&self) -> Result<Vec<u8>, ProgramError> {
let mut output: [u8; VaultInstruction::MAX_LEN] = [0; VaultInstruction::MAX_LEN];
if let Ok(len) = self.pack(&mut output[..]) {
Ok(output[..len].to_vec())
} else {
Err(ProgramError::InvalidInstructionData)
}
}
    /// Deserialize an instruction from `input`. The first byte is the
    /// `VaultInstructionType` tag selecting the variant; an unknown tag
    /// yields `InvalidInstructionData`.
    pub fn unpack(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
        check_data_len(input, 1)?;
        let instruction_type = VaultInstructionType::try_from_primitive(input[0])
            .or(Err(ProgramError::InvalidInstructionData))?;
        match instruction_type {
            VaultInstructionType::UserInit => VaultInstruction::unpack_user_init(input),
            VaultInstructionType::AddLiquidity => VaultInstruction::unpack_add_liquidity(input),
            VaultInstructionType::LockLiquidity => VaultInstruction::unpack_lock_liquidity(input),
            VaultInstructionType::UnlockLiquidity => {
                VaultInstruction::unpack_unlock_liquidity(input)
            }
            VaultInstructionType::RemoveLiquidity => {
                VaultInstruction::unpack_remove_liquidity(input)
            }
            VaultInstructionType::SetMinCrankInterval => {
                VaultInstruction::unpack_set_min_crank_interval(input)
            }
            VaultInstructionType::SetFee => VaultInstruction::unpack_set_fee(input),
            VaultInstructionType::SetExternalFee => {
                VaultInstruction::unpack_set_external_fee(input)
            }
            VaultInstructionType::DisableDeposit => VaultInstruction::unpack_disable_deposit(input),
            VaultInstructionType::EnableDeposit => VaultInstruction::unpack_enable_deposit(input),
            VaultInstructionType::DisableWithdrawal => {
                VaultInstruction::unpack_disable_withdrawal(input)
            }
            VaultInstructionType::EnableWithdrawal => {
                VaultInstruction::unpack_enable_withdrawal(input)
            }
            VaultInstructionType::Crank => VaultInstruction::unpack_crank(input),
            VaultInstructionType::Init => VaultInstruction::unpack_init(input),
            VaultInstructionType::Shutdown => VaultInstruction::unpack_shutdown(input),
            VaultInstructionType::WithdrawFees => VaultInstruction::unpack_withdraw_fees(input),
        }
    }
    // --- Per-variant packers -------------------------------------------
    // All helpers follow the same pattern: verify `output` can hold the
    // fixed serialized length, confirm `self` is the expected variant
    // (else `InvalidInstructionData`), write the 1-byte tag followed by
    // the little-endian payload, and return the bytes written.

    // wire: [tag]
    fn pack_user_init(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::USER_INIT_LEN)?;
        if let VaultInstruction::UserInit = self {
            let instruction_type_out = array_mut_ref![output, 0, 1];

            instruction_type_out[0] = VaultInstructionType::UserInit as u8;

            Ok(VaultInstruction::USER_INIT_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag][max_token_a_amount: u64 le][max_token_b_amount: u64 le]
    fn pack_add_liquidity(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::ADD_LIQUIDITY_LEN)?;
        if let VaultInstruction::AddLiquidity {
            max_token_a_amount,
            max_token_b_amount,
        } = self
        {
            let output = array_mut_ref![output, 0, VaultInstruction::ADD_LIQUIDITY_LEN];
            let (instruction_type_out, max_token_a_amount_out, max_token_b_amount_out) =
                mut_array_refs![output, 1, 8, 8];

            instruction_type_out[0] = VaultInstructionType::AddLiquidity as u8;

            *max_token_a_amount_out = max_token_a_amount.to_le_bytes();
            *max_token_b_amount_out = max_token_b_amount.to_le_bytes();

            Ok(VaultInstruction::ADD_LIQUIDITY_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag][amount: u64 le]
    fn pack_lock_liquidity(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::LOCK_LIQUIDITY_LEN)?;
        if let VaultInstruction::LockLiquidity { amount } = self {
            let output = array_mut_ref![output, 0, VaultInstruction::LOCK_LIQUIDITY_LEN];
            let (instruction_type_out, amount_out) = mut_array_refs![output, 1, 8];

            instruction_type_out[0] = VaultInstructionType::LockLiquidity as u8;

            *amount_out = amount.to_le_bytes();

            Ok(VaultInstruction::LOCK_LIQUIDITY_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag][amount: u64 le]
    fn pack_unlock_liquidity(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::UNLOCK_LIQUIDITY_LEN)?;
        if let VaultInstruction::UnlockLiquidity { amount } = self {
            let output = array_mut_ref![output, 0, VaultInstruction::UNLOCK_LIQUIDITY_LEN];
            let (instruction_type_out, amount_out) = mut_array_refs![output, 1, 8];

            instruction_type_out[0] = VaultInstructionType::UnlockLiquidity as u8;

            *amount_out = amount.to_le_bytes();

            Ok(VaultInstruction::UNLOCK_LIQUIDITY_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag][amount: u64 le]
    fn pack_remove_liquidity(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::REMOVE_LIQUIDITY_LEN)?;
        if let VaultInstruction::RemoveLiquidity { amount } = self {
            let output = array_mut_ref![output, 0, VaultInstruction::REMOVE_LIQUIDITY_LEN];
            let (instruction_type_out, amount_out) = mut_array_refs![output, 1, 8];

            instruction_type_out[0] = VaultInstructionType::RemoveLiquidity as u8;

            *amount_out = amount.to_le_bytes();

            Ok(VaultInstruction::REMOVE_LIQUIDITY_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag][min_crank_interval: u32 le]
    fn pack_set_min_crank_interval(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::SET_MIN_CRANK_INTERVAL_LEN)?;
        if let VaultInstruction::SetMinCrankInterval { min_crank_interval } = self {
            let output = array_mut_ref![output, 0, VaultInstruction::SET_MIN_CRANK_INTERVAL_LEN];
            let (instruction_type_out, min_crank_interval_out) = mut_array_refs![output, 1, 4];

            instruction_type_out[0] = VaultInstructionType::SetMinCrankInterval as u8;

            *min_crank_interval_out = min_crank_interval.to_le_bytes();

            Ok(VaultInstruction::SET_MIN_CRANK_INTERVAL_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag][fee: f32 le]
    fn pack_set_fee(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::SET_FEE_LEN)?;
        if let VaultInstruction::SetFee { fee } = self {
            let output = array_mut_ref![output, 0, VaultInstruction::SET_FEE_LEN];
            let (instruction_type_out, fee_out) = mut_array_refs![output, 1, 4];

            instruction_type_out[0] = VaultInstructionType::SetFee as u8;

            *fee_out = fee.to_le_bytes();

            Ok(VaultInstruction::SET_FEE_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag][external_fee: f32 le]
    fn pack_set_external_fee(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::SET_EXTERNAL_FEE_LEN)?;
        if let VaultInstruction::SetExternalFee { external_fee } = self {
            let output = array_mut_ref![output, 0, VaultInstruction::SET_EXTERNAL_FEE_LEN];
            let (instruction_type_out, external_fee_out) = mut_array_refs![output, 1, 4];

            instruction_type_out[0] = VaultInstructionType::SetExternalFee as u8;

            *external_fee_out = external_fee.to_le_bytes();

            Ok(VaultInstruction::SET_EXTERNAL_FEE_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag]
    fn pack_disable_deposit(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::DISABLE_DEPOSIT_LEN)?;
        if let VaultInstruction::DisableDeposit = self {
            let instruction_type_out = array_mut_ref![output, 0, 1];

            instruction_type_out[0] = VaultInstructionType::DisableDeposit as u8;

            Ok(VaultInstruction::DISABLE_DEPOSIT_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag]
    fn pack_enable_deposit(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::ENABLE_DEPOSIT_LEN)?;
        if let VaultInstruction::EnableDeposit = self {
            let instruction_type_out = array_mut_ref![output, 0, 1];

            instruction_type_out[0] = VaultInstructionType::EnableDeposit as u8;

            Ok(VaultInstruction::ENABLE_DEPOSIT_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag]
    fn pack_disable_withdrawal(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::DISABLE_WITHDRAWAL_LEN)?;
        if let VaultInstruction::DisableWithdrawal = self {
            let instruction_type_out = array_mut_ref![output, 0, 1];

            instruction_type_out[0] = VaultInstructionType::DisableWithdrawal as u8;

            Ok(VaultInstruction::DISABLE_WITHDRAWAL_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag]
    fn pack_enable_withdrawal(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::ENABLE_WITHDRAWAL_LEN)?;
        if let VaultInstruction::EnableWithdrawal = self {
            let instruction_type_out = array_mut_ref![output, 0, 1];

            instruction_type_out[0] = VaultInstructionType::EnableWithdrawal as u8;

            Ok(VaultInstruction::ENABLE_WITHDRAWAL_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag][step: u64 le]
    fn pack_crank(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::CRANK_LEN)?;
        if let VaultInstruction::Crank { step } = self {
            let output = array_mut_ref![output, 0, VaultInstruction::CRANK_LEN];
            let (instruction_type_out, step_out) = mut_array_refs![output, 1, 8];

            instruction_type_out[0] = VaultInstructionType::Crank as u8;

            *step_out = step.to_le_bytes();

            Ok(VaultInstruction::CRANK_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag][step: u64 le]
    fn pack_init(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::INIT_LEN)?;
        if let VaultInstruction::Init { step } = self {
            let output = array_mut_ref![output, 0, VaultInstruction::INIT_LEN];
            let (instruction_type_out, step_out) = mut_array_refs![output, 1, 8];

            instruction_type_out[0] = VaultInstructionType::Init as u8;

            *step_out = step.to_le_bytes();

            Ok(VaultInstruction::INIT_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag]
    fn pack_shutdown(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::SHUTDOWN_LEN)?;
        if let VaultInstruction::Shutdown = self {
            let instruction_type_out = array_mut_ref![output, 0, 1];

            instruction_type_out[0] = VaultInstructionType::Shutdown as u8;

            Ok(VaultInstruction::SHUTDOWN_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }

    // wire: [tag][amount: u64 le]
    fn pack_withdraw_fees(&self, output: &mut [u8]) -> Result<usize, ProgramError> {
        check_data_len(output, VaultInstruction::WITHDRAW_FEES_LEN)?;
        if let VaultInstruction::WithdrawFees { amount } = self {
            let output = array_mut_ref![output, 0, VaultInstruction::WITHDRAW_FEES_LEN];
            let (instruction_type_out, amount_out) = mut_array_refs![output, 1, 8];

            instruction_type_out[0] = VaultInstructionType::WithdrawFees as u8;

            *amount_out = amount.to_le_bytes();

            Ok(VaultInstruction::WITHDRAW_FEES_LEN)
        } else {
            Err(ProgramError::InvalidInstructionData)
        }
    }
fn unpack_user_init(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
check_data_len(input, VaultInstruction::USER_INIT_LEN)?;
Ok(Self::UserInit)
}
fn unpack_add_liquidity(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
check_data_len(input, VaultInstruction::ADD_LIQUIDITY_LEN)?;
let input = array_ref![input, 1, VaultInstruction::ADD_LIQUIDITY_LEN - 1];
#[allow(clippy::ptr_offset_with_cast)]
let (max_token_a_amount, max_token_b_amount) = array_refs![input, 8, 8];
Ok(Self::AddLiquidity {
max_token_a_amount: u64::from_le_bytes(*max_token_a_amount),
max_token_b_amount: u64::from_le_bytes(*max_token_b_amount),
})
}
fn unpack_lock_liquidity(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
check_data_len(input, VaultInstruction::LOCK_LIQUIDITY_LEN)?;
Ok(Self::LockLiquidity {
amount: u64::from_le_bytes(*array_ref![input, 1, 8]),
})
}
fn unpack_unlock_liquidity(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
check_data_len(input, VaultInstruction::UNLOCK_LIQUIDITY_LEN)?;
Ok(Self::UnlockLiquidity {
amount: u64::from_le_bytes(*array_ref![input, 1, 8]),
})
}
fn unpack_remove_liquidity(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
check_data_len(input, VaultInstruction::REMOVE_LIQUIDITY_LEN)?;
Ok(Self::RemoveLiquidity {
amount: u64::from_le_bytes(*array_ref![input, 1, 8]),
})
}
    /// Parses `SetMinCrankInterval` from `[tag][min_crank_interval: u32 LE]`.
    fn unpack_set_min_crank_interval(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
        check_data_len(input, VaultInstruction::SET_MIN_CRANK_INTERVAL_LEN)?;
        Ok(Self::SetMinCrankInterval {
            min_crank_interval: u32::from_le_bytes(*array_ref![input, 1, 4]),
        })
    }
    /// Parses `SetFee` from `[tag][fee: f32 LE]`.
    fn unpack_set_fee(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
        check_data_len(input, VaultInstruction::SET_FEE_LEN)?;
        Ok(Self::SetFee {
            fee: f32::from_le_bytes(*array_ref![input, 1, 4]),
        })
    }
    /// Parses `SetExternalFee` from `[tag][external_fee: f32 LE]`.
    fn unpack_set_external_fee(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
        check_data_len(input, VaultInstruction::SET_EXTERNAL_FEE_LEN)?;
        Ok(Self::SetExternalFee {
            external_fee: f32::from_le_bytes(*array_ref![input, 1, 4]),
        })
    }
    /// Parses `DisableDeposit`; tag-only instruction, only the length is checked.
    fn unpack_disable_deposit(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
        check_data_len(input, VaultInstruction::DISABLE_DEPOSIT_LEN)?;
        Ok(Self::DisableDeposit)
    }
    /// Parses `EnableDeposit`; tag-only instruction, only the length is checked.
    fn unpack_enable_deposit(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
        check_data_len(input, VaultInstruction::ENABLE_DEPOSIT_LEN)?;
        Ok(Self::EnableDeposit)
    }
    /// Parses `DisableWithdrawal`; tag-only instruction, only the length is checked.
    fn unpack_disable_withdrawal(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
        check_data_len(input, VaultInstruction::DISABLE_WITHDRAWAL_LEN)?;
        Ok(Self::DisableWithdrawal)
    }
    /// Parses `EnableWithdrawal`; tag-only instruction, only the length is checked.
    fn unpack_enable_withdrawal(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
        check_data_len(input, VaultInstruction::ENABLE_WITHDRAWAL_LEN)?;
        Ok(Self::EnableWithdrawal)
    }
fn unpack_crank(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
check_data_len(input, VaultInstruction::CRANK_LEN)?;
Ok(Self::Crank {
step: u64::from_le_bytes(*array_ref![input, 1, 8]),
})
}
    /// Parses `Init` from `[tag][step: u64 LE]`.
    fn unpack_init(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
        check_data_len(input, VaultInstruction::INIT_LEN)?;
        Ok(Self::Init {
            step: u64::from_le_bytes(*array_ref![input, 1, 8]),
        })
    }
    /// Parses `Shutdown`; tag-only instruction, only the length is checked.
    fn unpack_shutdown(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
        check_data_len(input, VaultInstruction::SHUTDOWN_LEN)?;
        Ok(Self::Shutdown)
    }
fn unpack_withdraw_fees(input: &[u8]) -> Result<VaultInstruction, ProgramError> {
check_data_len(input, VaultInstruction::WITHDRAW_FEES_LEN)?;
Ok(Self::WithdrawFees {
amount: u64::from_le_bytes(*array_ref![input, 1, 8]),
})
}
}
impl std::fmt::Display for VaultInstructionType {
    /// Writes the canonical CamelCase name of the instruction type.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match *self {
            VaultInstructionType::UserInit => "UserInit",
            VaultInstructionType::AddLiquidity => "AddLiquidity",
            VaultInstructionType::LockLiquidity => "LockLiquidity",
            VaultInstructionType::UnlockLiquidity => "UnlockLiquidity",
            VaultInstructionType::RemoveLiquidity => "RemoveLiquidity",
            VaultInstructionType::SetMinCrankInterval => "SetMinCrankInterval",
            VaultInstructionType::SetFee => "SetFee",
            VaultInstructionType::SetExternalFee => "SetExternalFee",
            VaultInstructionType::DisableDeposit => "DisableDeposit",
            VaultInstructionType::EnableDeposit => "EnableDeposit",
            VaultInstructionType::DisableWithdrawal => "DisableWithdrawal",
            VaultInstructionType::EnableWithdrawal => "EnableWithdrawal",
            VaultInstructionType::Crank => "Crank",
            VaultInstructionType::Init => "Init",
            VaultInstructionType::Shutdown => "Shutdown",
            VaultInstructionType::WithdrawFees => "WithdrawFees",
        };
        write!(f, "{}", name)
    }
}
| 40.093063 | 100 | 0.653809 |
bbdabf8b567a236defd190b027c925ab44969590
| 4,016 |
use crate::envvar;
use crate::param::{
CategoricalParamType, ContinousParamType, DiscreteParamType, FidelityParamType,
NormalParamType, NumParamType, OrdinalParamType, ParamName, ParamType, StrParamType,
};
use crate::rpc;
use anyhow::Context;
// CLI options for `ask`: request a suggested value for one parameter.
// NOTE: plain `//` comments on purpose — `///` on structopt fields would
// change the generated `--help` output.
#[derive(Debug, structopt::StructOpt)]
#[structopt(rename_all = "kebab-case")]
pub struct AskOpt {
    // Name of the parameter to ask a value for.
    pub param_name: String,
    // Render the reply as a long CLI option (`--name=value`).
    #[structopt(long, short = "l")]
    pub long_option: bool,
    // Type/range specification of the parameter (subcommand).
    #[structopt(subcommand)]
    pub param_spec: ParamSpec,
}
impl AskOpt {
    /// Asks the server for a value of `param_name` and renders it.
    ///
    /// Sends an `AskRpc` request for the current observation (taken from the
    /// environment) and formats the reply either as a bare value or, with
    /// `--long-option`, as a `--name=value` flag. A boolean `true` collapses
    /// to the bare flag `--name`.
    pub fn ask(&self) -> anyhow::Result<String> {
        let observation_id = envvar::get_observation_id()?;
        let param_type = self
            .param_spec
            .to_param_type()
            .with_context(|| format!("the specification of {:?} is invalid", self.param_name))?;
        let req = rpc::AskReq {
            observation_id,
            param_name: ParamName::new(self.param_name.clone()),
            param_type,
        };
        let res = rpc::call::<rpc::AskRpc>(req)?;
        let v = res.to_string();
        if self.long_option {
            if matches!(self.param_spec, ParamSpec::Bool) && v == "true" {
                // A true boolean flag is expressed by its mere presence.
                Ok(format!("--{}", self.param_name))
            } else {
                // NOTE(review): `{:?}` quotes the value (e.g. `--x="1.5"`) —
                // presumably intended for shell-safe output; confirm.
                Ok(format!("--{}={:?}", self.param_name, v))
            }
        } else {
            Ok(v)
        }
    }
}
// Parameter-type specification subcommands. Plain `//` comments on purpose:
// `///` would become structopt help text and change runtime output.
#[derive(Debug, structopt::StructOpt)]
#[structopt(rename_all = "kebab-case")]
pub enum ParamSpec {
    // Boolean parameter (modeled as a categorical over "false"/"true").
    Bool,
    // Pick one of the given strings; `--ordinal` makes the order meaningful.
    Choice {
        choices: Vec<String>,
        #[structopt(long)]
        ordinal: bool,
    },
    // Numeric range [min, max]; `--ln` samples on a log scale, `--step`
    // discretizes, `--fidelity` marks it as a fidelity dimension.
    Range {
        min: f64,
        max: f64,
        #[structopt(long)]
        ln: bool,
        #[structopt(long)]
        step: Option<f64>,
        #[structopt(long)]
        fidelity: bool,
    },
    // Normally distributed numeric parameter.
    Normal {
        mean: f64,
        stddev: f64,
    },
}
impl ParamSpec {
    /// Converts this CLI-level spec into the internal `ParamType`.
    ///
    /// Contradictory `Range` combinations (`--ln` with `--step`, `--ln`
    /// with `--fidelity`) are rejected with an error.
    fn to_param_type(&self) -> anyhow::Result<ParamType> {
        match self {
            // Bool is a two-value categorical.
            Self::Bool => CategoricalParamType::new(vec!["false".to_owned(), "true".to_owned()])
                .map(StrParamType::Categorical)
                .map(ParamType::Str),
            Self::Choice {
                choices,
                ordinal: false,
            } => CategoricalParamType::new(choices.clone())
                .map(StrParamType::Categorical)
                .map(ParamType::Str),
            Self::Choice {
                choices,
                ordinal: true,
            } => OrdinalParamType::new(choices.clone())
                .map(StrParamType::Ordinal)
                .map(ParamType::Str),
            Self::Normal { mean, stddev } => NormalParamType::new(*mean, *stddev)
                .map(NumParamType::Normal)
                .map(ParamType::Num),
            // No step, not a fidelity: continuous range (log-scaled if `ln`).
            Self::Range {
                min,
                max,
                ln,
                step: None,
                fidelity: false,
            } => ContinousParamType::new(*min, *max, *ln)
                .map(NumParamType::Continous)
                .map(ParamType::Num),
            Self::Range {
                min,
                max,
                ln: false,
                step: Some(step),
                fidelity: false,
            } => DiscreteParamType::new(*min, *max, *step)
                .map(NumParamType::Discrete)
                .map(ParamType::Num),
            Self::Range {
                min,
                max,
                ln: false,
                step,
                fidelity: true,
            } => FidelityParamType::new(*min, *max, *step)
                .map(NumParamType::Fidelity)
                .map(ParamType::Num),
            // Remaining combinations are contradictory and rejected.
            Self::Range {
                ln: true,
                step: Some(_),
                ..
            } => anyhow::bail!("Cannot specify both `--ln` and `--step` options."),
            Self::Range {
                ln: true,
                fidelity: true,
                ..
            } => anyhow::bail!("Cannot specify both `--ln` and `--fidelity` options."),
        }
    }
}
| 30.195489 | 96 | 0.480578 |
011b55e2f1c71f82cab8177c8bae843b53b45f9e
| 5,050 |
use sp_core::{Pair, Public, sr25519};
use node_template_runtime::{
AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig,
SudoConfig, SystemConfig, CouncilConfig, TechnicalCommitteeConfig, WASM_BINARY, Signature,
};
use sp_consensus_aura::sr25519::AuthorityId as AuraId;
use sp_finality_grandpa::AuthorityId as GrandpaId;
use sp_runtime::traits::{Verify, IdentifyAccount};
use sc_service::ChainType;
// The URL for the telemetry server.
// const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/";
/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type.
pub type ChainSpec = sc_service::GenericChainSpec<GenesisConfig>;
/// Generate a crypto pair from seed.
///
/// Derives the key from the dev phrase `"//<seed>"` with no password;
/// the `expect` is safe for the static well-known seeds used here.
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
	TPublic::Pair::from_string(&format!("//{}", seed), None)
		.expect("static values are valid; qed")
		.public()
}
type AccountPublic = <Signature as Verify>::Signer;
/// Generate an account ID from seed.
///
/// Builds the public key via [`get_from_seed`] and converts it into an
/// `AccountId` through the runtime's `Signature` signer type.
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId where
	AccountPublic: From<<TPublic::Pair as Pair>::Public>
{
	AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}
/// Generate an Aura authority key.
///
/// Returns the (Aura, GRANDPA) session key pair derived from `s`.
pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) {
	let aura = get_from_seed::<AuraId>(s);
	let grandpa = get_from_seed::<GrandpaId>(s);
	(aura, grandpa)
}
/// Development chain spec: single authority (Alice), Alice as sudo,
/// a handful of pre-funded dev accounts.
pub fn development_config() -> Result<ChainSpec, String> {
	let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm binary not available".to_string())?;
	Ok(ChainSpec::from_genesis(
		// Name
		"Development",
		// ID
		"dev",
		ChainType::Development,
		move || testnet_genesis(
			wasm_binary,
			// Initial PoA authorities
			vec![
				authority_keys_from_seed("Alice"),
			],
			// Sudo account
			get_account_id_from_seed::<sr25519::Public>("Alice"),
			// Pre-funded accounts
			vec![
				get_account_id_from_seed::<sr25519::Public>("Alice"),
				get_account_id_from_seed::<sr25519::Public>("Bob"),
				get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
				get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
			],
			true,
		),
		// Bootnodes
		vec![],
		// Telemetry
		None,
		// Protocol ID
		None,
		// Properties
		None,
		// Extensions
		None,
	))
}
/// Local testnet spec: two authorities (Alice, Bob), Alice as sudo,
/// all well-known dev accounts pre-funded.
pub fn local_testnet_config() -> Result<ChainSpec, String> {
	let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm binary not available".to_string())?;
	Ok(ChainSpec::from_genesis(
		// Name
		"Local Testnet",
		// ID
		"local_testnet",
		ChainType::Local,
		move || testnet_genesis(
			wasm_binary,
			// Initial PoA authorities
			vec![
				authority_keys_from_seed("Alice"),
				authority_keys_from_seed("Bob"),
			],
			// Sudo account
			get_account_id_from_seed::<sr25519::Public>("Alice"),
			// Pre-funded accounts
			vec![
				get_account_id_from_seed::<sr25519::Public>("Alice"),
				get_account_id_from_seed::<sr25519::Public>("Bob"),
				get_account_id_from_seed::<sr25519::Public>("Charlie"),
				get_account_id_from_seed::<sr25519::Public>("Dave"),
				get_account_id_from_seed::<sr25519::Public>("Eve"),
				get_account_id_from_seed::<sr25519::Public>("Ferdie"),
				get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
				get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
				get_account_id_from_seed::<sr25519::Public>("Charlie//stash"),
				get_account_id_from_seed::<sr25519::Public>("Dave//stash"),
				get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
				get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
			],
			true,
		),
		// Bootnodes
		vec![],
		// Telemetry
		None,
		// Protocol ID
		None,
		// Properties
		None,
		// Extensions
		None,
	))
}
/// Configure initial storage state for FRAME modules.
///
/// Builds the genesis config from the given authorities, sudo key and
/// endowed accounts; council/committee start empty.
fn testnet_genesis(
	wasm_binary: &[u8],
	initial_authorities: Vec<(AuraId, GrandpaId)>,
	root_key: AccountId,
	endowed_accounts: Vec<AccountId>,
	_enable_println: bool,
) -> GenesisConfig {
	GenesisConfig {
		frame_system: Some(SystemConfig {
			// Add Wasm runtime to storage.
			code: wasm_binary.to_vec(),
			changes_trie_config: Default::default(),
		}),
		pallet_balances: Some(BalancesConfig {
			// Configure endowed accounts with initial balance of 1 << 60.
			balances: endowed_accounts.iter().cloned().map(|k|(k, 1 << 60)).collect(),
		}),
		// Aura takes only the first key of each authority pair.
		pallet_aura: Some(AuraConfig {
			authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(),
		}),
		// GRANDPA takes the second key, each authority with voting weight 1.
		pallet_grandpa: Some(GrandpaConfig {
			authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(),
		}),
		pallet_sudo: Some(SudoConfig {
			// Assign network admin rights.
			key: root_key,
		}),
		pallet_collective_Instance1: Some(CouncilConfig {
			members: vec![],
			phantom: Default::default(),
		}),
		pallet_collective_Instance2: Some(TechnicalCommitteeConfig {
			members: vec![],
			phantom: Default::default(),
		}),
		pallet_membership_Instance1: Some(Default::default()),
		pallet_elections_phragmen: Some(Default::default()),
		pallet_treasury: Some(Default::default()),
	}
}
| 29.881657 | 99 | 0.703168 |
628d6edf6f7c3c05a9e8aea4a21622552d9507b7
| 6,210 |
#![deny(unused_must_use)]
//! Network tests for Dynamic Honey Badger.
extern crate env_logger;
extern crate hbbft;
extern crate itertools;
extern crate log;
extern crate rand;
extern crate rand_derive;
extern crate serde_derive;
extern crate threshold_crypto as crypto;
mod network;
use std::collections::BTreeMap;
use std::iter;
use std::sync::Arc;
use itertools::Itertools;
use log::info;
use rand::{Isaac64Rng, Rng};
use hbbft::dynamic_honey_badger::{Batch, Change, ChangeState, DynamicHoneyBadger, Input};
use hbbft::sender_queue::{SenderQueue, Step};
use hbbft::transaction_queue::TransactionQueue;
use hbbft::NetworkInfo;
use network::{Adversary, MessageScheduler, NodeId, SilentAdversary, TestNetwork, TestNode};
type UsizeDhb = SenderQueue<DynamicHoneyBadger<Vec<usize>, NodeId>>;
/// Proposes `num_txs` values and expects nodes to output and order them.
///
/// The test also exercises dynamic membership: it first votes to remove
/// node 0, waits until every node has committed that change, then votes
/// to add node 0 back, all while transactions keep flowing.
fn test_dynamic_honey_badger<A>(mut network: TestNetwork<A, UsizeDhb>, num_txs: usize)
where
    A: Adversary<UsizeDhb>,
{
    let mut rng = rand::thread_rng().gen::<Isaac64Rng>();
    // Every node starts with the same queue of pending transactions.
    let new_queue = |id: &NodeId| (*id, (0..num_txs).collect::<Vec<usize>>());
    let mut queues: BTreeMap<_, _> = network.nodes.keys().map(new_queue).collect();
    for (id, queue) in &mut queues {
        network.input(*id, Input::User(queue.choose(&mut rng, 3, 10)));
    }
    // `pub_keys_add` is the full validator set; `pub_keys_rm` is the same
    // set minus node 0 — the target of the first membership change.
    let netinfo = network.observer.instance().algo().netinfo().clone();
    let pub_keys_add = netinfo.public_key_map().clone();
    let mut pub_keys_rm = pub_keys_add.clone();
    pub_keys_rm.remove(&NodeId(0));
    network.input_all(Input::Change(Change::NodeChange(pub_keys_rm.clone())));
    // Predicates: has the node committed the remove/add change yet?
    let has_remove = |node: &TestNode<UsizeDhb>| {
        node.outputs().iter().any(|batch| match batch.change() {
            ChangeState::Complete(Change::NodeChange(pub_keys)) => pub_keys == &pub_keys_rm,
            _ => false,
        })
    };
    let has_add = |node: &TestNode<UsizeDhb>| {
        node.outputs().iter().any(|batch| match batch.change() {
            ChangeState::Complete(Change::NodeChange(pub_keys)) => pub_keys == &pub_keys_add,
            _ => false,
        })
    };
    // Returns `true` if the node has not output all transactions yet.
    let node_busy = |node: &TestNode<UsizeDhb>| {
        if !has_remove(node) || !has_add(node) {
            return true;
        }
        node.outputs().iter().flat_map(Batch::iter).unique().count() < num_txs
    };
    let mut rng = rand::thread_rng();
    let mut input_add = false; // Whether the vote to add node 0 has already been input.
    // Handle messages in random order until all nodes have output all transactions.
    while network.nodes.values().any(node_busy) {
        // If a node is expecting input, take it from the queue. Otherwise handle a message.
        let input_ids: Vec<_> = network
            .nodes
            .iter()
            .filter(|(_, node)| {
                node_busy(*node)
                    && !node.instance().algo().has_input()
                    && node.instance().algo().netinfo().is_validator()
                    // Wait until all nodes have completed removing 0, before inputting `Add`.
                    && (input_add || !has_remove(node))
                    // If there's only one node, it will immediately output on input. Make sure we
                    // first process all incoming messages before providing input again.
                    && (network.nodes.len() > 2 || node.queue.is_empty())
            }).map(|(id, _)| *id)
            .collect();
        if let Some(id) = rng.choose(&input_ids) {
            let queue = queues.get_mut(id).unwrap();
            queue.remove_multiple(network.nodes[id].outputs().iter().flat_map(Batch::iter));
            network.input(*id, Input::User(queue.choose(&mut rng, 3, 10)));
        }
        network.step();
        // Once all nodes have processed the removal of node 0, add it again.
        if !input_add && network.nodes.values().all(has_remove) {
            network.input_all(Input::Change(Change::NodeChange(pub_keys_add.clone())));
            input_add = true;
        }
    }
    network.verify_batches();
}
// Allow passing `netinfo` by value. `TestNetwork` expects this function signature.
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
/// Builds a sender-queued `DynamicHoneyBadger` instance for one test node.
fn new_dynamic_hb(
    netinfo: Arc<NetworkInfo<NodeId>>,
) -> (UsizeDhb, Step<DynamicHoneyBadger<Vec<usize>, NodeId>>) {
    // The observer gets the next free ID after all validators.
    let observer = NodeId(netinfo.num_nodes());
    let our_id = *netinfo.our_id();
    // Peers: every other validator, plus the observer.
    let peer_ids = netinfo
        .all_ids()
        .filter(|&&them| them != our_id)
        .cloned()
        .chain(iter::once(observer));
    SenderQueue::builder(
        DynamicHoneyBadger::builder().build((*netinfo).clone()),
        peer_ids,
    ).build(our_id)
}
/// Runs the dynamic Honey Badger test on several network sizes, including
/// one randomly chosen size, with the adversary built by `new_adversary`.
fn test_dynamic_honey_badger_different_sizes<A, F>(new_adversary: F, num_txs: usize)
where
    A: Adversary<UsizeDhb>,
    F: Fn(usize, usize, BTreeMap<NodeId, Arc<NetworkInfo<NodeId>>>) -> A,
{
    // This returns an error in all but the first test.
    let _ = env_logger::try_init();
    let mut rng = rand::thread_rng();
    let sizes = vec![2, 3, 5, rng.gen_range(6, 10)];
    for size in sizes {
        // The test is removing one correct node, so we allow fewer faulty ones.
        let num_adv_nodes = (size - 2) / 3;
        let num_good_nodes = size - num_adv_nodes;
        info!(
            "Network size: {} good nodes, {} faulty nodes",
            num_good_nodes, num_adv_nodes
        );
        let adversary = |adv_nodes| new_adversary(num_good_nodes, num_adv_nodes, adv_nodes);
        let network =
            TestNetwork::new_with_step(num_good_nodes, num_adv_nodes, adversary, new_dynamic_hb);
        test_dynamic_honey_badger(network, num_txs);
    }
}
#[test]
fn test_dynamic_honey_badger_random_delivery_silent() {
    // Silent adversary; messages are delivered in random order.
    let make_adversary =
        |_num_good: usize, _num_adv: usize, _| SilentAdversary::new(MessageScheduler::Random);
    test_dynamic_honey_badger_different_sizes(make_adversary, 10);
}
#[test]
fn test_dynamic_honey_badger_first_delivery_silent() {
    // Silent adversary; the first queued message is always delivered first.
    let make_adversary =
        |_num_good: usize, _num_adv: usize, _| SilentAdversary::new(MessageScheduler::First);
    test_dynamic_honey_badger_different_sizes(make_adversary, 10);
}
| 38.571429 | 98 | 0.644444 |
76a6e332081b6dd27df6ec2a19c01d24d73035d1
| 9,936 |
#![deny(rust_2018_idioms)]
#![warn(clippy::pedantic)]
#![allow(clippy::redundant_closure_for_method_calls)]
use anyhow::{bail, Context, Result};
use argh::FromArgs;
use serde::Deserialize;
use std::collections::HashMap;
use std::fs::{self, File};
use std::io;
use std::path::{Path, PathBuf};
use url::Url;
const DEFAULT_LICENSES_CONF: &str = "Licenses.toml";
/// Stores arguments
// NOTE: `///` field docs are argh's help text, so extra notes use `//`.
#[derive(FromArgs, PartialEq, Debug)]
struct Args {
    /// configuration file with the licenses to be used
    #[argh(option, short = 'l', default = "DEFAULT_LICENSES_CONF.to_string()")]
    licenses_file: String,
    // Which action to run (spdx-id / path / fetch).
    #[argh(subcommand)]
    subcommand: Subcommand,
}
/// Stores the subcommand to be executed
// One variant per CLI action; argh dispatches on the subcommand name.
#[derive(FromArgs, Debug, PartialEq)]
#[argh(subcommand)]
enum Subcommand {
    SpdxId(SpdxIdArgs),
    Path(PathArgs),
    Fetch(FetchArgs),
}
/// Returns the spdx-id for the package
// `spdx-id <package>` — prints the configured SPDX identifier.
#[derive(FromArgs, Debug, PartialEq)]
#[argh(subcommand, name = "spdx-id")]
struct SpdxIdArgs {
    /// the package name used to look up for the licenses
    #[argh(positional)]
    package_name: String,
}
/// Creates a copy of the licenses files in the dest directory
// `fetch <destination>` — downloads/copies all configured license files.
#[derive(FromArgs, Debug, PartialEq)]
#[argh(subcommand, name = "fetch")]
struct FetchArgs {
    /// the destination folder for the licenses
    #[argh(positional)]
    destination: PathBuf,
}
/// Prints out a space-separated list of the paths to the licenses files
// `path <package> [-p prefix]` — lists license destination paths.
#[derive(FromArgs, Debug, PartialEq)]
#[argh(subcommand, name = "path")]
struct PathArgs {
    /// the package name used to look up for the licenses
    #[argh(positional)]
    package_name: String,
    /// the source folder where the licenses are
    #[argh(option, short = 'p')]
    prefix: Option<PathBuf>,
}
/// Holds the configurations for package's licenses
///
/// Deserialized from one entry of the `Licenses.toml` configuration.
#[derive(Deserialize, Debug)]
struct PackageLicense {
    // The SPDX identifier for the package
    #[serde(rename(deserialize = "spdx-id"))]
    spdx_id: String,
    // The licenses that apply to the package
    licenses: Vec<License>,
}
/// Holds the configurations for a license
///
/// `license_url` may be absent: the entry then only names the local file.
#[derive(Deserialize, Debug, Clone)]
struct License {
    // The path to the license to fetch
    #[serde(rename(deserialize = "license-url"))]
    license_url: Option<Url>,
    // The file name used to store the license
    path: String,
}
/// Prints the SPDX identifier configured for `package` to stdout.
///
/// # Errors
/// Fails when `package` has no entry in `packages_licenses`.
fn print_spdx_id<S>(packages_licenses: &HashMap<String, PackageLicense>, package: S) -> Result<()>
where
    S: AsRef<str>,
{
    let package = package.as_ref();
    // `with_context` builds the error message lazily, only on the error path
    // (the eager `context(format!(...))` allocated even on success).
    let package_license = packages_licenses.get(package).with_context(|| {
        format!("Couldn't find configuration for package '{}'", package)
    })?;
    println!("{}", package_license.spdx_id);
    Ok(())
}
/// Prints a space-separated list of the destination paths of the package's
/// license files, each optionally joined onto `prefix`.
///
/// # Errors
/// Fails when `package_name` has no entry in `packages_licenses`.
fn print_paths<S>(
    packages_licenses: &HashMap<String, PackageLicense>,
    package_name: S,
    prefix: Option<PathBuf>,
) -> Result<()>
where
    S: AsRef<str>,
{
    let package_name = package_name.as_ref();
    // Lazy context: only format the message if the lookup actually fails.
    let package_license = packages_licenses.get(package_name).with_context(|| {
        format!("Couldn't find configuration for package '{}'", package_name)
    })?;
    println!(
        "{}",
        get_license_destinations(package_license, prefix).join(" ")
    );
    Ok(())
}
/// Fetches all the licenses for the passed map of package licenses
///
/// Processes packages sequentially; the first failing package aborts the run.
async fn fetch_all_licenses<P>(
    packages_licenses: &HashMap<String, PackageLicense>,
    dest: P,
) -> Result<()>
where
    P: AsRef<Path>,
{
    for package_license in packages_licenses.values() {
        fetch_licenses(package_license, &dest).await?;
    }
    Ok(())
}
/// Fetches the licenses in the `PackageLicense` object, and creates a copy of them in `dest`
async fn fetch_licenses<P>(package_license: &PackageLicense, dest: P) -> Result<()>
where
P: AsRef<Path>,
{
let dest = dest.as_ref();
for license in &package_license.licenses {
let path: PathBuf = dest.join(&license.path);
if path.exists() {
// Skip if the file already exists
continue;
}
if let Some(license_url) = &license.license_url {
match license_url.scheme() {
"file" => {
fs::copy(license_url.path(), &path)
.context(format!("Failed to copy file from '{}'", license_url.path()))?;
}
"http" | "https" => {
let content = reqwest::get(license_url.clone())
.await
.context(format!(
"Failed to download file from '{}'",
license_url.to_string()
))?
.text()
.await?;
let mut dest = File::create(&path).context(format!(
"Failed to create file '{}'",
path.display().to_string()
))?;
io::copy(&mut content.as_bytes(), &mut dest).context(format!(
"Failed to copy content to '{}'",
path.display().to_string()
))?;
}
_ => bail!(
"Invalid scheme for '{}', valid options are: ['file://', 'http://', 'https://']",
license_url
),
};
}
}
Ok(())
}
/// Returns a list of paths to the destination files for the licenses
fn get_license_destinations(
package_license: &PackageLicense,
dest: Option<PathBuf>,
) -> Vec<String> {
let mut all_paths = Vec::new();
let dest = match dest {
None => Path::new("").into(),
Some(dest) => dest,
};
for license in &package_license.licenses {
all_paths.push(dest.join(&license.path).display().to_string());
}
all_paths
}
/// Parses a map of `PackageLicense` objects from an array of bytes
fn parse_licenses_file<P>(licenses_file: P) -> Result<HashMap<String, PackageLicense>>
where
P: AsRef<Path>,
{
let licenses_file = licenses_file.as_ref();
Ok(toml::from_slice(&fs::read(&licenses_file).context(
format!("Failed to read file '{}'", licenses_file.display()),
)?)?)
}
/// Entry point: parses CLI arguments, loads the license configuration and
/// dispatches to the selected subcommand.
#[tokio::main]
async fn main() -> Result<()> {
    let args: Args = argh::from_env();
    let packages_licenses = parse_licenses_file(&args.licenses_file)?;
    match args.subcommand {
        Subcommand::SpdxId(spdxid_args) => {
            print_spdx_id(&packages_licenses, spdxid_args.package_name)?;
        }
        Subcommand::Path(path_args) => {
            print_paths(&packages_licenses, path_args.package_name, path_args.prefix)?;
        }
        Subcommand::Fetch(fetch_args) => {
            fetch_all_licenses(&packages_licenses, fetch_args.destination).await?;
        }
    }
    Ok(())
}
#[cfg(test)]
mod test_packages_licenses {
    use super::{get_license_destinations, parse_licenses_file};
    use anyhow::Result;
    use std::io;
    static TEST_PACKAGES_LICENSES: &str = include_str!("../tests/data/test-packages-licenses.toml");
    // Parsing the bundled fixture TOML should succeed.
    #[test]
    fn test_parse_toml_file() -> Result<()> {
        let mut tmplicense = tempfile::NamedTempFile::new()?;
        io::copy(&mut TEST_PACKAGES_LICENSES.as_bytes(), &mut tmplicense)?;
        assert!(parse_licenses_file(tmplicense).is_ok());
        Ok(())
    }
    // The configured `path` field (not the URL's file name) decides the destination.
    #[test]
    fn test_use_path() -> Result<()> {
        let mut tmplicense = tempfile::NamedTempFile::new()?;
        io::copy(&mut TEST_PACKAGES_LICENSES.as_bytes(), &mut tmplicense)?;
        let packages_licences = parse_licenses_file(tmplicense)?;
        let package_license = packages_licences.get("the-package").unwrap();
        // Original file name is `license.txt`
        assert!(
            get_license_destinations(package_license, Some("./dest".into()))
                == vec!["./dest/license-path.txt"]
        );
        Ok(())
    }
}
#[cfg(test)]
mod test_fetch_license {
    use super::{fetch_licenses, License, PackageLicense};
    use anyhow::Result;
    use httptest::{matchers::request, responders::status_code, Expectation, Server};
    use std::fs;
    use url::Url;
    // A `file://` URL should be copied into the destination directory.
    #[tokio::test]
    async fn test_fetch_license_from_file() -> Result<()> {
        let tmpdir = tempfile::tempdir()?;
        let tmplicense = tempfile::NamedTempFile::new()?;
        let package_license = PackageLicense {
            spdx_id: "spdx-id".to_string(),
            licenses: vec![License {
                license_url: Some(Url::parse(&format!(
                    "file://{}",
                    tmplicense.path().display().to_string()
                ))?),
                path: String::from("license-file.txt"),
            }],
        };
        fetch_licenses(&package_license, &tmpdir).await?;
        assert!(tmpdir
            .path()
            .join(String::from("license-file.txt"))
            .exists());
        Ok(())
    }
    // An `http://` URL should be downloaded (served here by a local mock server)
    // and written verbatim to the destination file.
    #[tokio::test]
    async fn test_fetch_license_from_http() -> Result<()> {
        let tmpdir = tempfile::tempdir()?;
        let server = Server::run();
        let license_body = "A cool body for the license";
        server.expect(
            Expectation::matching(request::method_path("GET", "/license.txt"))
                .respond_with(status_code(200).body(license_body)),
        );
        let url = server.url("/license.txt");
        let package_license = PackageLicense {
            spdx_id: "spdx-id".to_string(),
            licenses: vec![License {
                license_url: Some(Url::parse(&url.to_string())?),
                path: String::from("license-file.txt"),
            }],
        };
        let path = tmpdir.path().join(String::from("license-file.txt"));
        fetch_licenses(&package_license, &tmpdir).await?;
        assert!(path.exists());
        let content = fs::read(path)?;
        assert!(content == license_body.as_bytes());
        Ok(())
    }
}
| 31.05 | 101 | 0.591184 |
e504b4705daf286dd7a0396cf2181341c25b1501
| 2,632 |
// compile-pass
// ignore-cloudabi no std::fs
// Regression test for #20797.
use std::default::Default;
use std::io;
use std::fs;
use std::path::PathBuf;
/// Minimal path abstraction used by `Strategy`; the default `is_dir`
/// always returns `false`.
pub trait PathExtensions {
    fn is_dir(&self) -> bool { false }
}
// `PathBuf` relies on the trait's default `is_dir` (always `false`).
impl PathExtensions for PathBuf {}
/// A strategy for acquiring more subpaths to walk.
///
/// `P` is the path type the walker traverses.
pub trait Strategy {
    type P: PathExtensions;
    /// Gets additional subpaths from a given path.
    fn get_more(&self, item: &Self::P) -> io::Result<Vec<Self::P>>;
    /// Determine whether a path should be walked further.
    /// This is run against each item from `get_more()`.
    fn prune(&self, p: &Self::P) -> bool;
}
/// The basic fully-recursive strategy. Nothing is pruned.
// Zero-sized marker type; `Default` lets `Subpaths::walk` construct it.
#[derive(Copy, Clone, Default)]
pub struct Recursive;
impl Strategy for Recursive {
    type P = PathBuf;
    /// Lists the direct children of `p`.
    ///
    /// Propagates I/O errors (unreadable directory, racing entry removal)
    /// via `?` instead of panicking — the original `unwrap()`s defeated the
    /// `io::Result` return type.
    fn get_more(&self, p: &PathBuf) -> io::Result<Vec<PathBuf>> {
        fs::read_dir(p)?
            .map(|entry| entry.map(|e| e.path()))
            .collect()
    }
    /// Fully recursive: nothing is ever pruned.
    fn prune(&self, _: &PathBuf) -> bool { false }
}
/// A directory walker of `P` using strategy `S`.
pub struct Subpaths<S: Strategy> {
    // Paths not yet visited; popped from the back (depth-first order).
    stack: Vec<S::P>,
    // Decides how children are discovered and which paths are skipped.
    strategy: S,
}
impl<S: Strategy> Subpaths<S> {
    /// Creates a directory walker with a root path and strategy.
    ///
    /// Seeds the stack with the root's children; fails if they cannot be read.
    pub fn new(p: &S::P, strategy: S) -> io::Result<Subpaths<S>> {
        let initial = strategy.get_more(p)?;
        Ok(Subpaths { stack: initial, strategy })
    }
}
impl<S: Default + Strategy> Subpaths<S> {
    /// Creates a directory walker with a root path and a default strategy.
    pub fn walk(p: &S::P) -> io::Result<Subpaths<S>> {
        Subpaths::new(p, Default::default())
    }
}
impl<S: Default + Strategy> Default for Subpaths<S> {
    /// An exhausted walker (empty stack) with the strategy's default config.
    fn default() -> Subpaths<S> {
        Subpaths {
            stack: Vec::new(),
            strategy: S::default(),
        }
    }
}
impl<S: Strategy> Iterator for Subpaths<S> {
    type Item = S::P;
    /// Pops the next non-pruned path; if it is a directory, pushes its
    /// children (errors from `get_more` are silently ignored) before
    /// yielding it.
    fn next(&mut self) -> Option<S::P> {
        loop {
            // Stack empty => iteration is done.
            let path = self.stack.pop()?;
            if self.strategy.prune(&path) {
                continue;
            }
            if path.is_dir() {
                if let Ok(children) = self.strategy.get_more(&path) {
                    self.stack.extend(children);
                }
            }
            return Some(path);
        }
    }
}
// Instantiates the walker to force monomorphization; this file exists to
// compile (regression test for #20797), not to run.
fn _foo() {
    let _walker: Subpaths<Recursive> = Subpaths::walk(&PathBuf::from("/home")).unwrap();
}
// Empty entry point: the test only needs this file to compile.
fn main() {}
| 27.705263 | 88 | 0.569149 |
146cd0028d724276d0c6222663260dedfed6f6d9
| 3,962 |
use std::io::{self, Read, Write};
use std::net::{TcpListener, TcpStream};
use std::fs;
use std::str;
use std::path::Path;
mod lib;
use lib::State;
use std::env;
/// Entry point: parses `--host`/`--port`, binds a non-blocking listener,
/// restores persisted state from `store.red` (if present) and serves
/// connections forever.
fn main() {
  let mut args = env::args().skip(1);
  let mut host = "127.0.0.1".to_string();
  let mut port = "7878".to_string();
  loop {
    match args.next() {
      Some(x) if x == "--host" => {
        host = args.next().unwrap_or(host);
      },
      Some(x) if x == "--port" => {
        port = args.next().unwrap_or(port);
      },
      Some(x) => {
        println!("unknown argument: {}", x);
      }
      None => {
        break;
      }
    }
  }
  let listener = TcpListener::bind(format!("{}:{}", host, port)).unwrap();
  listener.set_nonblocking(true).expect("Cannot set non-blocking");
  // Restore previously persisted state, if any.
  let mut state = if Path::new("store.red").exists() {
    State::deserialize(read_file())
  } else {
    State::new()
  };
  for stream in listener.incoming() {
    match stream {
      Ok(s) => {
        handle_conn(s, &mut state);
      }
      Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
        // No pending connection. Sleep briefly instead of spinning the
        // accept loop at 100% CPU (the listener is non-blocking).
        std::thread::sleep(std::time::Duration::from_millis(1));
        continue;
      }
      Err(e) => panic!("Encountered IO error: {}", e),
    }
  }
}
/// Drains one client connection and dispatches each `\n`-delimited command.
///
/// The stream inherits non-blocking mode from the listener, so `read_to_end`
/// may return `WouldBlock`; the loop retries until the read completes (EOF).
/// NOTE(review): the retry branch spins without sleeping, and on error the
/// contents of `buf` are unspecified per the `read_to_end` docs — confirm
/// partial reads are actually preserved across retries.
fn handle_conn(mut stream: TcpStream, state: &mut State) {
  let mut buf = vec![];
  loop {
    match stream.read_to_end(&mut buf) {
      Ok(_) => {
        // Commands are newline (byte 10) separated; a trailing newline
        // yields a final empty slice, which the handler ignores.
        let iter = buf.split(|c| *c == 10);
        for bytes in iter {
          handle_buf_slice(bytes, &stream, state);
        }
        break;
      },
      Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
        // TODO: handle idle waiting for fd for linux
      }
      Err(e) => panic!("encountered IO error: {}", e),
    };
  };
}
/// Dispatches a single newline-delimited command to the matching state
/// operation, writing any reply back on `stream`.
///
/// Socket writes are fire-and-forget (results ignored), matching the
/// original behavior. Compared to the previous byte-pattern `match`, this
/// version uses readable byte-string prefixes and no longer panics on
/// truncated commands (e.g. a bare `get` or a `set` without a value).
fn handle_buf_slice(bytes: &[u8], mut stream: &TcpStream, state: &mut State) {
  if bytes.is_empty() {
    // Empty slice after the trailing newline: end of stream.
    return;
  }
  // NOTE(review): the `from_utf8(...).unwrap()` calls below still panic on
  // non-UTF-8 payloads, as before — consider rejecting those gracefully.
  if bytes.starts_with(b"smembers") {
    // SMEMBERS -> one member per line.
    for member in state.smembers() {
      let _ = stream.write(member.as_bytes());
      let _ = stream.write(b"\n");
    }
  } else if bytes.starts_with(b"sadd ") {
    // SADD member
    let member = &bytes[5..];
    state.sadd(String::from_utf8(member.to_vec()).unwrap());
  } else if bytes.starts_with(b"srem ") {
    // SREM member
    let member = &bytes[5..];
    state.srem(str::from_utf8(member).unwrap());
  } else if bytes.starts_with(b"get ") {
    // GET key -> value or "nil".
    let key = &bytes[4..];
    match state.get(str::from_utf8(key).unwrap()) {
      Some(value) => {
        let _ = stream.write(value.as_bytes());
        let _ = stream.write(b"\n");
      }
      None => {
        let _ = stream.write(b"nil\n");
      }
    }
  } else if bytes.starts_with(b"set ") {
    // SET key value -> "OK". `splitn(2, ...)` keeps spaces in the value
    // (the old two-token split silently truncated it) and a missing value
    // is reported instead of panicking.
    let rhs = &bytes[4..];
    let mut parts = rhs.splitn(2, |c| *c == b' ');
    match (parts.next(), parts.next()) {
      (Some(key), Some(val)) => {
        state.set(
          String::from_utf8(key.to_vec()).unwrap(),
          String::from_utf8(val.to_vec()).unwrap(),
        );
        let _ = stream.write(b"OK\n");
      }
      _ => {
        println!("unknown operation");
      }
    }
  } else if bytes.starts_with(b"debug") {
    // DEBUG -> dump state to the server console.
    println!("{:#?}", state);
  } else {
    println!("unknown operation");
  }
}
/// Loads the persisted state blob from `store.red`; panics if unreadable.
fn read_file() -> String {
  let contents = fs::read_to_string("store.red");
  contents.expect("Failed reading from file")
}
| 28.099291 | 78 | 0.4263 |
0a9247d58a02d53c42c10c74891f7f02a7929ae4
| 7,475 |
use std::{
collections::{HashMap, HashSet},
default::Default,
};
use rand::{rngs::OsRng, Rng};
use thiserror::Error;
use x25519_dalek;
use zeroize::Zeroize;
use oasis_core_runtime::{
common::{
crypto::signature::{PublicKey as OasisPublicKey, Signature, SignatureBundle},
namespace::Namespace,
sgx::avr::EnclaveIdentity,
},
impl_bytes,
};
impl_bytes!(KeyPairId, 32, "A 256-bit key pair identifier.");
impl_bytes!(PublicKey, 32, "A public key.");
/// A private key.
// Zeroized on drop so key material does not linger in memory.
#[derive(Clone, Default, cbor::Encode, cbor::Decode, Zeroize)]
#[cbor(transparent)]
#[zeroize(drop)]
pub struct PrivateKey(pub [u8; 32]);
/// A state encryption key.
// Zeroized on drop so key material does not linger in memory.
#[derive(Clone, Default, cbor::Encode, cbor::Decode, Zeroize)]
#[cbor(transparent)]
#[zeroize(drop)]
pub struct StateKey(pub [u8; 32]);
// Borrow the raw 32 key bytes.
impl AsRef<[u8]> for StateKey {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}
/// A 256-bit master secret.
// Zeroized on drop so the secret does not linger in memory.
#[derive(Clone, Default, cbor::Encode, cbor::Decode, Zeroize)]
#[cbor(transparent)]
#[zeroize(drop)]
pub struct MasterSecret(pub [u8; 32]);
// Borrow the raw 32 secret bytes.
impl AsRef<[u8]> for MasterSecret {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}
/// Key manager initialization request.
#[derive(Clone, cbor::Encode, cbor::Decode)]
pub struct InitRequest {
    /// Checksum for validating replication.
    pub checksum: Vec<u8>,
    /// Policy for queries/replication.
    pub policy: Vec<u8>,
    /// True iff the enclave may generate a new master secret.
    pub may_generate: bool,
}
/// Key manager initialization response.
#[derive(Clone, cbor::Encode, cbor::Decode)]
pub struct InitResponse {
/// True iff the key manager thinks it's running in a secure mode.
pub is_secure: bool,
/// Checksum for validating replication.
pub checksum: Vec<u8>,
/// Checksum for identifying policy.
pub policy_checksum: Vec<u8>,
}
/// Context used for the init response signature.
pub const INIT_RESPONSE_CONTEXT: &'static [u8] = b"oasis-core/keymanager: init response";
/// Signed InitResponse.
#[derive(Clone, cbor::Encode, cbor::Decode)]
pub struct SignedInitResponse {
/// InitResponse.
pub init_response: InitResponse,
/// Sign(init_response).
pub signature: Signature,
}
/// Key manager replication request.
#[derive(Clone, cbor::Encode, cbor::Decode)]
pub struct ReplicateRequest {
// Empty.
}
/// Key manager replication response.
#[derive(Clone, cbor::Encode, cbor::Decode)]
pub struct ReplicateResponse {
pub master_secret: MasterSecret,
}
/// Request runtime/key pair id tuple.
#[derive(Clone, cbor::Encode, cbor::Decode)]
pub struct RequestIds {
/// Runtime ID.
pub runtime_id: Namespace,
/// Key pair ID.
pub key_pair_id: KeyPairId,
}
impl RequestIds {
    /// Bundle a runtime identifier with a key pair identifier.
    pub fn new(runtime_id: Namespace, key_pair_id: KeyPairId) -> Self {
        Self {
            runtime_id,
            key_pair_id,
        }
    }

    /// Derive a cache key: the runtime id bytes followed by the
    /// key pair id bytes.
    pub fn to_cache_key(&self) -> Vec<u8> {
        let runtime: &[u8] = self.runtime_id.as_ref();
        let key_pair: &[u8] = self.key_pair_id.as_ref();
        [runtime, key_pair].concat()
    }
}
/// A key pair managed by the key manager.
#[derive(Clone, cbor::Encode, cbor::Decode)]
pub struct KeyPair {
/// Input key pair (pk, sk)
pub input_keypair: InputKeyPair,
/// State encryption key
pub state_key: StateKey,
/// Checksum of the key manager state.
pub checksum: Vec<u8>,
}
impl KeyPair {
    /// Generate a new random key (for testing).
    pub fn generate_mock() -> Self {
        let mut csprng = OsRng {};
        // x25519 static secret and its matching public key.
        let secret = x25519_dalek::StaticSecret::new(&mut csprng);
        let public = x25519_dalek::PublicKey::from(&secret);
        // Fresh random state encryption key.
        let mut state_key = StateKey::default();
        csprng.fill(&mut state_key.0);
        Self::new(
            PublicKey(*public.as_bytes()),
            PrivateKey(secret.to_bytes()),
            state_key,
            Vec::new(),
        )
    }

    /// Create a `KeyPair` from its parts.
    pub fn new(pk: PublicKey, sk: PrivateKey, k: StateKey, sum: Vec<u8>) -> Self {
        let input_keypair = InputKeyPair { pk, sk };
        Self {
            input_keypair,
            state_key: k,
            checksum: sum,
        }
    }

    /// Create a `KeyPair` carrying only the public half; the private key
    /// and state key are left as zeroed defaults.
    pub fn from_public_key(k: PublicKey, sum: Vec<u8>) -> Self {
        Self::new(k, PrivateKey::default(), StateKey::default(), sum)
    }
}
#[derive(Clone, cbor::Encode, cbor::Decode)]
pub struct InputKeyPair {
/// Public key.
pub pk: PublicKey,
/// Private key.
pub sk: PrivateKey,
}
/// Context used for the public key signature.
pub const PUBLIC_KEY_CONTEXT: [u8; 8] = *b"EkKmPubK";
/// Signed public key.
#[derive(Clone, Debug, PartialEq, Eq, cbor::Encode, cbor::Decode)]
pub struct SignedPublicKey {
/// Public key.
pub key: PublicKey,
/// Checksum of the key manager state.
pub checksum: Vec<u8>,
/// Sign(sk, (key || checksum)) from the key manager.
pub signature: Signature,
}
/// Key manager error.
#[derive(Error, Debug)]
pub enum KeyManagerError {
#[error("client session is not authenticated")]
NotAuthenticated,
#[error("client session authentication is invalid")]
InvalidAuthentication,
#[error("key manager is not initialized")]
NotInitialized,
#[error("key manager state corrupted")]
StateCorrupted,
#[error("key manager replication required")]
ReplicationRequired,
#[error("policy rollback")]
PolicyRollback,
#[error("policy alteration, without serial increment")]
PolicyChanged,
#[error("policy is malformed or invalid")]
PolicyInvalid,
#[error("policy failed signature verification")]
PolicyInvalidSignature,
#[error("policy has insufficient signatures")]
PolicyInsufficientSignatures,
#[error(transparent)]
Other(anyhow::Error),
}
/// Key manager access control policy.
#[derive(Clone, Debug, cbor::Encode, cbor::Decode)]
pub struct PolicySGX {
pub serial: u32,
pub id: Namespace,
pub enclaves: HashMap<EnclaveIdentity, EnclavePolicySGX>,
}
/// Per enclave key manager access control policy.
#[derive(Clone, Debug, cbor::Encode, cbor::Decode)]
pub struct EnclavePolicySGX {
pub may_query: HashMap<Namespace, Vec<EnclaveIdentity>>,
pub may_replicate: Vec<EnclaveIdentity>,
}
/// Signed key manager access control policy.
#[derive(Clone, Debug, cbor::Encode, cbor::Decode)]
pub struct SignedPolicySGX {
pub policy: PolicySGX,
pub signatures: Vec<SignatureBundle>,
}
/// Set of trusted key manager policy signing keys.
#[derive(Clone, Debug, cbor::Encode, cbor::Decode)]
pub struct TrustedPolicySigners {
/// Set of trusted signers.
pub signers: HashSet<OasisPublicKey>,
/// Threshold for determining if enough valid signatures are present.
pub threshold: u64,
}
impl Default for TrustedPolicySigners {
    /// Default: no trusted signers and a threshold of 9001 —
    /// presumably a deliberately unmeetable sentinel so a default
    /// value never accepts a policy (TODO confirm).
    fn default() -> Self {
        let signers = HashSet::new();
        Self {
            signers,
            threshold: 9001,
        }
    }
}
/// Name of the `get_or_create_keys` method.
pub const METHOD_GET_OR_CREATE_KEYS: &str = "get_or_create_keys";
/// Name of the `get_public_key` method.
pub const METHOD_GET_PUBLIC_KEY: &str = "get_public_key";
/// Name of the `replicate_master_secret` method.
pub const METHOD_REPLICATE_MASTER_SECRET: &str = "replicate_master_secret";
/// Name of the `init` local method.
pub const LOCAL_METHOD_INIT: &str = "init";
| 27.481618 | 89 | 0.650435 |
edf957e95504e0ffb5e8c80ca80b2a2a49e71e9c
| 688 |
// Remove all comments and other human-readable data from a list of keys
extern crate authorized_keys;
use authorized_keys::openssh::v2::*;
use std::iter::FromIterator;
use std::str::FromStr;
const SAMPLE_FILE: &str = include_str!("./sanitize_keys_data.txt");
fn main() {
    let key_file = KeysFile::from_str(SAMPLE_FILE).expect("that was a valid authorized_keys file!");
    println!("Before:\n{}", SAMPLE_FILE);
    // Drop comment lines entirely; strip inline comments from key lines.
    let sanitized = KeysFile::from_iter(key_file.into_iter().filter_map(|line| match line {
        KeysFileLine::Comment(_) => None,
        KeysFileLine::Key(key) => Some(KeysFileLine::Key(key.remove_comments())),
    }));
    println!("After:\n{}", sanitized);
}
| 28.666667 | 100 | 0.65843 |
fc361b4c8aa3addee6620549ed7465125fb820c9
| 8,157 |
use crate::generator::dart::dart_comments;
use crate::generator::dart::ty::*;
use crate::ir::*;
use crate::type_dart_generator_struct;
type_dart_generator_struct!(TypeEnumRefGenerator, IrTypeEnumRef);
// Generates the Dart-side bridging code for a Rust enum: api->wire and
// wire->api conversion bodies, plus the Dart class/enum declaration itself.
impl TypeDartGeneratorTrait for TypeEnumRefGenerator<'_> {
    /// Dart body converting an API-side enum into its wire form.
    /// Field-less enums travel as their integer index ("return raw.index;").
    /// Struct-style enums return `None` here; they are handled by
    /// `api_fill_to_wire_body` instead.
    fn api2wire_body(&self) -> Option<String> {
        if !self.ir.is_struct {
            Some("return raw.index;".to_owned())
        } else {
            None
        }
    }

    /// Dart body that fills a wire object from an API enum value, one
    /// `if (apiObj is Variant)` branch per variant. Only emitted for
    /// struct-style enums.
    fn api_fill_to_wire_body(&self) -> Option<String> {
        if self.ir.is_struct {
            Some(
                self.ir
                    .get(self.context.ir_file)
                    .variants()
                    .iter()
                    .enumerate()
                    .map(|(idx, variant)| {
                        // Plain (field-less) variant: only the tag is written.
                        if let IrVariantKind::Value = &variant.kind {
                            format!(
                                "if (apiObj is {}) {{ wireObj.tag = {}; return; }}",
                                variant.name, idx
                            )
                        } else {
                            // Struct variant: write the tag, inflate the union
                            // member, then copy each field through its
                            // _api2wire_* converter.
                            let r = format!("wireObj.kind.ref.{}.ref", variant.name);
                            let body: Vec<_> = match &variant.kind {
                                IrVariantKind::Struct(st) => st
                                    .fields
                                    .iter()
                                    .map(|field| {
                                        format!(
                                            "{}.{} = _api2wire_{}(apiObj.{});",
                                            r,
                                            field.name.rust_style(),
                                            field.ty.safe_ident(),
                                            field.name.dart_style()
                                        )
                                    })
                                    .collect(),
                                _ => unreachable!(),
                            };
                            format!(
                                "if (apiObj is {0}) {{
                            wireObj.tag = {1};
                            wireObj.kind = inner.inflate_{2}_{0}();
                            {3}
                        }}",
                                variant.name,
                                idx,
                                self.ir.name,
                                body.join("\n")
                            )
                        }
                    })
                    .collect::<Vec<_>>()
                    .join("\n"),
            )
        } else {
            None
        }
    }

    /// Dart body converting a wire value back into the API enum.
    /// Struct enums arrive as a list: raw[0] is the variant tag and the
    /// fields follow (hence `idx + 1` below); plain enums arrive as the
    /// variant index into `.values`.
    fn wire2api_body(&self) -> String {
        if self.ir.is_struct {
            let enu = self.ir.get(self.context.ir_file);
            let variants = enu
                .variants()
                .iter()
                .enumerate()
                .map(|(idx, variant)| {
                    let args = match &variant.kind {
                        IrVariantKind::Value => "".to_owned(),
                        IrVariantKind::Struct(st) => st
                            .fields
                            .iter()
                            .enumerate()
                            .map(|(idx, field)| {
                                // Fields start at raw[1]; raw[0] holds the tag.
                                let val = format!(
                                    "_wire2api_{}(raw[{}]),",
                                    field.ty.safe_ident(),
                                    idx + 1
                                );
                                if st.is_fields_named {
                                    format!("{}: {}", field.name.dart_style(), val)
                                } else {
                                    val
                                }
                            })
                            .collect::<Vec<_>>()
                            .join(""),
                    };
                    format!("case {}: return {}({});", idx, variant.name, args)
                })
                .collect::<Vec<_>>();
            format!(
                "switch (raw[0]) {{
                    {}
                    default: throw Exception(\"unreachable\");
                }}",
                variants.join("\n"),
            )
        } else {
            format!("return {}.values[raw];", self.ir.name)
        }
    }

    /// Dart declaration for the enum: a freezed union class for
    /// struct-style enums, or a plain Dart `enum` otherwise.
    fn structs(&self) -> String {
        let src = self.ir.get(self.context.ir_file);
        let comments = dart_comments(&src.comments);
        if src.is_struct() {
            let variants = src
                .variants()
                .iter()
                .map(|variant| {
                    let args = match &variant.kind {
                        IrVariantKind::Value => "".to_owned(),
                        IrVariantKind::Struct(IrStruct {
                            is_fields_named: false,
                            fields,
                            ..
                        }) => {
                            // NOTE(review): `split` presumably marks where
                            // optional positional parameters begin — the
                            // trailing group is wrapped in [] below. Confirm
                            // against optional_boundary_index.
                            let types = fields.iter().map(|field| &field.ty).collect::<Vec<_>>();
                            let split = optional_boundary_index(&types);
                            let types = fields
                                .iter()
                                .map(|field| {
                                    format!(
                                        "{}{} {},",
                                        dart_comments(&field.comments),
                                        field.ty.dart_api_type(),
                                        field.name.dart_style()
                                    )
                                })
                                .collect::<Vec<_>>();
                            if let Some(idx) = split {
                                let before = &types[..idx];
                                let after = &types[idx..];
                                format!("{}[{}]", before.join(""), after.join(""))
                            } else {
                                types.join("")
                            }
                        }
                        IrVariantKind::Struct(st) => {
                            // Named fields become Dart named parameters.
                            let fields = st
                                .fields
                                .iter()
                                .map(|field| {
                                    format!(
                                        "{}{}{} {},",
                                        dart_comments(&field.comments),
                                        field.ty.dart_required_modifier(),
                                        field.ty.dart_api_type(),
                                        field.name.dart_style()
                                    )
                                })
                                .collect::<Vec<_>>();
                            format!("{{ {} }}", fields.join(""))
                        }
                    };
                    format!(
                        "{}const factory {}.{}({}) = {};",
                        dart_comments(&variant.comments),
                        self.ir.name,
                        variant.name.dart_style(),
                        args,
                        variant.name.rust_style(),
                    )
                })
                .collect::<Vec<_>>();
            format!(
                "@freezed
                class {0} with _${0} {{
                    {1}
                }}",
                self.ir.name,
                variants.join("\n")
            )
        } else {
            let variants = src
                .variants()
                .iter()
                .map(|variant| {
                    format!(
                        "{}{},",
                        dart_comments(&variant.comments),
                        variant.name.rust_style()
                    )
                })
                .collect::<Vec<_>>()
                .join("\n");
            format!(
                "{}enum {} {{
                {}
            }}",
                comments, self.ir.name, variants
            )
        }
    }
}
| 39.216346 | 97 | 0.281353 |
7ab627d54e15d15c59cfc8558f5ffc599f2d06e5
| 915 |
/*
 * Rust threads: message passing (send/receive over a channel)
 * CreatedAt: 2019-07-06
 */
use std::thread;
use std::sync::mpsc;
use std::time::Duration;
fn main() {
    // One shared channel; each producer thread owns its own Sender clone.
    let (tx, rx) = mpsc::channel();
    let tx_numbers = tx.clone();

    // Producer 1: sends "1".."4", one per second.
    thread::spawn(move || {
        for &msg in ["1", "2", "3", "4"].iter() {
            tx_numbers.send(msg.to_string()).unwrap();
            thread::sleep(Duration::from_secs(1));
        }
    });

    // Producer 2: sends "A".."D", one per second.
    thread::spawn(move || {
        for &msg in ["A", "B", "C", "D"].iter() {
            tx.send(msg.to_string()).unwrap();
            thread::sleep(Duration::from_secs(1));
        }
    });

    // The receiver loop ends once every Sender has been dropped.
    for received in rx {
        println!("Got: {}", received);
    }
}
| 23.461538 | 50 | 0.446995 |
4b02726911d96a02b18b5ee8848cc6b6dae01915
| 1,049 |
/*
* scaledjobs.keda.sh
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* The version of the OpenAPI document: 1
*
* Generated by: https://openapi-generator.tech
*/
/// ScaledJobJobTargetRefTemplateSpecDnsConfigOptions : PodDNSConfigOption defines DNS resolver options of a pod.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default, JsonSchema)]
pub struct ScaledJobJobTargetRefTemplateSpecDnsConfigOptions {
    /// Required.
    /// Name of the DNS resolver option.
    #[serde(rename = "name", skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// Optional value of the resolver option; omitted from JSON when `None`.
    #[serde(rename = "value", skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
impl ScaledJobJobTargetRefTemplateSpecDnsConfigOptions {
/// PodDNSConfigOption defines DNS resolver options of a pod.
pub fn new() -> ScaledJobJobTargetRefTemplateSpecDnsConfigOptions {
ScaledJobJobTargetRefTemplateSpecDnsConfigOptions {
name: None,
value: None,
}
}
}
| 29.971429 | 113 | 0.722593 |
7aaaa2ebba7c4a2d097c01f95ee55df8563d7e9d
| 17,720 |
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use DOMDOMWindow;
use DOMDocument;
use DOMElement;
use DOMEventTarget;
use DOMHTMLElement;
use DOMNode;
use DOMObject;
use glib::GString;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::SignalHandlerId;
use glib::signal::connect_raw;
use glib::translate::*;
use glib_sys;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
use webkit2_webextension_sys;
glib_wrapper! {
pub struct DOMHTMLIFrameElement(Object<webkit2_webextension_sys::WebKitDOMHTMLIFrameElement, webkit2_webextension_sys::WebKitDOMHTMLIFrameElementClass, DOMHTMLIFrameElementClass>) @extends DOMHTMLElement, DOMElement, DOMNode, DOMObject, @implements DOMEventTarget;
match fn {
get_type => || webkit2_webextension_sys::webkit_dom_html_iframe_element_get_type(),
}
}
pub const NONE_DOMHTMLI_FRAME_ELEMENT: Option<&DOMHTMLIFrameElement> = None;
pub trait DOMHTMLIFrameElementExt: 'static {
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_align(&self) -> Option<GString>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_content_document(&self) -> Option<DOMDocument>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_content_window(&self) -> Option<DOMDOMWindow>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_frame_border(&self) -> Option<GString>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_height(&self) -> Option<GString>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_long_desc(&self) -> Option<GString>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_margin_height(&self) -> Option<GString>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_margin_width(&self) -> Option<GString>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_name(&self) -> Option<GString>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_scrolling(&self) -> Option<GString>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_src(&self) -> Option<GString>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_width(&self) -> Option<GString>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn set_align(&self, value: &str);
#[cfg_attr(feature = "v2_22", deprecated)]
fn set_frame_border(&self, value: &str);
#[cfg_attr(feature = "v2_22", deprecated)]
fn set_height(&self, value: &str);
#[cfg_attr(feature = "v2_22", deprecated)]
fn set_long_desc(&self, value: &str);
#[cfg_attr(feature = "v2_22", deprecated)]
fn set_margin_height(&self, value: &str);
#[cfg_attr(feature = "v2_22", deprecated)]
fn set_margin_width(&self, value: &str);
#[cfg_attr(feature = "v2_22", deprecated)]
fn set_name(&self, value: &str);
#[cfg_attr(feature = "v2_22", deprecated)]
fn set_scrolling(&self, value: &str);
#[cfg_attr(feature = "v2_22", deprecated)]
fn set_src(&self, value: &str);
#[cfg_attr(feature = "v2_22", deprecated)]
fn set_width(&self, value: &str);
fn connect_property_align_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_content_document_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_content_window_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_frame_border_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_height_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_long_desc_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_margin_height_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_margin_width_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_name_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_scrolling_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_src_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_width_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<DOMHTMLIFrameElement>> DOMHTMLIFrameElementExt for O {
fn get_align(&self) -> Option<GString> {
unsafe {
from_glib_full(webkit2_webextension_sys::webkit_dom_html_iframe_element_get_align(self.as_ref().to_glib_none().0))
}
}
fn get_content_document(&self) -> Option<DOMDocument> {
unsafe {
from_glib_none(webkit2_webextension_sys::webkit_dom_html_iframe_element_get_content_document(self.as_ref().to_glib_none().0))
}
}
fn get_content_window(&self) -> Option<DOMDOMWindow> {
unsafe {
from_glib_full(webkit2_webextension_sys::webkit_dom_html_iframe_element_get_content_window(self.as_ref().to_glib_none().0))
}
}
fn get_frame_border(&self) -> Option<GString> {
unsafe {
from_glib_full(webkit2_webextension_sys::webkit_dom_html_iframe_element_get_frame_border(self.as_ref().to_glib_none().0))
}
}
fn get_height(&self) -> Option<GString> {
unsafe {
from_glib_full(webkit2_webextension_sys::webkit_dom_html_iframe_element_get_height(self.as_ref().to_glib_none().0))
}
}
fn get_long_desc(&self) -> Option<GString> {
unsafe {
from_glib_full(webkit2_webextension_sys::webkit_dom_html_iframe_element_get_long_desc(self.as_ref().to_glib_none().0))
}
}
fn get_margin_height(&self) -> Option<GString> {
unsafe {
from_glib_full(webkit2_webextension_sys::webkit_dom_html_iframe_element_get_margin_height(self.as_ref().to_glib_none().0))
}
}
fn get_margin_width(&self) -> Option<GString> {
unsafe {
from_glib_full(webkit2_webextension_sys::webkit_dom_html_iframe_element_get_margin_width(self.as_ref().to_glib_none().0))
}
}
fn get_name(&self) -> Option<GString> {
unsafe {
from_glib_full(webkit2_webextension_sys::webkit_dom_html_iframe_element_get_name(self.as_ref().to_glib_none().0))
}
}
fn get_scrolling(&self) -> Option<GString> {
unsafe {
from_glib_full(webkit2_webextension_sys::webkit_dom_html_iframe_element_get_scrolling(self.as_ref().to_glib_none().0))
}
}
fn get_src(&self) -> Option<GString> {
unsafe {
from_glib_full(webkit2_webextension_sys::webkit_dom_html_iframe_element_get_src(self.as_ref().to_glib_none().0))
}
}
fn get_width(&self) -> Option<GString> {
unsafe {
from_glib_full(webkit2_webextension_sys::webkit_dom_html_iframe_element_get_width(self.as_ref().to_glib_none().0))
}
}
fn set_align(&self, value: &str) {
unsafe {
webkit2_webextension_sys::webkit_dom_html_iframe_element_set_align(self.as_ref().to_glib_none().0, value.to_glib_none().0);
}
}
fn set_frame_border(&self, value: &str) {
unsafe {
webkit2_webextension_sys::webkit_dom_html_iframe_element_set_frame_border(self.as_ref().to_glib_none().0, value.to_glib_none().0);
}
}
fn set_height(&self, value: &str) {
unsafe {
webkit2_webextension_sys::webkit_dom_html_iframe_element_set_height(self.as_ref().to_glib_none().0, value.to_glib_none().0);
}
}
fn set_long_desc(&self, value: &str) {
unsafe {
webkit2_webextension_sys::webkit_dom_html_iframe_element_set_long_desc(self.as_ref().to_glib_none().0, value.to_glib_none().0);
}
}
fn set_margin_height(&self, value: &str) {
unsafe {
webkit2_webextension_sys::webkit_dom_html_iframe_element_set_margin_height(self.as_ref().to_glib_none().0, value.to_glib_none().0);
}
}
fn set_margin_width(&self, value: &str) {
unsafe {
webkit2_webextension_sys::webkit_dom_html_iframe_element_set_margin_width(self.as_ref().to_glib_none().0, value.to_glib_none().0);
}
}
fn set_name(&self, value: &str) {
unsafe {
webkit2_webextension_sys::webkit_dom_html_iframe_element_set_name(self.as_ref().to_glib_none().0, value.to_glib_none().0);
}
}
fn set_scrolling(&self, value: &str) {
unsafe {
webkit2_webextension_sys::webkit_dom_html_iframe_element_set_scrolling(self.as_ref().to_glib_none().0, value.to_glib_none().0);
}
}
fn set_src(&self, value: &str) {
unsafe {
webkit2_webextension_sys::webkit_dom_html_iframe_element_set_src(self.as_ref().to_glib_none().0, value.to_glib_none().0);
}
}
fn set_width(&self, value: &str) {
unsafe {
webkit2_webextension_sys::webkit_dom_html_iframe_element_set_width(self.as_ref().to_glib_none().0, value.to_glib_none().0);
}
}
fn connect_property_align_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::align\0".as_ptr() as *const _,
Some(transmute(notify_align_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_content_document_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::content-document\0".as_ptr() as *const _,
Some(transmute(notify_content_document_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_content_window_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::content-window\0".as_ptr() as *const _,
Some(transmute(notify_content_window_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_frame_border_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::frame-border\0".as_ptr() as *const _,
Some(transmute(notify_frame_border_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_height_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::height\0".as_ptr() as *const _,
Some(transmute(notify_height_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_long_desc_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::long-desc\0".as_ptr() as *const _,
Some(transmute(notify_long_desc_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_margin_height_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::margin-height\0".as_ptr() as *const _,
Some(transmute(notify_margin_height_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_margin_width_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::margin-width\0".as_ptr() as *const _,
Some(transmute(notify_margin_width_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_name_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::name\0".as_ptr() as *const _,
Some(transmute(notify_name_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_scrolling_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::scrolling\0".as_ptr() as *const _,
Some(transmute(notify_scrolling_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_src_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::src\0".as_ptr() as *const _,
Some(transmute(notify_src_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_width_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::width\0".as_ptr() as *const _,
Some(transmute(notify_width_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
}
unsafe extern "C" fn notify_align_trampoline<P, F: Fn(&P) + 'static>(this: *mut webkit2_webextension_sys::WebKitDOMHTMLIFrameElement, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMHTMLIFrameElement> {
let f: &F = &*(f as *const F);
f(&DOMHTMLIFrameElement::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_content_document_trampoline<P, F: Fn(&P) + 'static>(this: *mut webkit2_webextension_sys::WebKitDOMHTMLIFrameElement, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMHTMLIFrameElement> {
let f: &F = &*(f as *const F);
f(&DOMHTMLIFrameElement::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_content_window_trampoline<P, F: Fn(&P) + 'static>(this: *mut webkit2_webextension_sys::WebKitDOMHTMLIFrameElement, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMHTMLIFrameElement> {
let f: &F = &*(f as *const F);
f(&DOMHTMLIFrameElement::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_frame_border_trampoline<P, F: Fn(&P) + 'static>(this: *mut webkit2_webextension_sys::WebKitDOMHTMLIFrameElement, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMHTMLIFrameElement> {
let f: &F = &*(f as *const F);
f(&DOMHTMLIFrameElement::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_height_trampoline<P, F: Fn(&P) + 'static>(this: *mut webkit2_webextension_sys::WebKitDOMHTMLIFrameElement, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMHTMLIFrameElement> {
let f: &F = &*(f as *const F);
f(&DOMHTMLIFrameElement::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_long_desc_trampoline<P, F: Fn(&P) + 'static>(this: *mut webkit2_webextension_sys::WebKitDOMHTMLIFrameElement, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMHTMLIFrameElement> {
let f: &F = &*(f as *const F);
f(&DOMHTMLIFrameElement::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_margin_height_trampoline<P, F: Fn(&P) + 'static>(this: *mut webkit2_webextension_sys::WebKitDOMHTMLIFrameElement, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMHTMLIFrameElement> {
let f: &F = &*(f as *const F);
f(&DOMHTMLIFrameElement::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_margin_width_trampoline<P, F: Fn(&P) + 'static>(this: *mut webkit2_webextension_sys::WebKitDOMHTMLIFrameElement, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMHTMLIFrameElement> {
let f: &F = &*(f as *const F);
f(&DOMHTMLIFrameElement::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_name_trampoline<P, F: Fn(&P) + 'static>(this: *mut webkit2_webextension_sys::WebKitDOMHTMLIFrameElement, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMHTMLIFrameElement> {
let f: &F = &*(f as *const F);
f(&DOMHTMLIFrameElement::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_scrolling_trampoline<P, F: Fn(&P) + 'static>(this: *mut webkit2_webextension_sys::WebKitDOMHTMLIFrameElement, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMHTMLIFrameElement> {
let f: &F = &*(f as *const F);
f(&DOMHTMLIFrameElement::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_src_trampoline<P, F: Fn(&P) + 'static>(this: *mut webkit2_webextension_sys::WebKitDOMHTMLIFrameElement, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMHTMLIFrameElement> {
let f: &F = &*(f as *const F);
f(&DOMHTMLIFrameElement::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_width_trampoline<P, F: Fn(&P) + 'static>(this: *mut webkit2_webextension_sys::WebKitDOMHTMLIFrameElement, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMHTMLIFrameElement> {
let f: &F = &*(f as *const F);
f(&DOMHTMLIFrameElement::from_glib_borrow(this).unsafe_cast())
}
impl fmt::Display for DOMHTMLIFrameElement {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "DOMHTMLIFrameElement")
}
}
| 40.923788 | 268 | 0.665632 |
1e81eaa168f4c4d1d0e81eba90b259c45b982b4b
| 3,036 |
// bytecode file
use heap::Heap;
use list::List;
use opcode::{BitstrEndian, BitstrSign, BitstrType, BlockTag, Opcode};
use serde_json;
use serde_json::Result;
use std::fs::File;
use std::io::prelude::*;
use std::sync::Arc;
use value;
use value::{Bitstr, CompiledCode, Value};
/// Top-level contents of a bytecode file (deserialized from JSON).
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Bytecode {
    /// Bytecode format version.
    pub version: usize,
    /// Name of the module the file was compiled from.
    pub module: String,
    /// Top-level compiled code object of the module.
    pub main: BcCompiledCode,
}
/// Serialized form of a compiled code object; lowered to
/// `value::CompiledCode` by `to_value_code`.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct BcCompiledCode {
    /// Function name, if any (None presumably means anonymous — TODO confirm).
    pub name: Option<String>,
    /// Number of parameters.
    pub arity: usize,
    /// Constant pool, converted to runtime `Value`s at load time.
    pub consts: Vec<BcConst>,
    /// Instruction sequence.
    pub opcodes: Vec<Opcode>,
    /// Number of local slots (copied through unchanged).
    pub locals: usize,
    /// Frame size (copied through unchanged).
    pub frame: usize,
}
/// A constant-pool entry as stored in the bytecode file.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum BcConst {
    Atom(String),
    /// Integer kept as a decimal string; parsed when converted to a `Value`.
    Int(String),
    String(String),
    /// Nested code object (e.g. a function constant).
    Function(BcCompiledCode),
    /// Bit string: (size, value, type, sign, endianness, unit).
    Bitstr(u32, u64, BitstrType, BitstrSign, BitstrEndian, Option<u8>),
    /// Composite constant (binary / list / tuple) with its elements.
    Block(BlockTag, Vec<BcConst>),
}
/// Load and deserialize a `Bytecode` value from the JSON file at `file`.
///
/// JSON decoding errors are returned as `Err` (the return type is
/// `serde_json::Result`, so I/O errors cannot be propagated as errors).
///
/// # Panics
/// Panics if the file cannot be opened or read; unlike the previous
/// bare `expect("cannot open")`/`unwrap()`, the panic message now names
/// the offending path and the underlying I/O error.
pub fn from_file(file: &str) -> Result<Bytecode> {
    let buf = std::fs::read_to_string(file)
        .unwrap_or_else(|e| panic!("cannot read bytecode file {:?}: {}", file, e));
    serde_json::from_str(&buf)
}
impl BcConst {
    /// Convert this serialized constant into a runtime `Value`, allocating
    /// lists on `heap` and wrapping owned payloads in `Arc`s.
    ///
    /// Panics on variants with no conversion yet (any `Block` tag other
    /// than `Binary`/`List`/`Tuple`), and on malformed `Int` strings.
    pub fn to_value(&self, heap: &Heap) -> Value {
        match self {
            BcConst::Atom(value) => Value::Atom(Arc::new(value.clone())),
            // Integers are stored as strings in the bytecode file.
            BcConst::Int(value) => Value::Int(value.parse().unwrap()),
            BcConst::String(value) => Value::String(Arc::new(value.clone())),
            BcConst::Function(value) => {
                let code = value.to_value_code(heap);
                Value::CompiledCode(Arc::new(code.clone()))
            }
            BcConst::Bitstr(size, value, _ty, _sign, _endian, _unit) => {
                // TODO
                // Bitstr.from_spec(size,value,ty,sign,endian,unit)
                // NOTE(review): type/sign/endianness/unit are currently
                // ignored; only size and value survive the conversion.
                Value::Bitstr(Arc::new(Bitstr {
                    size: *size,
                    value: *value,
                }))
            }
            BcConst::Block(BlockTag::Binary, vals) => Value::Binary(Arc::new(
                vals.iter().map(|val| val.to_value(heap)).collect(),
            )),
            // Lists are materialized on the heap and referenced by id.
            BcConst::Block(BlockTag::List, vals) => {
                let elts = vals.iter().map(|val| val.to_value(heap)).collect();
                Value::List(List::from_list(heap, elts).get_id(heap))
            }
            BcConst::Block(BlockTag::Tuple, vals) => Value::Tuple(Arc::new(
                vals.iter().map(|val| val.to_value(heap)).collect(),
            )),
            // Anything else is a constant shape we cannot load yet.
            _ => panic!("# not impl {:?}", self),
        }
    }
}
impl BcCompiledCode {
    /// Lower this serialized code object into the runtime representation,
    /// converting every constant-pool entry into a `Value` on the way.
    pub fn to_value_code(&self, heap: &Heap) -> value::CompiledCode {
        CompiledCode {
            name: self.name.clone(),
            arity: self.arity,
            consts: self
                .consts
                .iter()
                .map(|cons| cons.to_value(heap))
                .collect(),
            ops: self.opcodes.clone(),
            locals: self.locals,
            frame: self.frame,
        }
    }
}
| 30.979592 | 79 | 0.555336 |
0a56fd11a583a7b4ad9452132493950bf36d10cc
| 4,799 |
// Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_base::tokio;
use common_exception::Result;
use futures::TryStreamExt;
use pretty_assertions::assert_eq;
use crate::catalogs::Table;
use crate::catalogs::ToReadDataSourcePlan;
use crate::configs::Config;
use crate::datasources::database::system::ConfigsTable;
use crate::tests::try_create_context_with_config;
// End-to-end read of the system.configs table: builds a default context,
// scans the table, and compares the full sorted dump against a fixture.
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_configs_table() -> Result<()> {
    let config = Config::default();
    let ctx = try_create_context_with_config(config)?;
    // Pin max_threads so the run is deterministic
    // (NOTE(review): presumably this feeds the `num_cpus | 8` row — confirm).
    ctx.get_settings().set_max_threads(8)?;

    let table: Arc<dyn Table> = Arc::new(ConfigsTable::create(1));
    let io_ctx = ctx.get_cluster_table_io_context()?;
    let io_ctx = Arc::new(io_ctx);
    let source_plan = table.read_plan(io_ctx.clone(), None)?;
    let stream = table.read(io_ctx, &source_plan).await?;
    let result = stream.try_collect::<Vec<_>>().await?;
    let block = &result[0];
    // Schema: name, value, group, description; one row per config entry.
    assert_eq!(block.num_columns(), 4);
    assert_eq!(block.num_rows(), 30);

    // Full expected dump (rows sorted by name). Must be updated whenever a
    // config key is added, removed, or its default changes.
    let expected = vec![
        "+-----------------------------------+------------------+-------+-------------+",
        "| name                              | value            | group | description |",
        "+-----------------------------------+------------------+-------+-------------+",
        "| api_tls_server_cert               |                  | query |             |",
        "| api_tls_server_key                |                  | query |             |",
        "| api_tls_server_root_ca_cert       |                  | query |             |",
        "| clickhouse_handler_host           | 127.0.0.1        | query |             |",
        "| clickhouse_handler_port           | 9000             | query |             |",
        "| cluster_id                        |                  | query |             |",
        "| flight_api_address                | 127.0.0.1:9090   | query |             |",
        "| http_api_address                  | 127.0.0.1:8080   | query |             |",
        "| http_handler_host                 | 127.0.0.1        | query |             |",
        "| http_handler_port                 | 8000             | query |             |",
        "| log_dir                           | ./_logs          | log   |             |",
        "| log_level                         | INFO             | log   |             |",
        "| max_active_sessions               | 256              | query |             |",
        "| meta_address                      |                  | meta  |             |",
        "| meta_client_timeout_in_second     | 10               | meta  |             |",
        "| meta_embedded_dir                 | ./_meta_embedded | meta  |             |",
        "| meta_password                     |                  | meta  |             |",
        "| meta_username                     | root             | meta  |             |",
        "| metric_api_address                | 127.0.0.1:7070   | query |             |",
        "| mysql_handler_host                | 127.0.0.1        | query |             |",
        "| mysql_handler_port                | 3307             | query |             |",
        "| num_cpus                          | 8                | query |             |",
        "| rpc_tls_meta_server_root_ca_cert  |                  | meta  |             |",
        "| rpc_tls_meta_service_domain_name  | localhost        | meta  |             |",
        "| rpc_tls_query_server_root_ca_cert |                  | query |             |",
        "| rpc_tls_query_service_domain_name | localhost        | query |             |",
        "| rpc_tls_server_cert               |                  | query |             |",
        "| rpc_tls_server_key                |                  | query |             |",
        "| tenant_id                         |                  | query |             |",
        "| wait_timeout_mills                | 5000             | query |             |",
        "+-----------------------------------+------------------+-------+-------------+",
    ];
    common_datablocks::assert_blocks_sorted_eq(expected, result.as_slice());
    Ok(())
}
| 57.130952 | 89 | 0.435716 |
fee454a6b647c5cdf353586951a87db37648b5ed
| 14,527 |
use crate::store::Storer;
use oci_distribution::client::ImageData;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use async_trait::async_trait;
use log::debug;
use oci_distribution::Reference;
use tokio::sync::Mutex;
use tokio::sync::RwLock;
use super::client::Client;
use crate::store::LocalStore;
/// A module store that keeps modules cached on the file system
///
/// This type is generic over the type of client used
/// to fetch modules from a remote store. This client is expected
/// to be a [`Client`]
pub type FileStore<C> = LocalStore<FileStorer, C>;
impl<C: Client + Send> FileStore<C> {
    /// Build a `FileStore` that caches modules on disk under `root_dir`,
    /// fetching cache misses through `client`.
    pub fn new<T: AsRef<Path>>(client: C, root_dir: T) -> Self {
        let storer = FileStorer {
            root_dir: root_dir.as_ref().into(),
        };
        Self {
            storer: Arc::new(RwLock::new(storer)),
            client: Arc::new(Mutex::new(client)),
        }
    }
}
/// Filesystem-backed storage backend used by `FileStore`.
pub struct FileStorer {
    // Base directory under which all cached modules and digests are laid out.
    root_dir: PathBuf,
}
impl FileStorer {
/// Create a new `FileStorer`
pub fn new<T: AsRef<Path>>(root_dir: T) -> Self {
Self {
root_dir: root_dir.as_ref().into(),
}
}
fn pull_path(&self, r: &Reference) -> PathBuf {
let mut path = self.root_dir.join(r.registry());
path.push(r.repository());
path.push(r.tag().unwrap_or("latest"));
path
}
fn pull_file_path(&self, r: &Reference) -> PathBuf {
self.pull_path(r).join("module.wasm")
}
fn digest_file_path(&self, r: &Reference) -> PathBuf {
self.pull_path(r).join("digest.txt")
}
}
#[async_trait]
impl Storer for FileStorer {
    /// Read the cached module bytes for `image_ref` from disk.
    /// Fails when the reference was never stored locally.
    async fn get_local(&self, image_ref: &Reference) -> anyhow::Result<Vec<u8>> {
        let path = self.pull_file_path(image_ref);
        if !path.exists() {
            return Err(anyhow::anyhow!(
                "Image ref {} not available locally",
                image_ref
            ));
        }
        debug!("Fetching image ref '{:?}' from disk", image_ref);
        Ok(tokio::fs::read(path).await?)
    }
    /// Persist pulled image data: writes the module bytes and, when the
    /// registry supplied one, the digest file next to them.
    async fn store(&mut self, image_ref: &Reference, image_data: ImageData) -> anyhow::Result<()> {
        tokio::fs::create_dir_all(self.pull_path(image_ref)).await?;
        let digest_path = self.digest_file_path(image_ref);
        // We delete the digest file before writing the image file, rather
        // than simply overwriting the digest file after writing the image file.
        // This addresses failure modes where, for example, the image file
        // gets updated but the digest file write fails and the store ends
        // up associating the wrong digest with the file on disk.
        if digest_path.exists() {
            tokio::fs::remove_file(&digest_path).await?;
        }
        let module_path = self.pull_file_path(image_ref);
        tokio::fs::write(&module_path, image_data.content).await?;
        if let Some(d) = image_data.digest {
            tokio::fs::write(&digest_path, d).await?;
        }
        Ok(())
    }
    /// True when module bytes for `image_ref` exist on disk (digest not checked).
    async fn is_present(&self, image_ref: &Reference) -> bool {
        let path = self.pull_file_path(image_ref);
        path.exists()
    }
    /// True when a digest file exists for `image_ref` and its contents match
    /// `digest` exactly.
    async fn is_present_with_digest(&self, image_ref: &Reference, digest: String) -> bool {
        let path = self.digest_file_path(image_ref);
        path.exists() && file_content_is(path, digest).await
    }
}
impl<C: Client + Send> Clone for FileStore<C> {
fn clone(&self) -> Self {
Self {
storer: self.storer.clone(),
client: self.client.clone(),
}
}
}
/// True when the file at `path` exists, is readable, and its contents
/// (interpreted as lossy UTF-8) are exactly `text`.
async fn file_content_is(path: PathBuf, text: String) -> bool {
    if let Ok(bytes) = tokio::fs::read(path).await {
        String::from_utf8_lossy(&bytes) == text
    } else {
        false
    }
}
#[cfg(test)]
mod test {
use super::*;
use crate::store::PullPolicy;
use crate::store::Store;
use oci_distribution::client::ImageData;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::sync::RwLock;
#[tokio::test]
async fn can_parse_pull_policies() {
assert_eq!(None, PullPolicy::parse(None).unwrap());
assert_eq!(
PullPolicy::Always,
PullPolicy::parse(Some("Always".to_owned()))
.unwrap()
.unwrap()
);
assert_eq!(
PullPolicy::IfNotPresent,
PullPolicy::parse(Some("IfNotPresent".to_owned()))
.unwrap()
.unwrap()
);
assert_eq!(
PullPolicy::Never,
PullPolicy::parse(Some("Never".to_owned()))
.unwrap()
.unwrap()
);
assert!(
PullPolicy::parse(Some("IfMoonMadeOfGreenCheese".to_owned())).is_err(),
"Expected parse failure but didn't get one"
);
}
#[derive(Clone)]
struct FakeImageClient {
images: Arc<RwLock<HashMap<String, ImageData>>>,
}
impl FakeImageClient {
fn new(entries: Vec<(&'static str, Vec<u8>, &'static str)>) -> Self {
let client = FakeImageClient {
images: Default::default(),
};
for (name, content, digest) in entries {
let mut images = client
.images
.write()
.expect("should be able to write to images");
images.insert(
name.to_owned(),
ImageData {
content,
digest: Some(digest.to_owned()),
},
);
}
client
}
fn update(&mut self, key: &str, content: Vec<u8>, digest: &str) -> () {
let mut images = self
.images
.write()
.expect("should be able to write to images");
images.insert(
key.to_owned(),
ImageData {
content,
digest: Some(digest.to_owned()),
},
);
}
}
#[async_trait]
impl Client for FakeImageClient {
async fn pull(&mut self, image_ref: &Reference) -> anyhow::Result<ImageData> {
let images = self
.images
.read()
.expect("should be able to read from images");
match images.get(image_ref.whole()) {
Some(v) => Ok(v.clone()),
None => Err(anyhow::anyhow!("error pulling module")),
}
}
}
struct TemporaryDirectory {
path: PathBuf,
}
impl Drop for TemporaryDirectory {
fn drop(&mut self) -> () {
std::fs::remove_dir_all(&self.path).expect("Failed to remove temp directory");
}
}
fn create_temp_dir() -> TemporaryDirectory {
let os_temp_dir = std::env::temp_dir();
let subdirectory = PathBuf::from(format!("krustlet-fms-tests-{}", uuid::Uuid::new_v4()));
let path = os_temp_dir.join(subdirectory);
std::fs::create_dir(&path).expect("Failed to create temp directory");
TemporaryDirectory { path }
}
#[tokio::test]
async fn file_module_store_can_pull_if_policy_if_not_present() -> anyhow::Result<()> {
let fake_client = FakeImageClient::new(vec![("foo/bar:1.0", vec![1, 2, 3], "sha256:123")]);
let fake_ref = Reference::try_from("foo/bar:1.0")?;
let scratch_dir = create_temp_dir();
let store = FileStore::new(fake_client, &scratch_dir.path);
let module_bytes = store.get(&fake_ref, Some(PullPolicy::IfNotPresent)).await?;
assert_eq!(3, module_bytes.len());
assert_eq!(2, module_bytes[1]);
Ok(())
}
#[tokio::test]
async fn file_module_store_can_pull_if_policy_always() -> anyhow::Result<()> {
let fake_client = FakeImageClient::new(vec![("foo/bar:1.0", vec![1, 2, 3], "sha256:123")]);
let fake_ref = Reference::try_from("foo/bar:1.0")?;
let scratch_dir = create_temp_dir();
let store = FileStore::new(fake_client, &scratch_dir.path);
let module_bytes = store.get(&fake_ref, Some(PullPolicy::Always)).await?;
assert_eq!(3, module_bytes.len());
assert_eq!(2, module_bytes[1]);
Ok(())
}
#[tokio::test]
async fn file_module_store_does_not_pull_if_policy_never() -> anyhow::Result<()> {
let fake_client = FakeImageClient::new(vec![("foo/bar:1.0", vec![1, 2, 3], "sha256:123")]);
let fake_ref = Reference::try_from("foo/bar:1.0")?;
let scratch_dir = create_temp_dir();
let store = FileStore::new(fake_client, &scratch_dir.path);
let module_bytes = store.get(&fake_ref, Some(PullPolicy::Never)).await;
assert!(
module_bytes.is_err(),
"expected get with pull policy Never to fail but it worked"
);
Ok(())
}
#[tokio::test]
async fn file_module_store_can_reuse_cached_if_policy_never() -> anyhow::Result<()> {
let fake_client = FakeImageClient::new(vec![("foo/bar:1.0", vec![1, 2, 3], "sha256:123")]);
let fake_ref = Reference::try_from("foo/bar:1.0")?;
let scratch_dir = create_temp_dir();
let store = FileStore::new(fake_client, &scratch_dir.path);
let prime_cache = store.get(&fake_ref, Some(PullPolicy::Always)).await;
assert!(prime_cache.is_ok());
let module_bytes = store.get(&fake_ref, Some(PullPolicy::Never)).await?;
assert_eq!(3, module_bytes.len());
assert_eq!(2, module_bytes[1]);
Ok(())
}
#[tokio::test]
async fn file_module_store_ignores_updates_if_policy_if_not_present() -> anyhow::Result<()> {
let mut fake_client =
FakeImageClient::new(vec![("foo/bar:1.0", vec![1, 2, 3], "sha256:123")]);
let fake_ref = Reference::try_from("foo/bar:1.0")?;
let scratch_dir = create_temp_dir();
let store = FileStore::new(fake_client.clone(), &scratch_dir.path);
let module_bytes_orig = store.get(&fake_ref, Some(PullPolicy::IfNotPresent)).await?;
assert_eq!(3, module_bytes_orig.len());
assert_eq!(2, module_bytes_orig[1]);
fake_client.update("foo/bar:1.0", vec![4, 5, 6, 7], "sha256:4567");
let module_bytes_after = store.get(&fake_ref, Some(PullPolicy::IfNotPresent)).await?;
assert_eq!(3, module_bytes_after.len());
assert_eq!(2, module_bytes_after[1]);
Ok(())
}
#[tokio::test]
async fn file_module_store_gets_updates_if_policy_always() -> anyhow::Result<()> {
let mut fake_client =
FakeImageClient::new(vec![("foo/bar:1.0", vec![1, 2, 3], "sha256:123")]);
let fake_ref = Reference::try_from("foo/bar:1.0")?;
let scratch_dir = create_temp_dir();
let store = FileStore::new(fake_client.clone(), &scratch_dir.path);
let module_bytes_orig = store.get(&fake_ref, Some(PullPolicy::IfNotPresent)).await?;
assert_eq!(3, module_bytes_orig.len());
assert_eq!(2, module_bytes_orig[1]);
fake_client.update("foo/bar:1.0", vec![4, 5, 6, 7], "sha256:4567");
let module_bytes_after = store.get(&fake_ref, Some(PullPolicy::Always)).await?;
assert_eq!(4, module_bytes_after.len());
assert_eq!(5, module_bytes_after[1]);
Ok(())
}
#[tokio::test]
async fn file_module_store_copes_with_no_tag() -> anyhow::Result<()> {
let fake_client = FakeImageClient::new(vec![("foo/bar", vec![2, 3], "sha256:23")]);
let fake_ref = Reference::try_from("foo/bar")?;
let scratch_dir = create_temp_dir();
let store = FileStore::new(fake_client, &scratch_dir.path);
let module_bytes = store.get(&fake_ref, Some(PullPolicy::Always)).await?;
assert_eq!(2, module_bytes.len());
assert_eq!(3, module_bytes[1]);
Ok(())
}
#[tokio::test]
async fn file_module_store_can_pull_if_tag_given_but_policy_omitted() -> anyhow::Result<()> {
let mut fake_client =
FakeImageClient::new(vec![("foo/bar:2.0", vec![6, 7, 8], "sha256:678")]);
let fake_ref = Reference::try_from("foo/bar:2.0")?;
let scratch_dir = create_temp_dir();
let store = FileStore::new(fake_client.clone(), &scratch_dir.path);
let module_bytes_orig = store.get(&fake_ref, None).await?;
assert_eq!(3, module_bytes_orig.len());
assert_eq!(7, module_bytes_orig[1]);
fake_client.update("foo/bar:2.0", vec![8, 9], "sha256:89");
// But with no policy it should *not* re-fetch a tag that's in cache
let module_bytes_after = store.get(&fake_ref, None).await?;
assert_eq!(3, module_bytes_after.len());
assert_eq!(7, module_bytes_after[1]);
Ok(())
}
#[tokio::test]
async fn file_module_store_always_pulls_if_tag_latest_and_policy_omitted() -> anyhow::Result<()>
{
let mut fake_client =
FakeImageClient::new(vec![("foo/bar:latest", vec![3, 4], "sha256:34")]);
let fake_ref = Reference::try_from("foo/bar:latest")?;
let scratch_dir = create_temp_dir();
let store = FileStore::new(fake_client.clone(), &scratch_dir.path);
let module_bytes_orig = store.get(&fake_ref, None).await?;
assert_eq!(2, module_bytes_orig.len());
assert_eq!(4, module_bytes_orig[1]);
fake_client.update("foo/bar:latest", vec![5, 6, 7], "sha256:567");
let module_bytes_after = store.get(&fake_ref, None).await?;
assert_eq!(3, module_bytes_after.len());
assert_eq!(6, module_bytes_after[1]);
Ok(())
}
#[tokio::test]
async fn file_module_store_always_pulls_if_tag_and_policy_omitted() -> anyhow::Result<()> {
let mut fake_client = FakeImageClient::new(vec![("foo/bar", vec![3, 4], "sha256:34")]);
let fake_ref = Reference::try_from("foo/bar")?;
let scratch_dir = create_temp_dir();
let store = FileStore::new(fake_client.clone(), &scratch_dir.path);
let module_bytes_orig = store.get(&fake_ref, None).await?;
assert_eq!(2, module_bytes_orig.len());
assert_eq!(4, module_bytes_orig[1]);
fake_client.update("foo/bar", vec![5, 6, 7], "sha256:567");
let module_bytes_after = store.get(&fake_ref, None).await?;
assert_eq!(3, module_bytes_after.len());
assert_eq!(6, module_bytes_after[1]);
Ok(())
}
}
| 37.830729 | 100 | 0.583878 |
f4bc36274f14fa77c8edb2dfebf8179781cb3815
| 8,438 |
use std::sync::Arc;
use k8s_openapi::api::{apps::v1::Deployment, core::v1::Pod};
use kube::{
api::{Api, DeleteParams, ListParams, PostParams},
Client,
};
use serde_json::json;
use crate::core::runtime::ClusterDescriptor;
use crate::core::{
cluster::TaskResourceInfo,
env::{StreamApp, StreamExecutionEnvironment},
};
use crate::deployment::TResourceManager;
use crate::runtime::context::Context;
use crate::runtime::ClusterDescriptor;
use crate::utils::thread::async_runtime_single;
/// Resource manager that allocates rlink worker pods on Kubernetes.
#[derive(Clone)]
pub(crate) struct KubernetesResourceManager {
    // Shared runtime context (provides e.g. the worker container image path).
    context: Arc<Context>,
    // Set by `prepare`; remains `None` until then.
    cluster_descriptor: Option<ClusterDescriptor>,
}
impl KubernetesResourceManager {
pub fn new(context: Arc<Context>) -> Self {
KubernetesResourceManager {
context,
cluster_descriptor: None,
}
}
}
impl TResourceManager for KubernetesResourceManager {
    /// Record the cluster descriptor for later allocations.
    fn prepare(&mut self, _context: &Context, job_descriptor: &ClusterDescriptor) {
        self.cluster_descriptor = Some(job_descriptor.clone());
    }
    /// Create one Kubernetes pod per worker manager in the descriptor and
    /// return a `TaskResourceInfo` (pod uid + name) for each successful pod.
    /// Failed allocations are only logged; they do not abort the loop.
    fn worker_allocate<S>(
        &self,
        _stream_app_clone: &S,
        _stream_env: &StreamExecutionEnvironment,
    ) -> anyhow::Result<Vec<TaskResourceInfo>>
    where
        S: StreamApp + 'static,
    {
        // Panics if `prepare` was not called first.
        let cluster_descriptor = self.cluster_descriptor.as_ref().unwrap();
        let coordinator_manager = &cluster_descriptor.coordinator_manager;
        let mut task_infos = Vec::new();
        let namespace = "default";
        let image_path = &self.context.image_path;
        let limits = &ContainerLimits {
            cpu: coordinator_manager.v_cores as usize,
            memory: format!("{}Mi", coordinator_manager.memory_mb),
        };
        let application_id = coordinator_manager.application_id.as_str();
        // A fresh multi-thread tokio runtime per call, used to block on the
        // async kube operations from this synchronous trait method.
        let rt = tokio::runtime::Runtime::new()?;
        // NOTE(review): the deploy-id lookup result is unwrapped, so a kube
        // client construction failure panics here instead of returning Err.
        let job_deploy_id =
            rt.block_on(async { get_job_deploy_id(namespace, application_id).await.unwrap() });
        let coordinator_address = coordinator_manager.coordinator_address.as_str();
        for task_manager_descriptor in &cluster_descriptor.worker_managers {
            let task_manager_id = task_manager_descriptor.task_manager_id.clone();
            // Pod name: "<application_id>-<task_manager_id with '_' -> '-'>"
            // (underscores are not valid in Kubernetes resource names).
            let task_manager_name = format!(
                "{}-{}",
                application_id,
                parse_name(task_manager_id.as_str())
            );
            rt.block_on(async {
                match allocate_worker(
                    coordinator_address,
                    task_manager_id.as_str(),
                    task_manager_name.as_str(),
                    application_id,
                    namespace,
                    job_deploy_id.as_str(),
                    image_path,
                    limits,
                )
                .await
                {
                    Ok(o) => {
                        let pod_uid = o.clone();
                        let mut task_info =
                            TaskResourceInfo::new(pod_uid, String::new(), task_manager_id.clone());
                        // Stash the pod name so stop_workers can select by name.
                        task_info
                            .resource_info
                            .insert("task_manager_name".to_string(), task_manager_name);
                        task_infos.push(task_info);
                        info!(
                            "worker id :{}, task_manager_id {} allocate success",
                            task_manager_id.clone(),
                            o.clone()
                        );
                    }
                    _ => {
                        error!("worker {} allocate failed", task_manager_id)
                    }
                }
            });
        }
        Ok(task_infos)
    }
    /// Delete the pods behind the given task infos, selecting by pod uid
    /// and by the pod name recorded at allocation time.
    fn stop_workers(&self, task_ids: Vec<TaskResourceInfo>) -> anyhow::Result<()> {
        let mut tasks: Vec<String> = Vec::new();
        for task in task_ids {
            if let Some(task_id) = task.task_id() {
                tasks.push(format!("uid={}", task_id));
            }
            // NOTE(review): indexing panics if "task_manager_name" was never
            // inserted for this task — confirm all callers allocate via
            // worker_allocate above.
            tasks.push(format!("name={}", task.resource_info["task_manager_name"]));
        }
        let namespace = "default";
        return async_runtime_single().block_on(async { stop_worker(namespace, tasks).await });
    }
}
/// Resource limits requested for a worker container.
#[derive(Clone, Debug)]
struct ContainerLimits {
    // Number of CPU cores.
    cpu: usize,
    // Memory limit in Kubernetes quantity form, e.g. "1024Mi".
    memory: String,
}
/// Build a worker pod manifest and create it via the Kubernetes API.
///
/// The pod is owned (via ownerReferences) by the coordinator Deployment so
/// it is garbage-collected with it. Returns the created pod's uid; on a
/// 409 "already exists" API error an empty string is returned instead.
async fn allocate_worker(
    coordinator_address: &str,
    task_manager_id: &str,
    task_manager_name: &str,
    cluster_name: &str,
    namespace: &str,
    job_deploy_id: &str,
    image_path: &str,
    limits: &ContainerLimits,
) -> anyhow::Result<String> {
    let client = Client::try_default().await?;
    let pods: Api<Pod> = Api::namespaced(client, namespace);
    // NOTE(review): the label keys/values "commpent" and "rlinl-on-k8s" look
    // like typos, but they are runtime identifiers that selectors elsewhere
    // may match — do not rename without auditing all selectors.
    let p: Pod = serde_json::from_value(json!(
    {
        "apiVersion": "v1",
        "kind": "Pod",
        "metadata": {
            "name": task_manager_name,
            "labels":{
                "app":"rlink",
                "commpent":"jobmanager",
                "type":"rlinl-on-k8s"
            },
            "ownerReferences":[{
                "kind":"Deployment",
                "apiVersion": "apps/v1",
                "name":cluster_name,
                "uid":job_deploy_id,
                "controller": true,
                "blockOwnerDeletion": true
            }]
        },
        "spec": {
            "containers": [
                {
                    "name":task_manager_name,
                    "image": image_path,
                    // NOTE(review): Kubernetes expects limits under
                    // "resources": {"limits": ...}; a top-level "limits" key
                    // on the container is presumably dropped during
                    // deserialization — confirm against the Pod spec.
                    "limits":{
                        "cpu":limits.cpu,
                        "memory":limits.memory
                    },
                    "args":[
                        "cluster_mode=kubernetes",
                        "manager_type=Worker",
                        format!("application_id={}",cluster_name),
                        format!("task_manager_id={}",task_manager_id),
                        format!("coordinator_address={}",coordinator_address),
                    ]
                }
            ],
            "restartPolicy":"OnFailure"
        }
    }
    ))?;
    let pp = PostParams::default();
    let mut uid = String::new();
    match pods.create(&pp, &p).await {
        Ok(pod) => {
            info!("create worker({})pod success", task_manager_name);
            // uid = Meta::meta(&pod).uid.clone().expect("kind has metadata.uid");
            uid = pod.metadata.uid.expect("kind has metadata.uid").to_string();
            // wait for it..
        }
        Err(kube::Error::Api(ae)) => {
            error!("{:?}", ae);
            // NOTE(review): asserting in production code panics the process
            // on any API error other than 409 (conflict); on 409 the function
            // proceeds and returns an empty uid. Consider returning Err.
            assert_eq!(ae.code, 409)
        } // if you skipped delete, for instance
        Err(e) => return Err(e.into()), // any other case is probably bad
    }
    Ok(uid)
}
/// Delete worker pods matching the given field-selector fragments
/// (a mix of `uid=...` and `name=...` entries built by `stop_workers`).
async fn stop_worker(namespace: &str, task_ids: Vec<String>) -> anyhow::Result<()> {
    let client = Client::try_default().await?;
    let pods: Api<Pod> = Api::namespaced(client, namespace);
    let dp = DeleteParams::default();
    let mut lp = ListParams::default();
    for task_id in task_ids {
        // NOTE(review): `ListParams::fields` replaces any previously set
        // field selector, so after this loop only the *last* entry is in
        // effect and the other pods are not deleted — confirm against the
        // kube crate version in use and consider one delete per selector.
        lp = lp.fields(task_id.as_str());
    }
    match pods.delete_collection(&dp, &lp).await {
        Ok(_o) => info!("stop worker success"),
        Err(e) => error!("stop worker failed. {:?}", e),
    };
    Ok(())
}
async fn get_job_deploy_id(namespace: &str, cluster_name: &str) -> anyhow::Result<String> {
info!(
"get application {} deploy id on namespace :{}",
cluster_name, namespace
);
let client = Client::try_default().await?;
let deployment: Api<Deployment> = Api::namespaced(client, namespace);
let mut uid = String::new();
match deployment.get(cluster_name).await {
Ok(d) => {
if let Some(id) = d.metadata.uid {
info!(
"get application {} deploy id on namespace {} success:{}",
cluster_name, namespace, id
);
uid = id;
}
}
_ => {}
}
Ok(uid)
}
/// Replace underscores with hyphens so the value can be used inside a
/// Kubernetes resource name (underscores are not allowed there).
fn parse_name(name: &str) -> String {
    // Char pattern avoids the &str pattern's substring search machinery,
    // and the trailing `return ...;` was redundant.
    name.replace('_', "-")
}
| 34.72428 | 100 | 0.498815 |
e281b73a68d2490ad9855dbb80bfc120bc272710
| 362 |
#[doc = "Reader of register STATUS"]
pub type R = crate::R<u32, super::STATUS>;
#[doc = "Reader of field `STATUS`"]
pub type STATUS_R = crate::R<u8, u8>;
impl R {
    #[doc = "Bits 0:3 - The IRK that was used last time an address was resolved"]
    #[inline(always)]
    pub fn status(&self) -> STATUS_R {
        // Only the low nibble of the register carries the STATUS field.
        let field_bits = (self.bits & 0x0f) as u8;
        STATUS_R::new(field_bits)
    }
}
| 30.166667 | 81 | 0.610497 |
f5b86710b3c1a8c7d93abc8b6af7aae757c40e9e
| 3,281 |
use crate::{
event::{self, Event},
internal_events::{UdpEventReceived, UdpSocketError},
shutdown::ShutdownSignal,
sources::Source,
Pipeline,
};
use bytes05::BytesMut;
use codec::BytesDelimitedCodec;
use futures::{compat::Future01CompatExt, FutureExt, TryFutureExt};
use futures01::Sink;
use serde::{Deserialize, Serialize};
use std::net::SocketAddr;
use string_cache::DefaultAtom as Atom;
use tokio::net::UdpSocket;
use tokio_util::codec::Decoder;
/// UDP processes messages per packet, where messages are separated by newline.
#[derive(Deserialize, Serialize, Debug, Clone)]
#[serde(deny_unknown_fields)]
pub struct UdpConfig {
    /// Address (ip:port) the UDP listener socket binds to.
    pub address: SocketAddr,
    /// Maximum bytes read per datagram; defaults to 100 KiB.
    #[serde(default = "default_max_length")]
    pub max_length: usize,
    /// Event key under which the peer address is recorded.
    /// NOTE(review): `udp()` takes a concrete key, so a `None` here is
    /// presumably resolved by the caller — confirm upstream.
    pub host_key: Option<Atom>,
}
/// Default `max_length`: 100 KiB (102 400 bytes).
fn default_max_length() -> usize {
    100 * 1024
}
impl UdpConfig {
pub fn new(address: SocketAddr) -> Self {
Self {
address,
max_length: default_max_length(),
host_key: None,
}
}
}
/// Build the UDP socket source: binds `address`, then loops reading
/// datagrams, splitting each into newline-delimited events tagged with the
/// peer address under `host_key`, and forwarding them to `out` until
/// `shutdown` fires.
pub fn udp(
    address: SocketAddr,
    max_length: usize,
    host_key: Atom,
    shutdown: ShutdownSignal,
    out: Pipeline,
) -> Source {
    let mut out = out.sink_map_err(|e| error!("error sending event: {:?}", e));
    Box::new(
        async move {
            // Bind failure is fatal for the source; panic with context.
            let mut socket = UdpSocket::bind(&address)
                .await
                .expect("failed to bind to udp listener socket");
            info!(message = "listening.", %address);
            let mut shutdown = shutdown.compat();
            let mut buf = BytesMut::with_capacity(max_length);
            loop {
                // Restore the buffer to full length: `split_to` below shrinks
                // it by the bytes consumed on the previous iteration.
                buf.resize(max_length, 0);
                tokio::select! {
                    recv = socket.recv_from(&mut buf) => {
                        let (byte_size, address) = recv.map_err(|error| {
                            emit!(UdpSocketError { error });
                        })?;
                        let mut payload = buf.split_to(byte_size);
                        // UDP processes messages per payload, where messages are separated by newline
                        // and stretch to end of payload.
                        let mut decoder = BytesDelimitedCodec::new(b'\n');
                        while let Ok(Some(line)) = decoder.decode_eof(&mut payload) {
                            let mut event = Event::from(line);
                            event
                                .as_mut_log()
                                .insert(event::log_schema().source_type_key(), "socket");
                            event
                                .as_mut_log()
                                .insert(host_key.clone(), address.to_string());
                            // NOTE: byte_size is the whole datagram's size,
                            // emitted once per decoded line within it.
                            emit!(UdpEventReceived { byte_size });
                            // Racing the send against shutdown lets the source
                            // exit promptly even while back-pressured.
                            tokio::select!{
                                result = out.send(event).compat() => {
                                    out = result?;
                                }
                                _ = &mut shutdown => return Ok(()),
                            }
                        }
                    }
                    _ = &mut shutdown => return Ok(()),
                }
            }
        }
        .boxed()
        .compat(),
    )
}
| 32.485149 | 102 | 0.493752 |
2392cdd4fe7c44bb88c9d29821692b554f1c0c7b
| 2,275 |
use crate::background_thread::BackgroundThread;
use crate::Event;
use crate::Image;
use super::WindowInner;
/// A event handler.
pub type EventHandler = Box<dyn FnMut(&mut EventHandlerContext) + Send>;
/// The context for a registered event handler.
pub struct EventHandlerContext<'a> {
/// The vector to add spawned tasks too.
background_tasks: &'a mut Vec<BackgroundThread<()>>,
/// Flag to indicate if the event should be passed to other handlers.
stop_propagation: bool,
/// Flag to indicate the handler should be removed.
remove_handler: bool,
/// The event to be handled.
event: &'a Event,
/// The window that triggered the event.
window: &'a mut WindowInner,
}
impl<'a> EventHandlerContext<'a> {
    /// Wrap an event, the window it targets, and the background-task list
    /// into the context handed to each registered event handler.
    pub(crate) fn new(
        background_tasks: &'a mut Vec<BackgroundThread<()>>,
        event: &'a Event,
        window: &'a mut WindowInner,
    ) -> Self {
        Self {
            background_tasks,
            stop_propagation: false,
            remove_handler: false,
            event,
            window,
        }
    }

    /// Stop propagation of the event to other handlers.
    pub fn stop_propagation(&mut self) {
        self.stop_propagation = true;
    }

    /// Check if we should stop propagation of the event.
    pub(crate) fn should_stop_propagation(&self) -> bool {
        self.stop_propagation
    }

    /// Remove the event handler after it returns.
    pub fn remove_handler(&mut self) {
        self.remove_handler = true;
    }

    /// Check if we should remove the event handler after it returns.
    pub(crate) fn should_remove_handler(&self) -> bool {
        // Bug fix: this previously returned `self.stop_propagation`, so a
        // handler calling `remove_handler()` was never removed, while one
        // that merely stopped propagation was removed by mistake.
        self.remove_handler
    }

    /// Get the event.
    pub fn event(&self) -> &'a Event {
        self.event
    }

    /// Get the currently displayed image for the window.
    pub fn image(&self) -> Option<&Image> {
        self.window.image()
    }

    /// Get the window that triggered the event.
    pub fn window<'b>(&'b self) -> &'b WindowInner {
        self.window
    }

    /// Get the window that triggered the event.
    pub fn window_mut<'b>(&'b mut self) -> &'b mut WindowInner {
        self.window
    }

    /// Spawn a background task.
    ///
    /// The task will run in a new thread.
    /// The thread will be joined when [`crate::stop`] is called.
    /// If this is not desired, simply spawn a thread manually.
    pub fn spawn_task<F: FnOnce() + Send + 'static>(&mut self, task: F) {
        self.background_tasks.push(BackgroundThread::new(task));
    }
}
| 25 | 72 | 0.688352 |
c1e1d9645d10c86d1f9ee7f70a53044ab54e4d59
| 533 |
// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(tool_lints)]
#![deny(clippy::all)]
#![allow(unused_imports)]
use std::*;
fn main() { }
| 28.052632 | 69 | 0.72045 |
f5f9b465cae88d201c49a77a49dbdb8a0540d6d4
| 10,906 |
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
use super::encoded::RawEncodeSnapshot;
use super::raw_mvcc::RawMvccSnapshot;
use crate::storage::kv::Result;
use crate::storage::kv::{Cursor, ScanMode, Snapshot};
use crate::storage::Statistics;
use api_version::{APIV1TTL, APIV2};
use engine_traits::{CfName, IterOptions, DATA_KEY_PREFIX_LEN};
use kvproto::kvrpcpb::{ApiVersion, KeyRange};
use std::time::Duration;
use tikv_util::time::Instant;
use txn_types::{Key, KvPair};
use yatp::task::future::reschedule;
const MAX_TIME_SLICE: Duration = Duration::from_millis(2);
const MAX_BATCH_SIZE: usize = 1024;
// TODO: refactor to utilize generic type `APIVersion` and eliminate matching `api_version`.
pub enum RawStore<S: Snapshot> {
V1(RawStoreInner<S>),
V1TTL(RawStoreInner<RawEncodeSnapshot<S, APIV1TTL>>),
V2(RawStoreInner<RawEncodeSnapshot<RawMvccSnapshot<S>, APIV2>>),
}
impl<'a, S: Snapshot> RawStore<S> {
pub fn new(snapshot: S, api_version: ApiVersion) -> Self {
match api_version {
ApiVersion::V1 => RawStore::V1(RawStoreInner::new(snapshot)),
ApiVersion::V1ttl => RawStore::V1TTL(RawStoreInner::new(
RawEncodeSnapshot::from_snapshot(snapshot),
)),
ApiVersion::V2 => RawStore::V2(RawStoreInner::new(RawEncodeSnapshot::from_snapshot(
RawMvccSnapshot::from_snapshot(snapshot),
))),
}
}
pub fn raw_get_key_value(
&self,
cf: CfName,
key: &Key,
stats: &mut Statistics,
) -> Result<Option<Vec<u8>>> {
match self {
RawStore::V1(inner) => inner.raw_get_key_value(cf, key, stats),
RawStore::V1TTL(inner) => inner.raw_get_key_value(cf, key, stats),
RawStore::V2(inner) => inner.raw_get_key_value(cf, key, stats),
}
}
pub fn raw_get_key_ttl(
&self,
cf: CfName,
key: &'a Key,
stats: &'a mut Statistics,
) -> Result<Option<u64>> {
match self {
RawStore::V1(_) => panic!("get ttl on non-ttl store"),
RawStore::V1TTL(inner) => inner.snapshot.get_key_ttl_cf(cf, key, stats),
RawStore::V2(inner) => inner.snapshot.get_key_ttl_cf(cf, key, stats),
}
}
pub async fn forward_raw_scan(
&'a self,
cf: CfName,
start_key: &'a Key,
end_key: Option<&'a Key>,
limit: usize,
statistics: &'a mut Statistics,
key_only: bool,
) -> Result<Vec<Result<KvPair>>> {
let mut option = IterOptions::default();
if let Some(end) = end_key {
option.set_upper_bound(end.as_encoded(), DATA_KEY_PREFIX_LEN);
}
match self {
RawStore::V1(inner) => {
if key_only {
option.set_key_only(key_only);
}
inner
.forward_raw_scan(cf, start_key, limit, statistics, option, key_only)
.await
}
RawStore::V1TTL(inner) => {
inner
.forward_raw_scan(cf, start_key, limit, statistics, option, key_only)
.await
}
RawStore::V2(inner) => {
inner
.forward_raw_scan(cf, start_key, limit, statistics, option, key_only)
.await
}
}
}
pub async fn reverse_raw_scan(
&'a self,
cf: CfName,
start_key: &'a Key,
end_key: Option<&'a Key>,
limit: usize,
statistics: &'a mut Statistics,
key_only: bool,
) -> Result<Vec<Result<KvPair>>> {
let mut option = IterOptions::default();
if let Some(end) = end_key {
option.set_lower_bound(end.as_encoded(), DATA_KEY_PREFIX_LEN);
}
match self {
RawStore::V1(inner) => {
if key_only {
option.set_key_only(key_only);
}
inner
.reverse_raw_scan(cf, start_key, limit, statistics, option, key_only)
.await
}
RawStore::V1TTL(inner) => {
inner
.reverse_raw_scan(cf, start_key, limit, statistics, option, key_only)
.await
}
RawStore::V2(inner) => {
inner
.reverse_raw_scan(cf, start_key, limit, statistics, option, key_only)
.await
}
}
}
pub async fn raw_checksum_ranges(
&'a self,
cf: CfName,
ranges: &[KeyRange],
statistics: &'a mut Vec<Statistics>,
) -> Result<(u64, u64, u64)> {
match self {
RawStore::V1(inner) => inner.raw_checksum_ranges(cf, ranges, statistics).await,
RawStore::V1TTL(inner) => inner.raw_checksum_ranges(cf, ranges, statistics).await,
RawStore::V2(inner) => inner.raw_checksum_ranges(cf, ranges, statistics).await,
}
}
}
pub struct RawStoreInner<S: Snapshot> {
snapshot: S,
}
impl<'a, S: Snapshot> RawStoreInner<S> {
pub fn new(snapshot: S) -> Self {
RawStoreInner { snapshot }
}
pub fn raw_get_key_value(
&self,
cf: CfName,
key: &Key,
stats: &mut Statistics,
) -> Result<Option<Vec<u8>>> {
// no scan_count for this kind of op.
let key_len = key.as_encoded().len();
self.snapshot.get_cf(cf, key).map(|value| {
stats.data.flow_stats.read_keys = 1;
stats.data.flow_stats.read_bytes =
key_len + value.as_ref().map(|v| v.len()).unwrap_or(0);
value
})
}
/// Scan raw keys in [`start_key`, `end_key`), returns at most `limit` keys. If `end_key` is
/// `None`, it means unbounded.
///
/// If `key_only` is true, the value corresponding to the key will not be read. Only scanned
/// keys will be returned.
pub async fn forward_raw_scan(
&'a self,
cf: CfName,
start_key: &'a Key,
limit: usize,
statistics: &'a mut Statistics,
option: IterOptions,
key_only: bool,
) -> Result<Vec<Result<KvPair>>> {
if limit == 0 {
return Ok(vec![]);
}
let mut cursor = Cursor::new(self.snapshot.iter_cf(cf, option)?, ScanMode::Forward, false);
let statistics = statistics.mut_cf_statistics(cf);
if !cursor.seek(start_key, statistics)? {
return Ok(vec![]);
}
let mut pairs = vec![];
let mut row_count = 0;
let mut time_slice_start = Instant::now();
while cursor.valid()? {
row_count += 1;
if row_count >= MAX_BATCH_SIZE {
if time_slice_start.saturating_elapsed() > MAX_TIME_SLICE {
reschedule().await;
time_slice_start = Instant::now();
}
row_count = 0;
}
pairs.push(Ok((
cursor.key(statistics).to_owned(),
if key_only {
vec![]
} else {
cursor.value(statistics).to_owned()
},
)));
if pairs.len() < limit {
cursor.next(statistics);
} else {
break;
}
}
Ok(pairs)
}
/// Scan raw keys in [`end_key`, `start_key`) in reverse order, returns at most `limit` keys. If
/// `start_key` is `None`, it means it's unbounded.
///
/// If `key_only` is true, the value
/// corresponding to the key will not be read out. Only scanned keys will be returned.
pub async fn reverse_raw_scan(
&'a self,
cf: CfName,
start_key: &'a Key,
limit: usize,
statistics: &'a mut Statistics,
option: IterOptions,
key_only: bool,
) -> Result<Vec<Result<KvPair>>> {
if limit == 0 {
return Ok(vec![]);
}
let mut cursor = Cursor::new(
self.snapshot.iter_cf(cf, option)?,
ScanMode::Backward,
false,
);
let statistics = statistics.mut_cf_statistics(cf);
if !cursor.reverse_seek(start_key, statistics)? {
return Ok(vec![]);
}
let mut pairs = vec![];
let mut row_count = 0;
let mut time_slice_start = Instant::now();
while cursor.valid()? {
row_count += 1;
if row_count >= MAX_BATCH_SIZE {
if time_slice_start.saturating_elapsed() > MAX_TIME_SLICE {
reschedule().await;
time_slice_start = Instant::now();
}
row_count = 0;
}
pairs.push(Ok((
cursor.key(statistics).to_owned(),
if key_only {
vec![]
} else {
cursor.value(statistics).to_owned()
},
)));
if pairs.len() < limit {
cursor.prev(statistics);
} else {
break;
}
}
Ok(pairs)
}
pub async fn raw_checksum_ranges(
&'a self,
cf: CfName,
ranges: &[KeyRange],
statistics: &'a mut Vec<Statistics>,
) -> Result<(u64, u64, u64)> {
let mut total_bytes = 0;
let mut total_kvs = 0;
let mut digest = crc64fast::Digest::new();
let mut row_count = 0;
let mut time_slice_start = Instant::now();
for r in ranges {
let mut stats = Statistics::default();
let cf_stats = stats.mut_cf_statistics(cf);
let mut opts = IterOptions::new(None, None, false);
opts.set_upper_bound(r.get_end_key(), DATA_KEY_PREFIX_LEN);
let mut cursor =
Cursor::new(self.snapshot.iter_cf(cf, opts)?, ScanMode::Forward, false);
cursor.seek(&Key::from_encoded(r.get_start_key().to_vec()), cf_stats)?;
while cursor.valid()? {
row_count += 1;
if row_count >= MAX_BATCH_SIZE {
if time_slice_start.saturating_elapsed() > MAX_TIME_SLICE {
reschedule().await;
time_slice_start = Instant::now();
}
row_count = 0;
}
let k = cursor.key(cf_stats);
let v = cursor.value(cf_stats);
digest.write(k);
digest.write(v);
total_kvs += 1;
total_bytes += k.len() + v.len();
cursor.next(cf_stats);
}
statistics.push(stats);
}
Ok((digest.sum64(), total_kvs, total_bytes as u64))
}
}
| 33.764706 | 100 | 0.520172 |
4a63e05cd05fa6b6c7638fc913acf5946315ca4c
| 9,728 |
use prelude::*;
use densearray::prelude::*;
use rng::xorshift::{Xorshiftplus128Rng};
use rand::{Rng};
use std::cell::{RefCell};
use std::cmp::{min};
use std::marker::{PhantomData};
use std::num::{Zero};
use std::rc::{Rc};
/// A gradient-based parameter update rule applied to a differentiable loss
/// `Loss` over samples of type `S`, with parameters of scalar type `T`.
///
/// Drivers call `begin_iteration` before accumulating gradients for a
/// minibatch, `end_iteration` after the forward/backward passes, and `step`
/// to apply the update (see `StochasticGradWorker::step`).
pub trait GradUpdate<T, Loss, S, IoBuf: ?Sized> where T: Copy, Loss: DiffLoss<S, IoBuf> {
  /// Hyperparameter configuration for this update rule.
  type Cfg: Clone;
  // Optional lifecycle hooks; implementors override the ones they support
  // (the defaults panic via `unimplemented!`).
  fn initialize(cfg: Self::Cfg, loss: &mut Loss) -> Self where Self: Sized { unimplemented!(); }
  fn reset(&mut self, loss: &mut Loss, rng: &mut Xorshiftplus128Rng) { unimplemented!(); }
  /// Called once before gradients for a minibatch are accumulated.
  fn begin_iteration(&mut self, loss: &mut Loss);
  /// Called once after all batches of the minibatch ran forward/backward.
  fn end_iteration(&mut self, minibatch_sz: usize, loss: &mut Loss);
  /// Applies the parameter update for iteration `iter_count`.
  fn step(&mut self, iter_count: usize, loss: &mut Loss);
  /// Synchronization hook (no-op by default); see `FastStochasticGradWorker::sync`.
  fn sync(&mut self) {}
  //fn pre_step(&mut self, loss: &mut Loss);
  //fn accumulate(&mut self, minibatch_sz: usize, loss: &mut Loss) { unimplemented!(); }
  //fn step(&mut self, minibatch_sz: usize, iter_count: usize, loss: &mut Loss);
  // FIXME(20161120): no point to saving/loading the temporary parameter.
  fn upload_param(&mut self, loss: &mut Loss) { unimplemented!(); }
  fn download_param(&mut self, loss: &mut Loss) { unimplemented!(); }
  fn load_param(&mut self, src_param: &mut [T]) { unimplemented!(); }
  fn save_param(&mut self, dst_param: &mut [T]) { unimplemented!(); }
}
/// Minibatch stochastic-gradient worker: each `step` draws `minibatch_sz`
/// samples, runs forward/backward in chunks of `batch_sz`, then delegates
/// the parameter update to `Update`.
pub struct StochasticGradWorker<T, Update, Loss, S, IoBuf: ?Sized> where T: Copy, Update: GradUpdate<T, Loss, S, IoBuf>, Loss: DiffLoss<S, IoBuf> {
  // Number of samples loaded per forward/backward pass.
  batch_sz: usize,
  // Number of samples consumed per optimization step.
  minibatch_sz: usize,
  // Size of the differentiable parameter buffer, as reported by `diff_param_sz()`.
  grad_sz: usize,
  //cfg: Update::Cfg,
  // Completed optimization steps so far.
  iter_count: usize,
  loss: Rc<RefCell<Loss>>,
  // Per-step sample staging buffer (capacity `minibatch_sz`).
  cache: Vec<S>,
  update: Update,
  //param_saved: Vec<T>,
  //dirty_param: bool,
  // Used for (currently commented-out) timing diagnostics.
  stopwatch: Stopwatch,
  _marker: PhantomData<(T, fn (IoBuf))>,
}
impl<T, Update, Loss, S, IoBuf: ?Sized> StochasticGradWorker<T, Update, Loss, S, IoBuf> where T: Copy + Zero, Update: GradUpdate<T, Loss, S, IoBuf>, Loss: DiffLoss<S, IoBuf> {
  /// Builds a worker that draws `minibatch_sz` samples per step and feeds
  /// them to `loss` in chunks of `batch_sz`, applying updates with `update`.
  pub fn new(batch_sz: usize, minibatch_sz: usize, update: Update, loss: Rc<RefCell<Loss>>) -> StochasticGradWorker<T, Update, Loss, S, IoBuf> {
    let param_dim = loss.borrow_mut().diff_param_sz();
    StochasticGradWorker{
      batch_sz: batch_sz,
      minibatch_sz: minibatch_sz,
      grad_sz: param_dim,
      iter_count: 0,
      loss: loss.clone(),
      cache: Vec::with_capacity(minibatch_sz),
      update: update,
      stopwatch: Stopwatch::new(),
      _marker: PhantomData,
    }
  }
}
impl<T, Update, Loss, S, IoBuf: ?Sized> StochasticGradWorker<T, Update, Loss, S, IoBuf> where T: Copy, Update: GradUpdate<T, Loss, S, IoBuf>, Loss: DiffLoss<S, IoBuf> {
  /// Resets the update rule's internal state via `GradUpdate::reset`.
  pub fn init(&mut self, rng: &mut Xorshiftplus128Rng) {
    //let mut loss = self.loss.borrow_mut();
    //loss.init_param(rng);
    self.stopwatch.lap();
    self.update.reset(&mut *self.loss.borrow_mut(), rng);
    self.stopwatch.lap();
    //println!("DEBUG: sg: init: {:.6}", self.stopwatch.elapsed());
  }
  /// Runs one optimization step: pulls exactly `minibatch_sz` samples
  /// (asserts if the iterator yields fewer), accumulates gradients over
  /// `batch_sz`-sized chunks, then applies the update.
  pub fn step(&mut self, samples: &mut Iterator<Item=S>) {
    self.stopwatch.lap();
    self.cache.clear();
    for sample in samples.take(self.minibatch_sz) {
      self.cache.push(sample);
    }
    assert_eq!(self.minibatch_sz, self.cache.len());
    self.stopwatch.lap();
    //println!("DEBUG: sg: step: fetching samples: {:.6}", self.stopwatch.elapsed());
    let mut loss = self.loss.borrow_mut();
    // Iteration preamble: the update hook runs before the loss resets its
    // accumulated loss/gradient state.
    self.update.begin_iteration(&mut *loss);
    loss.save_rng_state();
    loss.next_iteration();
    loss.reset_loss();
    loss.reset_grad();
    // Ceiling division: the last batch may be smaller than `batch_sz`.
    let num_batches = (self.minibatch_sz + self.batch_sz - 1) / self.batch_sz;
    for batch in 0 .. num_batches {
      let batch_start = batch * self.batch_sz;
      let batch_end = min((batch + 1) * self.batch_sz, self.minibatch_sz);
      self.stopwatch.lap();
      loss.load_batch(&self.cache[batch_start .. batch_end]);
      self.stopwatch.lap();
      //println!("DEBUG: sg: step: loading batch: {:.6}", self.stopwatch.elapsed());
      loss.forward(OpPhase::Learning);
      self.stopwatch.lap();
      //println!("DEBUG: sg: step: forward: {:.6}", self.stopwatch.elapsed());
      loss.backward();
      self.stopwatch.lap();
      //println!("DEBUG: sg: step: backward: {:.6}", self.stopwatch.elapsed());
    }
    // Apply the accumulated update, then let the loss refresh any
    // non-differentiable parameters.
    self.update.end_iteration(self.minibatch_sz, &mut *loss);
    self.update.step(self.iter_count, &mut *loss);
    loss.update_nondiff_param(self.iter_count);
    self.stopwatch.lap();
    //println!("DEBUG: sg: step: update: {:.6}", self.stopwatch.elapsed());
    self.iter_count += 1;
    self.cache.clear();
    //self.dirty_param = true;
  }
  /// Runs forward-only inference over up to `epoch_sz` samples in
  /// `batch_sz` chunks, accumulating loss statistics (previous totals are
  /// cleared first via `reset_loss`).
  pub fn eval(&mut self, epoch_sz: usize, samples: &mut Iterator<Item=S>) {
    let mut loss = self.loss.borrow_mut();
    loss.reset_loss();
    /*if self.dirty_param {
      //self.update.save_param(&mut self.param_saved);
      //loss.load_diff_param(&mut self.param_saved);
      loss.store_diff_param(&mut self.param_saved);
      self.dirty_param = false;
    }*/
    self.cache.clear();
    for mut sample in samples.take(epoch_sz) {
      self.cache.push(sample);
      if self.cache.len() == self.batch_sz {
        loss.load_batch(&self.cache);
        loss.forward(OpPhase::Inference);
        self.cache.clear();
      }
    }
    // Flush the final partial batch, if any.
    if self.cache.len() > 0 {
      loss.load_batch(&self.cache);
      loss.forward(OpPhase::Inference);
      self.cache.clear();
    }
  }
}
impl<Update, Loss, S, IoBuf: ?Sized> StochasticGradWorker<f32, Update, Loss, S, IoBuf> where Update: GradUpdate<f32, Loss, S, IoBuf>, Loss: DiffLoss<S, IoBuf> + LossReport<ClassLossStats> {
  /// Folds this worker's running loss statistics into `stats`.
  pub fn update_stats(&self, stats: &mut ClassLossStats) {
    self.loss.borrow_mut().update_stats(self.iter_count, stats);
  }
}
/// Variant of `StochasticGradWorker` that processes the whole minibatch in a
/// single forward pass (no inner `batch_sz` chunking in `step`).
pub struct FastStochasticGradWorker<T, Update, Loss, S, IoBuf: ?Sized> where T: Copy, Update: GradUpdate<T, Loss, S, IoBuf>, Loss: DiffLoss<S, IoBuf> {
  // Number of samples consumed (and loaded as one batch) per step.
  minibatch_sz: usize,
  // Size of the differentiable parameter buffer, as reported by `diff_param_sz()`.
  grad_sz: usize,
  // Completed optimization steps so far.
  iter_count: usize,
  loss: Rc<RefCell<Loss>>,
  // Per-step sample staging buffer (capacity `minibatch_sz`).
  cache: Vec<S>,
  update: Update,
  // Used for the DEBUG timing printouts in `init`/`step`.
  stopwatch: Stopwatch,
  _marker: PhantomData<(T, fn (IoBuf))>,
}
impl<T, Update, Loss, S, IoBuf: ?Sized> FastStochasticGradWorker<T, Update, Loss, S, IoBuf> where T: Copy + Zero, Update: GradUpdate<T, Loss, S, IoBuf>, Loss: DiffLoss<S, IoBuf> {
  /// Builds a worker that consumes `minibatch_sz` samples per step and
  /// loads them into `loss` as one batch.
  pub fn new(minibatch_sz: usize, update: Update, loss: Rc<RefCell<Loss>>) -> FastStochasticGradWorker<T, Update, Loss, S, IoBuf> {
    let param_dim = loss.borrow_mut().diff_param_sz();
    FastStochasticGradWorker{
      minibatch_sz: minibatch_sz,
      grad_sz: param_dim,
      iter_count: 0,
      loss: loss.clone(),
      cache: Vec::with_capacity(minibatch_sz),
      update: update,
      stopwatch: Stopwatch::new(),
      _marker: PhantomData,
    }
  }
}
impl<T, Update, Loss, S, IoBuf: ?Sized> FastStochasticGradWorker<T, Update, Loss, S, IoBuf> where T: Copy, Update: GradUpdate<T, Loss, S, IoBuf>, Loss: DiffLoss<S, IoBuf> {
  /// Resets the update rule's internal state via `GradUpdate::reset`.
  pub fn init(&mut self, rng: &mut Xorshiftplus128Rng) {
    self.stopwatch.lap();
    self.update.reset(&mut *self.loss.borrow_mut(), rng);
    self.stopwatch.lap();
    println!("DEBUG: sg: init: {:.6}", self.stopwatch.elapsed());
  }
  /// Runs one optimization step over exactly `minibatch_sz` samples
  /// (asserts if the iterator yields fewer), loaded as a single batch.
  /// Note: unlike `StochasticGradWorker::step`, there is no explicit
  /// `backward()` call here — presumably the update's `end_iteration`/`step`
  /// drive the backward pass (TODO confirm against `GradUpdate` impls).
  pub fn step(&mut self, samples: &mut Iterator<Item=S>) {
    self.stopwatch.lap();
    self.cache.clear();
    for sample in samples.take(self.minibatch_sz) {
      self.cache.push(sample);
    }
    assert_eq!(self.minibatch_sz, self.cache.len());
    self.stopwatch.lap();
    println!("DEBUG: sg: step: fetching samples: {:.6}", self.stopwatch.elapsed());
    let mut loss = self.loss.borrow_mut();
    // Iteration preamble: the update hook runs before the loss resets its
    // accumulated loss/gradient state.
    self.update.begin_iteration(&mut *loss);
    loss.save_rng_state();
    loss.next_iteration();
    loss.reset_loss();
    loss.reset_grad();
    {
      self.stopwatch.lap();
      loss.load_batch(&self.cache);
      self.stopwatch.lap();
      println!("DEBUG: sg: step: loading batch: {:.6}", self.stopwatch.elapsed());
      loss.forward(OpPhase::Learning);
      self.stopwatch.lap();
      println!("DEBUG: sg: step: forward: {:.6}", self.stopwatch.elapsed());
    }
    self.update.end_iteration(self.minibatch_sz, &mut *loss);
    self.update.step(self.iter_count, &mut *loss);
    loss.update_nondiff_param(self.iter_count);
    self.stopwatch.lap();
    println!("DEBUG: sg: step: backward + update: {:.6}", self.stopwatch.elapsed());
    self.iter_count += 1;
    self.cache.clear();
  }
  /// Runs forward-only inference over up to `epoch_sz` samples in
  /// `minibatch_sz` chunks, accumulating loss statistics (previous totals
  /// are cleared first via `reset_loss`).
  pub fn eval(&mut self, epoch_sz: usize, samples: &mut Iterator<Item=S>) {
    let mut loss = self.loss.borrow_mut();
    loss.reset_loss();
    self.cache.clear();
    for mut sample in samples.take(epoch_sz) {
      self.cache.push(sample);
      if self.cache.len() == self.minibatch_sz {
        loss.load_batch(&self.cache);
        loss.forward(OpPhase::Inference);
        self.cache.clear();
      }
    }
    // Flush the final partial batch, if any.
    if self.cache.len() > 0 {
      loss.load_batch(&self.cache);
      loss.forward(OpPhase::Inference);
      self.cache.clear();
    }
  }
  /// Forwards to the update rule's synchronization hook.
  pub fn sync(&mut self) {
    self.update.sync();
  }
}
impl<Update, Loss, S, IoBuf: ?Sized> FastStochasticGradWorker<f32, Update, Loss, S, IoBuf> where Update: GradUpdate<f32, Loss, S, IoBuf>, Loss: DiffLoss<S, IoBuf> + LossReport<ClassLossStats> {
  /// Folds this worker's running loss statistics into `stats`.
  pub fn update_stats(&self, stats: &mut ClassLossStats) {
    self.loss.borrow_mut().update_stats(self.iter_count, stats);
  }
}
| 37.705426 | 193 | 0.638055 |
f4c31d774375b5eb8356ed77b1b3eb635e3d0009
| 31,833 |
// This crate uses standard host-centric USB terminology for transfer
// directions. Therefore an OUT transfer refers to a host-to-device transfer,
// and an IN transfer refers to a device-to-host transfer. This is mainly a
// concern for implementing new USB peripheral drivers and USB classes, and
// people doing that should be familiar with the USB standard. http://ww1.microchip.com/downloads/en/DeviceDoc/60001507E.pdf
// http://ww1.microchip.com/downloads/en/AppNotes/Atmel-42261-SAM-D21-USB_Application-Note_AT06475.pdf
use super::Descriptors;
use crate::calibration::{usb_transn_cal, usb_transp_cal, usb_trim_cal};
use crate::clock;
use crate::gpio::{AlternateG, AnyPin, Pin, PA24, PA25};
use crate::pac;
use crate::pac::usb::DEVICE;
use crate::pac::{PM, USB};
use crate::usb::devicedesc::DeviceDescBank;
use core::cell::{Ref, RefCell, RefMut};
use core::marker::PhantomData;
use core::mem;
use cortex_m::interrupt::{free as disable_interrupts, Mutex};
use cortex_m::singleton;
use usb_device::bus::PollResult;
use usb_device::endpoint::{EndpointAddress, EndpointType};
use usb_device::{Result as UsbResult, UsbDirection, UsbError};
/// EndpointTypeBits represents valid values for the EPTYPE fields in
/// the EPCFGn registers.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum EndpointTypeBits {
    /// Bank is unconfigured; no transfers occur.
    Disabled = 0,
    Control = 1,
    Isochronous = 2,
    Bulk = 3,
    Interrupt = 4,
    // Dual-bank operation is defined by the hardware but not used by
    // this driver (hence the allow).
    #[allow(unused)]
    DualBank = 5,
}
impl Default for EndpointTypeBits {
fn default() -> Self {
EndpointTypeBits::Disabled
}
}
impl From<EndpointType> for EndpointTypeBits {
    // Maps the usb-device endpoint type onto the hardware EPTYPE encoding.
    fn from(ep_type: EndpointType) -> EndpointTypeBits {
        match ep_type {
            EndpointType::Control => Self::Control,
            EndpointType::Isochronous => Self::Isochronous,
            EndpointType::Bulk => Self::Bulk,
            EndpointType::Interrupt => Self::Interrupt,
        }
    }
}
/// EPConfig tracks the desired configuration for one side of an endpoint.
#[derive(Default, Clone, Copy)]
struct EPConfig {
    // Transfer type for this bank; `Disabled` marks it unallocated.
    ep_type: EndpointTypeBits,
    // Bytes reserved for this bank in the shared endpoint buffer.
    allocated_size: u16,
    // Maximum packet size written into the bank descriptor.
    max_packet_size: u16,
    // Start address of this bank's buffer (cast back to `*mut u8` when used).
    addr: usize,
}
impl EPConfig {
fn new(
ep_type: EndpointType,
allocated_size: u16,
max_packet_size: u16,
buffer_addr: *mut u8,
) -> Self {
Self {
ep_type: ep_type.into(),
allocated_size,
max_packet_size,
addr: buffer_addr as usize,
}
}
}
// EndpointInfo represents the desired configuration for an endpoint pair.
#[derive(Default)]
struct EndpointInfo {
    // Bank 0 carries OUT (host-to-device) traffic.
    bank0: EPConfig,
    // Bank 1 carries IN (device-to-host) traffic.
    bank1: EPConfig,
}
impl EndpointInfo {
    // A fresh endpoint pair: both banks start out disabled.
    fn new() -> Self {
        Self::default()
    }
}
/// AllEndpoints tracks the desired configuration of all endpoints managed
/// by the USB peripheral.
struct AllEndpoints {
    // One bank pair per hardware endpoint (0..8); index 0 is reserved for
    // the control endpoint (see `find_free_endpoint`).
    endpoints: [EndpointInfo; 8],
}
impl AllEndpoints {
    fn new() -> Self {
        // All eight endpoint pairs start out with both banks disabled.
        Self {
            endpoints: Default::default(),
        }
    }

    /// Finds the lowest-numbered endpoint whose bank for direction `dir`
    /// is still disabled. Endpoint 0 is reserved for Control and skipped.
    fn find_free_endpoint(&self, dir: UsbDirection) -> UsbResult<usize> {
        (1..8)
            .find(|&idx| {
                let bank_type = match dir {
                    UsbDirection::Out => self.endpoints[idx].bank0.ep_type,
                    UsbDirection::In => self.endpoints[idx].bank1.ep_type,
                };
                bank_type == EndpointTypeBits::Disabled
            })
            .ok_or(UsbError::EndpointOverflow)
    }

    /// Records the configuration for one side of endpoint `idx`; fails if
    /// that bank is already allocated.
    #[allow(clippy::too_many_arguments)]
    fn allocate_endpoint(
        &mut self,
        dir: UsbDirection,
        idx: usize,
        ep_type: EndpointType,
        allocated_size: u16,
        max_packet_size: u16,
        _interval: u8,
        buffer_addr: *mut u8,
    ) -> UsbResult<EndpointAddress> {
        let bank = match dir {
            UsbDirection::Out => &mut self.endpoints[idx].bank0,
            UsbDirection::In => &mut self.endpoints[idx].bank1,
        };
        match bank.ep_type {
            EndpointTypeBits::Disabled => {
                *bank = EPConfig::new(ep_type, allocated_size, max_packet_size, buffer_addr);
                Ok(EndpointAddress::from_parts(idx, dir))
            }
            _ => Err(UsbError::EndpointOverflow),
        }
    }
}
// FIXME: replace with more general heap?
// Total bytes of RAM reserved for all endpoint buffers.
const BUFFER_SIZE: usize = 2048;

// Hands out the single static endpoint-buffer array. `singleton!` returns
// `Some` only on the first call, so calling this twice panics on the unwrap.
fn buffer() -> &'static mut [u8; BUFFER_SIZE] {
    singleton!(: [u8; BUFFER_SIZE] = [0; BUFFER_SIZE] ).unwrap()
}
// Bump allocator that carves endpoint buffers out of the static
// `BUFFER_SIZE`-byte array returned by `buffer()`. Allocations are never
// freed.
struct BufferAllocator {
    buffers: &'static mut [u8; BUFFER_SIZE],
    // Offset of the first unallocated byte within `buffers`.
    next_buf: u16,
}
impl BufferAllocator {
    fn new() -> Self {
        Self {
            next_buf: 0,
            buffers: buffer(),
        }
    }

    /// Carves `size` bytes out of the shared endpoint buffer, returning a
    /// 32-bit-aligned pointer to the start of the allocation, or
    /// `EndpointMemoryOverflow` when the remaining space cannot hold it.
    fn allocate_buffer(&mut self, size: u16) -> UsbResult<*mut u8> {
        debug_assert!(size & 1 == 0);

        let base = self.buffers.as_mut_ptr();
        // The end bound must be relative to the *base* of the backing
        // array, not to the current cursor: computing it as
        // `start_addr.add(BUFFER_SIZE)` (as before) overshoots by
        // `next_buf` bytes and would let allocations run past `buffers`.
        let buf_end = unsafe { base.add(BUFFER_SIZE) };

        // Guard the cursor itself; once the buffer is exhausted, indexing
        // `buffers[next_buf]` would panic instead of reporting overflow.
        if self.next_buf as usize >= BUFFER_SIZE {
            return Err(UsbError::EndpointMemoryOverflow);
        }
        let start_addr = unsafe { base.add(self.next_buf as usize) };

        // The address must be 32-bit aligned, so allow for that here
        // by offsetting by an appropriate alignment.
        let offset = start_addr.align_offset(mem::align_of::<u32>());
        let start_addr = unsafe { start_addr.add(offset) };

        if start_addr >= buf_end {
            return Err(UsbError::EndpointMemoryOverflow);
        }

        let end_addr = unsafe { start_addr.add(size as usize) };
        if end_addr > buf_end {
            return Err(UsbError::EndpointMemoryOverflow);
        }

        // Advance the cursor to the first byte past this allocation. Use
        // integer arithmetic for the offset; pointer `.sub()` with the
        // base *address* as the count (as before) is out-of-bounds
        // pointer arithmetic.
        self.next_buf = (end_addr as usize - base as usize) as u16;

        Ok(start_addr)
    }
}
// Mutable driver state, kept behind the critical-section mutex in `UsbBus`.
struct Inner {
    // In-memory endpoint descriptor table the hardware DMAs from/to.
    desc: RefCell<Descriptors>,
    // Pads are held only to keep PA24/PA25 in the USB alternate function.
    _dm_pad: Pin<PA24, AlternateG>,
    _dp_pad: Pin<PA25, AlternateG>,
    endpoints: RefCell<AllEndpoints>,
    buffers: RefCell<BufferAllocator>,
}
// USB device-mode bus driver. All state lives in `inner`, accessed under
// `disable_interrupts` so it can also be touched from interrupt context.
pub struct UsbBus {
    inner: Mutex<RefCell<Inner>>,
}
/// Generate a method that allows returning the endpoint register
/// for a given endpoint index. This helps very slightly with
/// two inconvenient issues:
/// - the SVD file translation generates a sequence of elements like ecfg0,
/// efcg1 rather than an array, so we have to manually translate the indices
/// - rust doesn't currently have a great solution for generating identifier
/// names, so we have to pass in a list of the possible names.
macro_rules! ep {
    ($name:ident, $type:ident, $e0:ident, $e1:ident, $e2:ident,
    $e3:ident, $e4:ident, $e5:ident, $e6:ident, $e7:ident) => {
        #[allow(unused)]
        #[inline]
        fn $name(&self, endpoint: usize) -> &pac::usb::device::$type {
            // Translate the runtime endpoint index into the matching
            // SVD-generated numbered register field.
            match endpoint {
                0 => &self.usb().$e0,
                1 => &self.usb().$e1,
                2 => &self.usb().$e2,
                3 => &self.usb().$e3,
                4 => &self.usb().$e4,
                5 => &self.usb().$e5,
                6 => &self.usb().$e6,
                7 => &self.usb().$e7,
                _ => unreachable!(),
            }
        }
    };
}
// Borrowed view of one side (bank) of one endpoint. `T` is the `InBank` or
// `OutBank` marker and selects which bank's registers/descriptor are used.
struct Bank<'a, T> {
    address: EndpointAddress,
    usb: &'a DEVICE,
    // Borrow of the shared in-memory descriptor table.
    desc: RefMut<'a, super::Descriptors>,
    _phantom: PhantomData<T>,
    endpoints: Ref<'a, AllEndpoints>,
}
impl<'a, T> Bank<'a, T> {
    /// Device-mode register block for the peripheral.
    fn usb(&self) -> &DEVICE {
        self.usb
    }

    /// Endpoint number this bank belongs to.
    #[inline]
    fn index(&self) -> usize {
        self.address.index()
    }

    /// Configuration recorded for this bank: bank 0 for OUT, bank 1 for IN.
    #[inline]
    fn config(&mut self) -> &EPConfig {
        let info = &self.endpoints.endpoints[self.address.index()];
        match self.address.is_out() {
            true => &info.bank0,
            false => &info.bank1,
        }
    }
}
/// InBank represents In (device-to-host) direction banks, Bank #1
struct InBank;

/// OutBank represents Out (host-to-device) direction banks, Bank #0
struct OutBank;
impl<'a> Bank<'a, InBank> {
    // In-memory transfer descriptor for this endpoint's bank 1.
    fn desc_bank(&mut self) -> &mut DeviceDescBank {
        let idx = self.index();
        self.desc.bank(idx, 1)
    }

    /// Returns true if Bank 1 is Ready and thus has data that can be written
    #[inline]
    fn is_ready(&self) -> bool {
        self.epstatus(self.index()).read().bk1rdy().bit()
    }

    /// Set Bank 1 Ready.
    /// Ready means that the buffer contains data that can be sent.
    #[inline]
    fn set_ready(&self, ready: bool) {
        if ready {
            self.epstatusset(self.index())
                .write(|w| w.bk1rdy().set_bit());
        } else {
            self.epstatusclr(self.index())
                .write(|w| w.bk1rdy().set_bit());
        }
    }

    /// Acknowledges the signal that the last packet was sent.
    #[inline]
    fn clear_transfer_complete(&self) {
        // Clear bits in epintflag by writing them to 1
        self.epintflag(self.index())
            .write(|w| w.trcpt1().set_bit().trfail1().set_bit());
    }

    /// Indicates if a transfer is complete or pending.
    #[inline]
    fn is_transfer_complete(&self) -> bool {
        self.epintflag(self.index()).read().trcpt1().bit()
    }

    /// Writes out endpoint configuration to its in-memory descriptor.
    fn flush_config(&mut self) {
        let config = *self.config();
        {
            let desc = self.desc_bank();
            desc.set_address(config.addr as *mut u8);
            desc.set_endpoint_size(config.max_packet_size);
            desc.set_multi_packet_size(0);
            desc.set_byte_count(0);
        }
    }

    /// Enables endpoint-specific interrupts.
    fn setup_ep_interrupts(&mut self) {
        self.epintenset(self.index())
            .write(|w| w.trcpt1().set_bit());
    }

    /// Prepares to transfer a series of bytes by copying the data into the
    /// bank1 buffer. The caller must call set_ready() to finalize the
    /// transfer.
    ///
    /// Copies at most `allocated_size` bytes; a longer `buf` is silently
    /// truncated to that size.
    pub fn write(&mut self, buf: &[u8]) -> UsbResult<usize> {
        let size = buf.len().min(self.config().allocated_size as usize);
        let desc = self.desc_bank();
        unsafe {
            buf.as_ptr()
                .copy_to_nonoverlapping(desc.get_address(), size);
        }
        desc.set_multi_packet_size(0);
        desc.set_byte_count(size as u16);
        Ok(size)
    }

    // True when the hardware has flagged a STALL handshake on bank 1.
    fn is_stalled(&self) -> bool {
        self.epintflag(self.index()).read().stall1().bit()
    }

    // Requests (or withdraws) a STALL on bank 1 via the STALLRQ1 bit.
    fn set_stall(&mut self, stall: bool) {
        if stall {
            self.epstatusset(self.index())
                .write(|w| w.stallrq1().set_bit())
        } else {
            self.epstatusclr(self.index())
                .write(|w| w.stallrq1().set_bit())
        }
    }
}
impl<'a> Bank<'a, OutBank> {
    // In-memory transfer descriptor for this endpoint's bank 0.
    fn desc_bank(&mut self) -> &mut DeviceDescBank {
        let idx = self.index();
        self.desc.bank(idx, 0)
    }

    /// Returns true if Bank 0 is Ready and thus has data that can be read.
    #[inline]
    fn is_ready(&self) -> bool {
        self.epstatus(self.index()).read().bk0rdy().bit()
    }

    /// Set Bank 0 Ready.
    /// Ready means that the buffer contains data that can be read.
    #[inline]
    fn set_ready(&self, ready: bool) {
        if ready {
            self.epstatusset(self.index())
                .write(|w| w.bk0rdy().set_bit());
        } else {
            self.epstatusclr(self.index())
                .write(|w| w.bk0rdy().set_bit());
        }
    }

    /// Acknowledges the signal that data has been received.
    #[inline]
    fn clear_transfer_complete(&self) {
        // Clear bits in epintflag by writing them to 1
        self.epintflag(self.index())
            .write(|w| w.trcpt0().set_bit().trfail0().set_bit());
    }

    /// Returns true if a Received Setup interrupt has occurred.
    /// This indicates that the read buffer holds a SETUP packet.
    #[inline]
    fn received_setup_interrupt(&self) -> bool {
        self.epintflag(self.index()).read().rxstp().bit()
    }

    /// Acknowledges the signal that a SETUP packet was received
    /// successfully.
    #[inline]
    fn clear_received_setup_interrupt(&self) {
        // Clear bits in epintflag by writing them to 1
        self.epintflag(self.index()).write(|w| w.rxstp().set_bit());
    }

    /// Writes out endpoint configuration to its in-memory descriptor.
    fn flush_config(&mut self) {
        let config = *self.config();
        {
            let desc = self.desc_bank();
            desc.set_address(config.addr as *mut u8);
            desc.set_endpoint_size(config.max_packet_size);
            desc.set_multi_packet_size(0);
            desc.set_byte_count(0);
        }
    }

    /// Enables endpoint-specific interrupts.
    fn setup_ep_interrupts(&mut self) {
        self.epintenset(self.index())
            .write(|w| w.rxstp().set_bit().trcpt0().set_bit());
    }

    /// Copies data from the bank0 buffer to the provided array. The caller
    /// must call set_ready to indicate the buffer is free for the next
    /// transfer.
    ///
    /// Fails with `BufferOverflow` when `buf` is smaller than the received
    /// byte count; the descriptor's byte count is zeroed after the copy.
    pub fn read(&mut self, buf: &mut [u8]) -> UsbResult<usize> {
        let desc = self.desc_bank();
        let size = desc.get_byte_count() as usize;
        if size > buf.len() {
            return Err(UsbError::BufferOverflow);
        }
        unsafe {
            desc.get_address()
                .copy_to_nonoverlapping(buf.as_mut_ptr(), size);
        }
        desc.set_byte_count(0);
        desc.set_multi_packet_size(0);
        Ok(size)
    }

    // True when the hardware has flagged a STALL handshake on bank 0.
    fn is_stalled(&self) -> bool {
        self.epintflag(self.index()).read().stall0().bit()
    }

    // Requests (or withdraws) a STALL on bank 0 via the STALLRQ0 bit.
    fn set_stall(&mut self, stall: bool) {
        if stall {
            self.epstatusset(self.index())
                .write(|w| w.stallrq0().set_bit())
        } else {
            self.epstatusclr(self.index())
                .write(|w| w.stallrq0().set_bit())
        }
    }
}
impl<'a, T> Bank<'a, T> {
    // Per-endpoint register accessors shared by both bank directions;
    // see the `ep!` macro for how an index selects the numbered register.
    ep!(epcfg, EPCFG, epcfg0, epcfg1, epcfg2, epcfg3, epcfg4, epcfg5, epcfg6, epcfg7);
    ep!(
        epstatusclr,
        EPSTATUSCLR,
        epstatusclr0,
        epstatusclr1,
        epstatusclr2,
        epstatusclr3,
        epstatusclr4,
        epstatusclr5,
        epstatusclr6,
        epstatusclr7
    );
    ep!(
        epstatusset,
        EPSTATUSSET,
        epstatusset0,
        epstatusset1,
        epstatusset2,
        epstatusset3,
        epstatusset4,
        epstatusset5,
        epstatusset6,
        epstatusset7
    );
    ep!(
        epstatus, EPSTATUS, epstatus0, epstatus1, epstatus2, epstatus3, epstatus4, epstatus5,
        epstatus6, epstatus7
    );
    ep!(
        epintflag, EPINTFLAG, epintflag0, epintflag1, epintflag2, epintflag3, epintflag4,
        epintflag5, epintflag6, epintflag7
    );
    ep!(
        epintenclr,
        EPINTENCLR,
        epintenclr0,
        epintenclr1,
        epintenclr2,
        epintenclr3,
        epintenclr4,
        epintenclr5,
        epintenclr6,
        epintenclr7
    );
    ep!(
        epintenset,
        EPINTENSET,
        epintenset0,
        epintenset1,
        epintenset2,
        epintenset3,
        epintenset4,
        epintenset5,
        epintenset6,
        epintenset7
    );
}
impl Inner {
    // Register accessors used outside of a `Bank` borrow.
    ep!(epcfg, EPCFG, epcfg0, epcfg1, epcfg2, epcfg3, epcfg4, epcfg5, epcfg6, epcfg7);
    ep!(
        epstatus, EPSTATUS, epstatus0, epstatus1, epstatus2, epstatus3, epstatus4, epstatus5,
        epstatus6, epstatus7
    );
    ep!(
        epintflag, EPINTFLAG, epintflag0, epintflag1, epintflag2, epintflag3, epintflag4,
        epintflag5, epintflag6, epintflag7
    );

    /// Borrows the OUT-side (bank 0) view of endpoint `ep`; fails when the
    /// address is an IN address or the bank was never configured.
    fn bank0(&'_ self, ep: EndpointAddress) -> UsbResult<Bank<'_, OutBank>> {
        if ep.is_in() {
            return Err(UsbError::InvalidEndpoint);
        }
        let endpoints = self.endpoints.borrow();

        if endpoints.endpoints[ep.index()].bank0.ep_type == EndpointTypeBits::Disabled {
            return Err(UsbError::InvalidEndpoint);
        }
        Ok(Bank {
            address: ep,
            usb: self.usb(),
            desc: self.desc.borrow_mut(),
            endpoints,
            _phantom: PhantomData,
        })
    }

    /// Borrows the IN-side (bank 1) view of endpoint `ep`; fails when the
    /// address is an OUT address or the bank was never configured.
    fn bank1(&'_ self, ep: EndpointAddress) -> UsbResult<Bank<'_, InBank>> {
        if ep.is_out() {
            return Err(UsbError::InvalidEndpoint);
        }
        let endpoints = self.endpoints.borrow();

        if endpoints.endpoints[ep.index()].bank1.ep_type == EndpointTypeBits::Disabled {
            return Err(UsbError::InvalidEndpoint);
        }
        Ok(Bank {
            address: ep,
            usb: self.usb(),
            desc: self.desc.borrow_mut(),
            endpoints,
            _phantom: PhantomData,
        })
    }
}
impl UsbBus {
pub fn new(
_clock: &clock::UsbClock,
pm: &mut PM,
dm_pad: impl AnyPin<Id = PA24>,
dp_pad: impl AnyPin<Id = PA25>,
_usb: USB,
) -> Self {
pm.apbbmask.modify(|_, w| w.usb_().set_bit());
let desc = RefCell::new(Descriptors::new());
let inner = Inner {
_dm_pad: dm_pad.into().into_mode::<AlternateG>(),
_dp_pad: dp_pad.into().into_mode::<AlternateG>(),
desc,
buffers: RefCell::new(BufferAllocator::new()),
endpoints: RefCell::new(AllEndpoints::new()),
};
Self {
inner: Mutex::new(RefCell::new(inner)),
}
}
}
impl Inner {
    /// Device-mode view of the USB register block.
    fn usb(&self) -> &DEVICE {
        unsafe { (*USB::ptr()).device() }
    }

    /// Requests or clears a stall on the bank matching the endpoint's
    /// direction; an unconfigured endpoint is silently ignored.
    fn set_stall<EP: Into<EndpointAddress>>(&self, ep: EP, stall: bool) {
        let ep = ep.into();
        if ep.is_out() {
            if let Ok(mut bank) = self.bank0(ep) {
                bank.set_stall(stall);
            }
            return;
        }
        if let Ok(mut bank) = self.bank1(ep) {
            bank.set_stall(stall);
        }
    }
}
// Selects how much endpoint state `flush_eps` (re)writes to the hardware.
#[derive(Copy, Clone)]
enum FlushConfigMode {
    // Write configuration to all configured endpoints.
    Full,
    // Refresh configuration which was reset due to a bus reset.
    ProtocolReset,
}
impl Inner {
fn enable(&mut self) {
let usb = self.usb();
usb.ctrla.modify(|_, w| w.swrst().set_bit());
while usb.syncbusy.read().swrst().bit_is_set() {}
let addr = self.desc.borrow().address();
usb.descadd.write(|w| unsafe { w.descadd().bits(addr) });
usb.padcal.modify(|_, w| unsafe {
w.transn().bits(usb_transn_cal());
w.transp().bits(usb_transp_cal());
w.trim().bits(usb_trim_cal())
});
usb.qosctrl.modify(|_, w| {
w.dqos().bits(0b11);
w.cqos().bits(0b11)
});
usb.ctrla.modify(|_, w| {
w.mode().device();
w.runstdby().set_bit()
});
// full speed
usb.ctrlb.modify(|_, w| w.spdconf().fs());
usb.ctrla.modify(|_, w| w.enable().set_bit());
while usb.syncbusy.read().enable().bit_is_set() {}
// Clear pending.
usb.intflag
.write(|w| unsafe { w.bits(usb.intflag.read().bits()) });
usb.intenset.write(|w| w.eorst().set_bit());
// Configure the endpoints before we attach, as hosts may enumerate
// before attempting a USB protocol reset.
self.flush_eps(FlushConfigMode::Full);
usb.ctrlb.modify(|_, w| w.detach().clear_bit());
}
/// Enables/disables the Start Of Frame (SOF) interrupt
fn sof_interrupt(&self, enable: bool) {
if enable {
self.usb().intenset.write(|w| w.sof().set_bit());
} else {
self.usb().intenclr.write(|w| w.sof().set_bit());
}
}
/// Configures all endpoints based on prior calls to alloc_ep().
fn flush_eps(&self, mode: FlushConfigMode) {
for idx in 0..8 {
match (mode, idx) {
// A flush due to a protocol reset need not reconfigure endpoint 0,
// except for enabling its interrupts.
(FlushConfigMode::ProtocolReset, 0) => {
self.setup_ep_interrupts(EndpointAddress::from_parts(idx, UsbDirection::Out));
self.setup_ep_interrupts(EndpointAddress::from_parts(idx, UsbDirection::In));
}
// A full flush configures all provisioned endpoints + enables interrupts.
// Endpoints 1-8 have identical behaviour when flushed due to protocol reset.
(FlushConfigMode::Full, _) | (FlushConfigMode::ProtocolReset, _) => {
// Write bank configuration & endpoint type.
self.flush_ep(idx);
// Endpoint interrupts are configured after the write to EPTYPE, as it appears
// writes to EPINTEN*[n] do not take effect unless the
// endpoint is already somewhat configured. The datasheet is
// ambiguous here, section 38.8.3.7 (Device Interrupt EndPoint Set n)
// of the SAM D5x/E5x states:
// "This register is cleared by USB reset or when EPEN[n] is zero"
// EPEN[n] is not a register that exists, nor does it align with any other
// terminology. We assume this means setting EPCFG[n] to a
// non-zero value, but we do interrupt configuration last to
// be sure.
self.setup_ep_interrupts(EndpointAddress::from_parts(idx, UsbDirection::Out));
self.setup_ep_interrupts(EndpointAddress::from_parts(idx, UsbDirection::In));
}
}
}
}
/// flush_ep commits bank descriptor information for the endpoint pair,
/// and enables the endpoint according to its type.
fn flush_ep(&self, idx: usize) {
let cfg = self.epcfg(idx);
let info = &self.endpoints.borrow().endpoints[idx];
// Write bank descriptors first. We do this so there is no period in
// which the endpoint is enabled but has an invalid descriptor.
if let Ok(mut bank) = self.bank0(EndpointAddress::from_parts(idx, UsbDirection::Out)) {
bank.flush_config();
}
if let Ok(mut bank) = self.bank1(EndpointAddress::from_parts(idx, UsbDirection::In)) {
bank.flush_config();
}
// Set the endpoint type. At this point, the endpoint is enabled.
cfg.modify(|_, w| unsafe {
w.eptype0()
.bits(info.bank0.ep_type as u8)
.eptype1()
.bits(info.bank1.ep_type as u8)
});
}
/// setup_ep_interrupts enables interrupts for the given endpoint address.
fn setup_ep_interrupts(&self, ep_addr: EndpointAddress) {
if ep_addr.is_out() {
if let Ok(mut bank) = self.bank0(ep_addr) {
bank.setup_ep_interrupts();
}
} else if let Ok(mut bank) = self.bank1(ep_addr) {
bank.setup_ep_interrupts();
}
}
/// protocol_reset is called by the USB HAL when it detects the host has
/// performed a USB reset.
fn protocol_reset(&self) {
self.flush_eps(FlushConfigMode::ProtocolReset);
}
fn suspend(&self) {}
fn resume(&self) {}
fn alloc_ep(
&mut self,
dir: UsbDirection,
addr: Option<EndpointAddress>,
ep_type: EndpointType,
max_packet_size: u16,
interval: u8,
) -> UsbResult<EndpointAddress> {
// The USB hardware encodes the maximum packet size in 3 bits, so
// reserve enough buffer that the hardware won't overwrite it even if
// the other side issues an overly-long transfer.
let allocated_size = match max_packet_size {
1..=8 => 8,
9..=16 => 16,
17..=32 => 32,
33..=64 => 64,
65..=128 => 128,
129..=256 => 256,
257..=512 => 512,
513..=1023 => 1024,
_ => return Err(UsbError::Unsupported),
};
let buffer = self.buffers.borrow_mut().allocate_buffer(allocated_size)?;
let mut endpoints = self.endpoints.borrow_mut();
let idx = match addr {
None => endpoints.find_free_endpoint(dir)?,
Some(addr) => addr.index(),
};
let addr = endpoints.allocate_endpoint(
dir,
idx,
ep_type,
allocated_size,
max_packet_size,
interval,
buffer,
)?;
Ok(addr)
}
fn set_device_address(&self, addr: u8) {
self.usb()
.dadd
.write(|w| unsafe { w.dadd().bits(addr).adden().set_bit() });
}
fn check_sof_interrupt(&self) -> bool {
if self.usb().intflag.read().sof().bit() {
self.usb().intflag.write(|w| w.sof().set_bit());
return true;
}
false
}
fn poll(&self) -> PollResult {
let intflags = self.usb().intflag.read();
if intflags.eorst().bit() {
// end of reset interrupt
self.usb().intflag.write(|w| w.eorst().set_bit());
return PollResult::Reset;
}
// As the suspend & wakup interrupts/states cannot distinguish between
// unconnected & unsuspended, we do not handle them to avoid spurious
// transitions.
let mut ep_out = 0;
let mut ep_in_complete = 0;
let mut ep_setup = 0;
let intbits = self.usb().epintsmry.read().bits();
for ep in 0..8u16 {
let mask = 1 << ep;
let idx = ep as usize;
if (intbits & mask) != 0 {
if let Ok(bank1) = self.bank1(EndpointAddress::from_parts(idx, UsbDirection::In)) {
if bank1.is_transfer_complete() {
bank1.clear_transfer_complete();
ep_in_complete |= mask;
}
}
}
// Can't test intbits, because bk0rdy doesn't interrupt
if let Ok(bank0) = self.bank0(EndpointAddress::from_parts(idx, UsbDirection::Out)) {
if bank0.received_setup_interrupt() {
ep_setup |= mask;
// The RXSTP interrupt is not cleared here, because doing so
// would allow the USB hardware to overwrite the received
// data, potentially before it is `read()` - see SAMD21
// datasheet "32.6.2.6 Management of SETUP Transactions".
// Setup events are only relevant for control endpoints, and
// in typical USB devices, endpoint 0 is the only control
// endpoint. The usb-device `poll()` method, which calls
// this `poll()`, will immediately `read()` endpoint 0 when
// its setup bit is set.
}
// Clear the transfer complete and transfer failed interrupt flags
// so that execution leaves the USB interrupt until the host makes
// another transaction. The transfer failed flag may have been set
// if an OUT transaction wasn't read() from the endpoint by the
// Class; the hardware will have NAKed (unless the endpoint is
// isochronous) and the host may retry.
bank0.clear_transfer_complete();
// Use the bk0rdy flag via is_ready() to indicate that data has been
// received successfully, rather than the interrupting trcpt0 via
// is_transfer_ready(), because data may have been received on an
// earlier poll() which cleared trcpt0. bk0rdy is cleared in the
// endpoint read().
if bank0.is_ready() {
ep_out |= mask;
}
}
}
if ep_out == 0 && ep_in_complete == 0 && ep_setup == 0 {
PollResult::None
} else {
PollResult::Data {
ep_out,
ep_in_complete,
ep_setup,
}
}
}
    /// Queues `buf` for transmission on the IN bank (bank 1) of `ep`.
    ///
    /// Returns `WouldBlock` while a previously written packet is still
    /// waiting for the host to pick it up; otherwise copies the data into
    /// the endpoint buffer, clears the stale transfer-complete flag and
    /// arms the bank so the hardware will send it.
    fn write(&self, ep: EndpointAddress, buf: &[u8]) -> UsbResult<usize> {
        let mut bank = self.bank1(ep)?;
        if bank.is_ready() {
            // Waiting for the host to pick up the existing data
            return Err(UsbError::WouldBlock);
        }
        let size = bank.write(buf);
        bank.clear_transfer_complete();
        bank.set_ready(true); // ready to be sent
        size
    }
    /// Reads a received OUT packet (or pending SETUP payload) from bank 0
    /// of `ep` into `buf`.
    ///
    /// Returns `WouldBlock` when neither regular data nor a SETUP packet
    /// has arrived.
    fn read(&self, ep: EndpointAddress, buf: &mut [u8]) -> UsbResult<usize> {
        let mut bank = self.bank0(ep)?;
        let rxstp = bank.received_setup_interrupt();
        if bank.is_ready() || rxstp {
            let size = bank.read(buf);
            // RXSTP is only cleared here, after the SETUP data has been
            // read — clearing it earlier would let the hardware overwrite
            // the buffer (see the matching note in poll()).
            if rxstp {
                bank.clear_received_setup_interrupt();
            }
            bank.clear_transfer_complete();
            // Hand the buffer back to the peripheral for the next OUT packet.
            bank.set_ready(false);
            size
        } else {
            Err(UsbError::WouldBlock)
        }
    }
fn is_stalled(&self, ep: EndpointAddress) -> bool {
if ep.is_out() {
self.bank0(ep).unwrap().is_stalled()
} else {
self.bank1(ep).unwrap().is_stalled()
}
}
    /// Arms or clears the STALL handshake for `ep` by delegating to the
    /// shared register helper.
    fn set_stalled(&self, ep: EndpointAddress, stalled: bool) {
        self.set_stall(ep, stalled);
    }
}
// Public SOF-interrupt helpers. Every access to the shared `inner` state goes
// through `disable_interrupts` so the USB interrupt handler cannot observe a
// half-updated peripheral configuration.
impl UsbBus {
    /// Enables the Start Of Frame (SOF) interrupt
    pub fn enable_sof_interrupt(&self) {
        disable_interrupts(|cs| self.inner.borrow(cs).borrow_mut().sof_interrupt(true))
    }
    /// Disables the Start Of Frame (SOF) interrupt
    pub fn disable_sof_interrupt(&self) {
        disable_interrupts(|cs| self.inner.borrow(cs).borrow_mut().sof_interrupt(false))
    }
    /// Checks, and clears if set, the Start Of Frame (SOF) interrupt flag
    pub fn check_sof_interrupt(&self) -> bool {
        disable_interrupts(|cs| self.inner.borrow(cs).borrow_mut().check_sof_interrupt())
    }
}
// `usb_device` bus interface: every operation is forwarded to the inner
// implementation inside a critical section (interrupts disabled) so that
// application-level calls cannot race the USB interrupt.
impl usb_device::bus::UsbBus for UsbBus {
    fn enable(&mut self) {
        disable_interrupts(|cs| self.inner.borrow(cs).borrow_mut().enable())
    }
    fn reset(&self) {
        disable_interrupts(|cs| self.inner.borrow(cs).borrow().protocol_reset())
    }
    fn suspend(&self) {
        disable_interrupts(|cs| self.inner.borrow(cs).borrow().suspend())
    }
    fn resume(&self) {
        disable_interrupts(|cs| self.inner.borrow(cs).borrow().resume())
    }
    /// Allocates an endpoint with the given direction, optional fixed
    /// address, type, packet size and polling interval.
    fn alloc_ep(
        &mut self,
        dir: UsbDirection,
        addr: Option<EndpointAddress>,
        ep_type: EndpointType,
        max_packet_size: u16,
        interval: u8,
    ) -> UsbResult<EndpointAddress> {
        disable_interrupts(|cs| {
            self.inner.borrow(cs).borrow_mut().alloc_ep(
                dir,
                addr,
                ep_type,
                max_packet_size,
                interval,
            )
        })
    }
    fn set_device_address(&self, addr: u8) {
        disable_interrupts(|cs| self.inner.borrow(cs).borrow().set_device_address(addr))
    }
    fn poll(&self) -> PollResult {
        disable_interrupts(|cs| self.inner.borrow(cs).borrow().poll())
    }
    fn write(&self, ep: EndpointAddress, buf: &[u8]) -> UsbResult<usize> {
        disable_interrupts(|cs| self.inner.borrow(cs).borrow().write(ep, buf))
    }
    fn read(&self, ep: EndpointAddress, buf: &mut [u8]) -> UsbResult<usize> {
        disable_interrupts(|cs| self.inner.borrow(cs).borrow().read(ep, buf))
    }
    fn set_stalled(&self, ep: EndpointAddress, stalled: bool) {
        disable_interrupts(|cs| self.inner.borrow(cs).borrow().set_stalled(ep, stalled))
    }
    fn is_stalled(&self, ep: EndpointAddress) -> bool {
        disable_interrupts(|cs| self.inner.borrow(cs).borrow().is_stalled(ep))
    }
}
| 31.486647 | 124 | 0.565577 |
506e7a00c75bcc72271c6f289984e1c5db52618e
| 665 |
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(optin_builtin_traits)]
#![crate_type = "rlib"]
use std::marker::MarkerTrait;
// Marker trait with a default impl using the historical
// `optin_builtin_traits` syntax: `impl DefaultedTrait for .. {}` makes every
// type implement the trait unless it explicitly opts out. This is a
// compile-test fixture, not runtime code.
pub trait DefaultedTrait : MarkerTrait { }
impl DefaultedTrait for .. { }
// NOTE(review): field `t` appears deliberately private — downstream crates can
// name `Something<T>` but not construct it directly; confirm against the test
// that consumes this fixture.
pub struct Something<T> { t: T }
| 33.25 | 68 | 0.732331 |
71427130ad007cd0cca38760cb21be9c7f7a8eda
| 3,178 |
use serde::{Serialize, Deserialize};
use actix_web::{ HttpResponse, web};
use crate::mpv;
use crate::library;
pub async fn request_property(body: web::Bytes) -> HttpResponse {
let result : PropertyComand = serde_json::from_str(std::str::from_utf8(&body).unwrap()).unwrap();
let command = &result.property;
let mpv_response : mpv::mpv::Property;
match command.as_ref() {
"time-pos" => {
match result.value {
None => mpv_response = mpv::mpv::event_property("time-pos".to_string(), None),
Some(value) => {
mpv_response = mpv::mpv::event_property("time-pos".to_string(), Some(value))
},
};
},
"duration" => {
mpv_response = mpv::mpv::event_property("duration".to_string(), None)
}
_ => {
let tjson = json!({ "error": "property not allowed" });
return HttpResponse::MethodNotAllowed().json(tjson)
},
}
let err_property :String = mpv_response.error.to_string();
if err_property != "success".to_string() {
let tjson = json!({ "error": "Something went wrong" });
return HttpResponse::InternalServerError().json(tjson)
}
HttpResponse::Ok().json(mpv_response) // <- send response
}
/// JSON body accepted by `request_player`:
/// `{ "command": "...", "value": "..." }` where `value` is only required by
/// the `status` and `play` commands.
// NOTE(review): "Comand" is a typo, but renaming the type would touch every
// caller, so the name is kept for compatibility (the wire format only depends
// on the field names).
#[derive( Debug, Serialize, Deserialize)]
pub struct PlayerComand {
    // Action to perform: "pause", "status", "stop", "play" or "resume".
    pub command : String,
    // Command argument (e.g. the media target for "play"/"status").
    pub value : Option<String>
}
pub async fn request_player(body: web::Bytes) -> HttpResponse {
let result : PlayerComand = serde_json::from_str(std::str::from_utf8(&body).unwrap()).unwrap();
let command = &result.command;
let mpv_response :mpv::mpv::Property;
match command.as_ref() {
"pause" => mpv_response = mpv::mpv::event_pause(),
"status" => {
let target = match result.value {
Some(v) => v ,
None => {
let tjson = json!({ "error": "target undefined" });
return HttpResponse::BadRequest().json(tjson)
},
};
mpv_response = library::get_video_status(target);
},
"stop" => mpv_response = mpv::mpv::event_stop(),
"play" => {
let target = match result.value {
Some(v) => v ,
None => {
let tjson = json!({ "error": "target undefined" });
return HttpResponse::BadRequest().json(tjson)
},
};
mpv_response = mpv::mpv::event_load(target)
},
"resume" => mpv_response = mpv::mpv::event_resume(),
_ => {
let tjson = json!({ "error": "method not allowed" });
return HttpResponse::MethodNotAllowed().json(tjson)
},
}
if mpv_response.error.replace("\"", "") != "success" {
return HttpResponse::InternalServerError().json(&mpv_response)
}
HttpResponse::Ok().json(mpv_response) // <- send response
}
/// Query/path parameter carrying a media target.
#[derive(Deserialize)]
pub struct Info {
    // Identifier of the media item the request refers to.
    pub target: String,
}
/// JSON body accepted by `request_property`:
/// `{ "property": "...", "value": "..." }`.
// NOTE(review): "Comand" is a typo kept for API compatibility; the JSON wire
// format only depends on the field names.
#[derive( Debug, Serialize, Deserialize)]
pub struct PropertyComand {
    // Property name, e.g. "time-pos" or "duration".
    pub property : String,
    // Optional value to set (e.g. seek position for "time-pos").
    pub value : Option<String>
}
| 33.104167 | 101 | 0.557269 |
5b01fedda5bed417c837d05daa9cb85c6e4c2d32
| 4,119 |
//! This module implements the global `Boolean` object.
//!
//! The `Boolean` object is an object wrapper for a boolean value.
//!
//! More information:
//! - [ECMAScript reference][spec]
//! - [MDN documentation][mdn]
//!
//! [spec]: https://tc39.es/ecma262/#sec-boolean-object
//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Boolean
#[cfg(test)]
mod tests;
use crate::{
builtins::{
object::{internal_methods_trait::ObjectInternalMethods, Object, ObjectKind, PROTOTYPE},
value::{to_value, ResultValue, Value, ValueData},
},
exec::Interpreter,
};
use std::{borrow::Borrow, ops::Deref};
/// `Boolean` constructor - [[Construct]]: tags `this` as a Boolean object and
/// stores the coerced argument (default `false`) in its `BooleanData` slot.
pub fn construct_boolean(this: &mut Value, args: &[Value], _: &mut Interpreter) -> ResultValue {
    this.set_kind(ObjectKind::Boolean);
    // Coerce the first argument if present, otherwise default to `false`.
    let data = match args.first() {
        Some(value) => to_boolean(value),
        None => to_boolean(&to_value(false)),
    };
    this.set_internal_slot("BooleanData", data);
    // `this` is shared by reference; the clone is only the return value.
    Ok(this.clone())
}
/// Return a boolean literal [[Call]]
pub fn call_boolean(_: &mut Value, args: &[Value], _: &mut Interpreter) -> ResultValue {
// Get the argument, if any
match args.get(0) {
Some(ref value) => Ok(to_boolean(value)),
None => Ok(to_boolean(&to_value(false))),
}
}
/// `Boolean.prototype.toString()`: renders the wrapped boolean as `"true"`
/// or `"false"`.
///
/// More information:
///  - [ECMAScript reference][spec]
///  - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#sec-boolean-object
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Boolean/toString
pub fn to_string(this: &mut Value, _: &[Value], _: &mut Interpreter) -> ResultValue {
    Ok(to_value(this_boolean_value(this).to_string()))
}
/// `Boolean.prototype.valueOf()`: unwraps the primitive boolean from a
/// `Boolean` object (or passes a plain boolean through).
///
/// More information:
///  - [ECMAScript reference][spec]
///  - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#sec-boolean.prototype.valueof
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Boolean/valueOf
pub fn value_of(this: &mut Value, _: &[Value], _: &mut Interpreter) -> ResultValue {
    let primitive = this_boolean_value(this);
    Ok(primitive)
}
// === Utility Functions ===
/// [toBoolean](https://tc39.es/ecma262/#sec-toboolean)
/// Coerces any value to a boolean `Value` following ECMAScript truthiness:
/// objects and non-empty strings are true; zero, NaN and everything else
/// (undefined, null, ...) are false.
pub fn to_boolean(value: &Value) -> Value {
    let truthy = match *value.deref().borrow() {
        ValueData::Boolean(b) => b,
        ValueData::Object(_) => true,
        ValueData::String(ref s) => !s.is_empty(),
        ValueData::Rational(n) => n != 0.0 && !n.is_nan(),
        ValueData::Integer(n) => n != 0,
        _ => false,
    };
    to_value(truthy)
}
/// Extracts the primitive boolean from `value`
/// ([thisBooleanValue](https://tc39.es/ecma262/#sec-thisbooleanvalue)):
/// a plain boolean is returned as-is, a Boolean object yields its
/// `BooleanData` internal slot, and anything else defaults to `false`.
pub fn this_boolean_value(value: &Value) -> Value {
    match *value.deref().borrow() {
        ValueData::Boolean(b) => to_value(b),
        ValueData::Object(ref obj) => obj.deref().borrow().get_internal_slot("BooleanData"),
        _ => to_value(false),
    }
}
/// Create a new `Boolean` object.
///
/// Builds `Boolean.prototype` (with `toString`/`valueOf` and a default
/// `BooleanData` of `false`) and returns the constructor function wired to
/// both the [[Call]] and [[Construct]] behaviours above.
pub fn create(global: &Value) -> Value {
    // Create Prototype
    // https://tc39.es/ecma262/#sec-properties-of-the-boolean-prototype-object
    let prototype = ValueData::new_obj(Some(global));
    prototype.set_internal_slot("BooleanData", to_boolean(&to_value(false)));
    make_builtin_fn!(to_string, named "toString", of prototype);
    make_builtin_fn!(value_of, named "valueOf", of prototype);
    make_constructor_fn!(construct_boolean, call_boolean, global, prototype)
}
/// Registers the `Boolean` constructor on the global object.
#[inline]
pub fn init(global: &Value) {
    let constructor = create(global);
    global.set_field_slice("Boolean", constructor);
}
| 34.613445 | 108 | 0.663511 |
69ccecf88f6dc1f8a6cd3e9a0194c231f0dd6c26
| 1,291 |
use stm32f3xx_hal::{
prelude::*,
gpio::{self},
};
/// Edge-detection result for a single poll of the button pin.
enum ButtonAction {
    /// Level unchanged since the last poll (steady state).
    NoAction,
    /// Rising edge: the button has just been pressed.
    Pressed,
    /// Falling edge: the button has just been released.
    Released
}
/// Edge-triggered wrapper around the user button on pin PA0.
pub struct ResetButton {
    // Input pin the button is wired to.
    pa0: gpio::gpioa::PA0<gpio::Input>,
    // Last observed state: `true` while the button is considered held down.
    pressed: bool
}
impl ResetButton {
    /// Wraps the PA0 button pin, starting in the released state.
    pub fn new(pa0: gpio::gpioa::PA0<gpio::Input>) -> Self {
        ResetButton { pa0, pressed: false }
    }
    /// Polls the button and invokes `on_press` exactly once per press
    /// (i.e. only on the rising edge, not while the button is held).
    pub fn check_reset_press<F>(&mut self, on_press: F) where F: FnOnce(){
        match self.get_button_state() {
            ButtonAction::Pressed => {
                // Latch the press so a held button does not re-trigger.
                self.pressed = true;
                on_press();
            }
            ButtonAction::Released => self.pressed = false,
            ButtonAction::NoAction => {}
        }
    }
    /// Compares the current pin level against the latched state to detect
    /// rising and falling edges.
    fn get_button_state(&mut self) -> ButtonAction {
        let level_high = self.pa0.is_high().unwrap();
        match (self.pressed, level_high) {
            // Not latched but pin high: a new press.
            (false, true) => ButtonAction::Pressed,
            // Latched but pin low: the button was let go.
            (true, false) => ButtonAction::Released,
            // Steady high-while-latched or low-while-unlatched: nothing new.
            _ => ButtonAction::NoAction,
        }
    }
}
| 20.492063 | 74 | 0.506584 |
6a1069aa9e02bee60fa96c69691166b8dce2ff48
| 35,730 |
use forward_ref::{forward_ref_binop, forward_ref_op_assign};
use schemars::JsonSchema;
use serde::{de, ser, Deserialize, Deserializer, Serialize};
use std::fmt;
use std::ops::{
Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, RemAssign, Shr, ShrAssign, Sub, SubAssign,
};
use std::str::FromStr;
use crate::errors::{
ConversionOverflowError, DivideByZeroError, OverflowError, OverflowOperation, StdError,
};
use crate::{Uint128, Uint256, Uint64};
/// This module is purely a workaround that lets us ignore lints for all the code
/// the `construct_uint!` macro generates.
#[allow(clippy::all)]
mod uints {
    // 8 limbs × 64 bits = 512 bits; the macro stores limb 0 as least significant.
    uint::construct_uint! {
        pub struct U512(8);
    }
}
/// Used internally - we don't want to leak this type since we might change
/// the implementation in the future.
use uints::U512;
/// An implementation of u512 that is using strings for JSON encoding/decoding,
/// such that the full u512 range can be used for clients that convert JSON numbers to floats,
/// like JavaScript and jq.
///
/// # Examples
///
/// Use `from` to create instances out of primitive uint types or `new` to provide big
/// endian bytes:
///
/// ```
/// # use cosmwasm_std::Uint512;
/// let a = Uint512::from(258u128);
/// let b = Uint512::new([
/// 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
/// 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
/// 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
/// 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
/// 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
/// 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
/// 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
/// 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 1u8, 2u8,
/// ]);
/// assert_eq!(a, b);
/// ```
#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, JsonSchema)]
// Newtype over the macro-generated `U512`. schemars sees it as a `String`,
// matching the serde impls below which (de)serialize via base-10 strings.
pub struct Uint512(#[schemars(with = "String")] U512);
impl Uint512 {
    pub const MAX: Uint512 = Uint512(U512::MAX);
    /// Creates a Uint512(value) from a big endian representation. It's just an alias for
    /// `from_big_endian`.
    pub const fn new(value: [u8; 64]) -> Self {
        Self::from_be_bytes(value)
    }
    /// Creates a Uint512(0)
    pub const fn zero() -> Self {
        Uint512(U512::zero())
    }
    // The inner `U512` stores eight u64 limbs with limb 0 least significant,
    // so big-endian input is consumed from the tail of the buffer, with each
    // 8-byte group byte-reversed via `from_le_bytes` on the swapped order.
    // Written out manually because this must be usable in `const` contexts.
    pub const fn from_be_bytes(data: [u8; 64]) -> Self {
        let words: [u64; 8] = [
            u64::from_le_bytes([
                data[63], data[62], data[61], data[60], data[59], data[58], data[57], data[56],
            ]),
            u64::from_le_bytes([
                data[55], data[54], data[53], data[52], data[51], data[50], data[49], data[48],
            ]),
            u64::from_le_bytes([
                data[47], data[46], data[45], data[44], data[43], data[42], data[41], data[40],
            ]),
            u64::from_le_bytes([
                data[39], data[38], data[37], data[36], data[35], data[34], data[33], data[32],
            ]),
            u64::from_le_bytes([
                data[31], data[30], data[29], data[28], data[27], data[26], data[25], data[24],
            ]),
            u64::from_le_bytes([
                data[23], data[22], data[21], data[20], data[19], data[18], data[17], data[16],
            ]),
            u64::from_le_bytes([
                data[15], data[14], data[13], data[12], data[11], data[10], data[9], data[8],
            ]),
            u64::from_le_bytes([
                data[7], data[6], data[5], data[4], data[3], data[2], data[1], data[0],
            ]),
        ];
        Self(U512(words))
    }
    // Little-endian input maps directly: limb i = data[8*i .. 8*i + 8].
    pub const fn from_le_bytes(data: [u8; 64]) -> Self {
        let words: [u64; 8] = [
            u64::from_le_bytes([
                data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],
            ]),
            u64::from_le_bytes([
                data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],
            ]),
            u64::from_le_bytes([
                data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],
            ]),
            u64::from_le_bytes([
                data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],
            ]),
            u64::from_le_bytes([
                data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],
            ]),
            u64::from_le_bytes([
                data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],
            ]),
            u64::from_le_bytes([
                data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],
            ]),
            u64::from_le_bytes([
                data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],
            ]),
        ];
        Self(U512(words))
    }
    /// A conversion from `Uint256` that, unlike the one provided by the `From` trait,
    /// can be used in a `const` context.
    pub const fn from_uint256(num: Uint256) -> Self {
        let bytes = num.to_le_bytes();
        // Little-endian widening: the 32 source bytes become the low half,
        // the upper 32 bytes are zero.
        Self::from_le_bytes([
            bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7],
            bytes[8], bytes[9], bytes[10], bytes[11], bytes[12], bytes[13], bytes[14], bytes[15],
            bytes[16], bytes[17], bytes[18], bytes[19], bytes[20], bytes[21], bytes[22], bytes[23],
            bytes[24], bytes[25], bytes[26], bytes[27], bytes[28], bytes[29], bytes[30], bytes[31],
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ])
    }
    /// Returns a copy of the number as big endian bytes.
    pub const fn to_be_bytes(self) -> [u8; 64] {
        // Most significant limb first, each limb rendered big-endian.
        let words = [
            (self.0).0[7].to_be_bytes(),
            (self.0).0[6].to_be_bytes(),
            (self.0).0[5].to_be_bytes(),
            (self.0).0[4].to_be_bytes(),
            (self.0).0[3].to_be_bytes(),
            (self.0).0[2].to_be_bytes(),
            (self.0).0[1].to_be_bytes(),
            (self.0).0[0].to_be_bytes(),
        ];
        // SAFETY: `[[u8; 8]; 8]` and `[u8; 64]` have identical size (64 bytes),
        // alignment (1) and no padding, so this reinterpretation is sound.
        unsafe { std::mem::transmute::<[[u8; 8]; 8], [u8; 64]>(words) }
    }
    /// Returns a copy of the number as little endian bytes.
    pub const fn to_le_bytes(self) -> [u8; 64] {
        // Least significant limb first, each limb rendered little-endian.
        let words = [
            (self.0).0[0].to_le_bytes(),
            (self.0).0[1].to_le_bytes(),
            (self.0).0[2].to_le_bytes(),
            (self.0).0[3].to_le_bytes(),
            (self.0).0[4].to_le_bytes(),
            (self.0).0[5].to_le_bytes(),
            (self.0).0[6].to_le_bytes(),
            (self.0).0[7].to_le_bytes(),
        ];
        // SAFETY: same layout argument as in `to_be_bytes`.
        unsafe { std::mem::transmute::<[[u8; 8]; 8], [u8; 64]>(words) }
    }
    // Limb-wise zero check, spelled out so it stays usable in `const` contexts.
    pub const fn is_zero(&self) -> bool {
        let words = (self.0).0;
        words[0] == 0
            && words[1] == 0
            && words[2] == 0
            && words[3] == 0
            && words[4] == 0
            && words[5] == 0
            && words[6] == 0
            && words[7] == 0
    }
    // Exponentiation via the inner type; presumably panics on overflow like
    // the checked variant suggests — use `checked_pow` for a fallible version.
    pub fn pow(self, exp: u32) -> Self {
        let res = self.0.pow(exp.into());
        Self(res)
    }
    // The checked_* family maps the inner type's `Option`/failure results onto
    // this crate's typed errors, recording both operands for the message.
    pub fn checked_add(self, other: Self) -> Result<Self, OverflowError> {
        self.0
            .checked_add(other.0)
            .map(Self)
            .ok_or_else(|| OverflowError::new(OverflowOperation::Add, self, other))
    }
    pub fn checked_sub(self, other: Self) -> Result<Self, OverflowError> {
        self.0
            .checked_sub(other.0)
            .map(Self)
            .ok_or_else(|| OverflowError::new(OverflowOperation::Sub, self, other))
    }
    pub fn checked_mul(self, other: Self) -> Result<Self, OverflowError> {
        self.0
            .checked_mul(other.0)
            .map(Self)
            .ok_or_else(|| OverflowError::new(OverflowOperation::Mul, self, other))
    }
    pub fn checked_pow(self, exp: u32) -> Result<Self, OverflowError> {
        self.0
            .checked_pow(exp.into())
            .map(Self)
            .ok_or_else(|| OverflowError::new(OverflowOperation::Pow, self, exp))
    }
    pub fn checked_div(self, other: Self) -> Result<Self, DivideByZeroError> {
        self.0
            .checked_div(other.0)
            .map(Self)
            .ok_or_else(|| DivideByZeroError::new(self))
    }
    pub fn checked_rem(self, other: Self) -> Result<Self, DivideByZeroError> {
        self.0
            .checked_rem(other.0)
            .map(Self)
            .ok_or_else(|| DivideByZeroError::new(self))
    }
    // Shifts of 512 bits or more are rejected up front: they would discard
    // the entire value and the inner type does not guard against them.
    pub fn checked_shr(self, other: u32) -> Result<Self, OverflowError> {
        if other >= 512 {
            return Err(OverflowError::new(OverflowOperation::Shr, self, other));
        }
        Ok(Self(self.0.shr(other)))
    }
    // Saturating arithmetic clamps at the type's bounds instead of erroring.
    pub fn saturating_add(self, other: Self) -> Self {
        Self(self.0.saturating_add(other.0))
    }
    pub fn saturating_sub(self, other: Self) -> Self {
        Self(self.0.saturating_sub(other.0))
    }
    pub fn saturating_mul(self, other: Self) -> Self {
        Self(self.0.saturating_mul(other.0))
    }
}
impl From<Uint256> for Uint512 {
    /// Lossless widening conversion from `Uint256`.
    ///
    /// Delegates to the const helper [`Uint512::from_uint256`], which zero-
    /// extends the value. This replaces the previous
    /// `[[0u8; 32], val.to_be_bytes()].concat()` approach, which produced the
    /// same result but allocated an intermediate `Vec` on every conversion.
    fn from(val: Uint256) -> Self {
        Self::from_uint256(val)
    }
}
// Lossless widening conversions from the smaller cosmwasm uints and the
// primitive unsigned integers; all go through the inner `U512::from`.
impl From<Uint128> for Uint512 {
    fn from(val: Uint128) -> Self {
        val.u128().into()
    }
}
impl From<Uint64> for Uint512 {
    fn from(val: Uint64) -> Self {
        val.u64().into()
    }
}
impl From<u128> for Uint512 {
    fn from(val: u128) -> Self {
        Uint512(val.into())
    }
}
impl From<u64> for Uint512 {
    fn from(val: u64) -> Self {
        Uint512(val.into())
    }
}
impl From<u32> for Uint512 {
    fn from(val: u32) -> Self {
        Uint512(val.into())
    }
}
impl From<u16> for Uint512 {
    fn from(val: u16) -> Self {
        Uint512(val.into())
    }
}
impl From<u8> for Uint512 {
    fn from(val: u8) -> Self {
        Uint512(val.into())
    }
}
// Narrowing conversions fail with `ConversionOverflowError` when the value
// does not fit the target type.
impl TryFrom<Uint512> for Uint256 {
    type Error = ConversionOverflowError;
    fn try_from(value: Uint512) -> Result<Self, Self::Error> {
        let bytes = value.to_be_bytes();
        // Big-endian: the first 32 bytes are the high half, which must be
        // all zero for the value to fit in 256 bits.
        let (first_bytes, last_bytes) = bytes.split_at(32);
        if first_bytes != [0u8; 32] {
            return Err(ConversionOverflowError::new(
                "Uint512",
                "Uint256",
                value.to_string(),
            ));
        }
        Ok(Self::from_be_bytes(last_bytes.try_into().unwrap()))
    }
}
impl TryFrom<Uint512> for Uint128 {
    type Error = ConversionOverflowError;
    fn try_from(value: Uint512) -> Result<Self, Self::Error> {
        // The inner `U512` provides a fallible narrowing to u128.
        Ok(Uint128::new(value.0.try_into().map_err(|_| {
            ConversionOverflowError::new("Uint512", "Uint128", value.to_string())
        })?))
    }
}
// Convenience alias so `Uint512::try_from("123")` works; parsing is done by
// the `FromStr` impl below.
impl TryFrom<&str> for Uint512 {
    type Error = StdError;
    fn try_from(val: &str) -> Result<Self, Self::Error> {
        Self::from_str(val)
    }
}
impl FromStr for Uint512 {
    type Err = StdError;
    /// Parses a base-10 decimal string; any parse failure is surfaced as a
    /// generic `StdError` carrying the inner error text.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        U512::from_dec_str(s)
            .map(Self)
            .map_err(|e| StdError::generic_err(format!("Parsing u512: {}", e)))
    }
}
// String conversion goes through `Display`, i.e. base-10 without padding.
impl From<Uint512> for String {
    fn from(original: Uint512) -> Self {
        original.to_string()
    }
}
// Base-10 rendering with proper support for format padding/width flags.
impl fmt::Display for Uint512 {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The inner type doesn't work as expected with padding, so we
        // work around that.
        let unpadded = self.0.to_string();
        // `pad_integral` applies width/fill/alignment; `true` = non-negative.
        f.pad_integral(true, "", &unpadded)
    }
}
// Arithmetic operators: all panic on overflow / division by zero
// (checked op + unwrap), matching the other cosmwasm uint types.
// NOTE(review): Add and Div hand-write their by-reference impls while
// Sub/Mul/Rem use the forward_ref macros — presumably historical; unifying
// on the macros would also add the missing `&T op T` / `&T op &T` forms.
impl Add<Uint512> for Uint512 {
    type Output = Self;
    fn add(self, rhs: Self) -> Self {
        Uint512(self.0.checked_add(rhs.0).unwrap())
    }
}
impl<'a> Add<&'a Uint512> for Uint512 {
    type Output = Self;
    fn add(self, rhs: &'a Uint512) -> Self {
        Uint512(self.0.checked_add(rhs.0).unwrap())
    }
}
impl Sub<Uint512> for Uint512 {
    type Output = Self;
    fn sub(self, rhs: Self) -> Self {
        Uint512(self.0.checked_sub(rhs.0).unwrap())
    }
}
forward_ref_binop!(impl Sub, sub for Uint512, Uint512);
impl SubAssign<Uint512> for Uint512 {
    fn sub_assign(&mut self, rhs: Uint512) {
        self.0 = self.0.checked_sub(rhs.0).unwrap();
    }
}
forward_ref_op_assign!(impl SubAssign, sub_assign for Uint512, Uint512);
impl Div<Uint512> for Uint512 {
    type Output = Self;
    fn div(self, rhs: Self) -> Self::Output {
        Self(self.0.checked_div(rhs.0).unwrap())
    }
}
impl<'a> Div<&'a Uint512> for Uint512 {
    type Output = Self;
    fn div(self, rhs: &'a Uint512) -> Self::Output {
        Self(self.0.checked_div(rhs.0).unwrap())
    }
}
impl Rem for Uint512 {
    type Output = Self;
    /// # Panics
    ///
    /// This operation will panic if `rhs` is zero.
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        Self(self.0.rem(rhs.0))
    }
}
forward_ref_binop!(impl Rem, rem for Uint512, Uint512);
impl RemAssign<Uint512> for Uint512 {
    fn rem_assign(&mut self, rhs: Uint512) {
        *self = *self % rhs;
    }
}
forward_ref_op_assign!(impl RemAssign, rem_assign for Uint512, Uint512);
impl Mul<Uint512> for Uint512 {
    type Output = Self;
    fn mul(self, rhs: Self) -> Self::Output {
        Self(self.0.checked_mul(rhs.0).unwrap())
    }
}
forward_ref_binop!(impl Mul, mul for Uint512, Uint512);
impl MulAssign<Uint512> for Uint512 {
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = self.0.checked_mul(rhs.0).unwrap();
    }
}
forward_ref_op_assign!(impl MulAssign, mul_assign for Uint512, Uint512);
// Right shift panics for shift amounts >= 512 (see `checked_shr`).
impl Shr<u32> for Uint512 {
    type Output = Self;
    fn shr(self, rhs: u32) -> Self::Output {
        self.checked_shr(rhs).unwrap_or_else(|_| {
            panic!(
                "right shift error: {} is larger or equal than the number of bits in Uint512",
                rhs,
            )
        })
    }
}
impl<'a> Shr<&'a u32> for Uint512 {
    type Output = Self;
    fn shr(self, rhs: &'a u32) -> Self::Output {
        Shr::<u32>::shr(self, *rhs)
    }
}
// Compound-assignment forms mirror the binary operators, including their
// panic-on-overflow / panic-on-zero-divisor behaviour.
impl AddAssign<Uint512> for Uint512 {
    fn add_assign(&mut self, rhs: Uint512) {
        self.0 = self.0.checked_add(rhs.0).unwrap();
    }
}
impl<'a> AddAssign<&'a Uint512> for Uint512 {
    fn add_assign(&mut self, rhs: &'a Uint512) {
        self.0 = self.0.checked_add(rhs.0).unwrap();
    }
}
impl DivAssign<Uint512> for Uint512 {
    fn div_assign(&mut self, rhs: Self) {
        self.0 = self.0.checked_div(rhs.0).unwrap();
    }
}
impl<'a> DivAssign<&'a Uint512> for Uint512 {
    fn div_assign(&mut self, rhs: &'a Uint512) {
        self.0 = self.0.checked_div(rhs.0).unwrap();
    }
}
impl ShrAssign<u32> for Uint512 {
    fn shr_assign(&mut self, rhs: u32) {
        *self = Shr::<u32>::shr(*self, rhs);
    }
}
impl<'a> ShrAssign<&'a u32> for Uint512 {
    fn shr_assign(&mut self, rhs: &'a u32) {
        *self = Shr::<u32>::shr(*self, *rhs);
    }
}
// JSON-safe serialization: the value travels as a base-10 string so clients
// that map JSON numbers to floats (JavaScript, jq) never lose precision.
impl Serialize for Uint512 {
    /// Serializes as an integer string using base 10
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ser::Serializer,
    {
        serializer.serialize_str(&self.to_string())
    }
}
impl<'de> Deserialize<'de> for Uint512 {
    /// Deserialized from an integer string using base 10
    fn deserialize<D>(deserializer: D) -> Result<Uint512, D::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_str(Uint512Visitor)
    }
}
// Visitor that only accepts string input and delegates to `TryFrom<&str>`.
struct Uint512Visitor;
impl<'de> de::Visitor<'de> for Uint512Visitor {
    type Value = Uint512;
    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("string-encoded integer")
    }
    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        Uint512::try_from(v).map_err(|e| E::custom(format!("invalid Uint512 '{}' - {}", v, e)))
    }
}
// Summation over anything addable to Uint512 (values or references); an empty
// iterator yields zero. Panics on overflow like `Add` itself.
impl<A> std::iter::Sum<A> for Uint512
where
    Self: Add<A, Output = Self>,
{
    fn sum<I: Iterator<Item = A>>(iter: I) -> Self {
        iter.fold(Self::zero(), Add::add)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{from_slice, to_vec};
#[test]
fn uint512_construct() {
let num = Uint512::new([1; 64]);
let a: [u8; 64] = num.to_be_bytes();
assert_eq!(a, [1; 64]);
let be_bytes = [
0u8, 222u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 1u8, 2u8, 3u8,
];
let num = Uint512::new(be_bytes);
let resulting_bytes: [u8; 64] = num.to_be_bytes();
assert_eq!(be_bytes, resulting_bytes);
}
#[test]
fn uint512_endianness() {
let be_bytes = [
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 1u8, 2u8, 3u8,
];
let le_bytes = [
3u8, 2u8, 1u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
];
// These should all be the same.
let num1 = Uint512::new(be_bytes);
let num2 = Uint512::from_be_bytes(be_bytes);
let num3 = Uint512::from_le_bytes(le_bytes);
assert_eq!(num1, Uint512::from(65536u32 + 512 + 3));
assert_eq!(num1, num2);
assert_eq!(num1, num3);
}
#[test]
fn uint512_convert_from() {
let a = Uint512::from(5u128);
assert_eq!(a.0, U512::from(5));
let a = Uint512::from(5u64);
assert_eq!(a.0, U512::from(5));
let a = Uint512::from(5u32);
assert_eq!(a.0, U512::from(5));
let a = Uint512::from(5u16);
assert_eq!(a.0, U512::from(5));
let a = Uint512::from(5u8);
assert_eq!(a.0, U512::from(5));
let result = Uint512::try_from("34567");
assert_eq!(result.unwrap().0, U512::from_dec_str("34567").unwrap());
let result = Uint512::try_from("1.23");
assert!(result.is_err());
}
#[test]
fn uint512_convert_to_uint128() {
let source = Uint512::from(42u128);
let target = Uint128::try_from(source);
assert_eq!(target, Ok(Uint128::new(42u128)));
let source = Uint512::MAX;
let target = Uint128::try_from(source);
assert_eq!(
target,
Err(ConversionOverflowError::new(
"Uint512",
"Uint128",
Uint512::MAX.to_string()
))
);
}
#[test]
fn uint512_from_uint256() {
assert_eq!(
Uint512::from_uint256(Uint256::from_str("123").unwrap()),
Uint512::from_str("123").unwrap()
);
assert_eq!(
Uint512::from_uint256(Uint256::from_str("9785746283745").unwrap()),
Uint512::from_str("9785746283745").unwrap()
);
assert_eq!(
Uint512::from_uint256(
Uint256::from_str(
"97857462837575757832978493758398593853985452378423874623874628736482736487236"
)
.unwrap()
),
Uint512::from_str(
"97857462837575757832978493758398593853985452378423874623874628736482736487236"
)
.unwrap()
);
}
#[test]
fn uint512_implements_display() {
let a = Uint512::from(12345u32);
assert_eq!(format!("Embedded: {}", a), "Embedded: 12345");
assert_eq!(a.to_string(), "12345");
let a = Uint512::zero();
assert_eq!(format!("Embedded: {}", a), "Embedded: 0");
assert_eq!(a.to_string(), "0");
}
#[test]
fn uint512_display_padding_works() {
let a = Uint512::from(123u64);
assert_eq!(format!("Embedded: {:05}", a), "Embedded: 00123");
}
#[test]
fn uint512_to_be_bytes_works() {
assert_eq!(
Uint512::zero().to_be_bytes(),
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
]
);
assert_eq!(
Uint512::MAX.to_be_bytes(),
[
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
]
);
assert_eq!(
Uint512::from(1u128).to_be_bytes(),
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1
]
);
// Python: `[b for b in (240282366920938463463374607431768124608).to_bytes(64, "big")]`
assert_eq!(
Uint512::from(240282366920938463463374607431768124608u128).to_be_bytes(),
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 180, 196, 179, 87, 165,
121, 59, 133, 246, 117, 221, 191, 255, 254, 172, 192
]
);
assert_eq!(
Uint512::from_be_bytes([
17, 4, 23, 32, 87, 67, 123, 200, 58, 91, 0, 38, 33, 21, 67, 78, 87, 76, 65, 54,
211, 201, 192, 7, 42, 233, 2, 240, 200, 115, 150, 240, 218, 88, 106, 45, 208, 134,
238, 119, 85, 22, 14, 88, 166, 195, 154, 73, 64, 10, 44, 59, 13, 22, 47, 12, 99, 8,
252, 96, 230, 187, 38, 29
])
.to_be_bytes(),
[
17, 4, 23, 32, 87, 67, 123, 200, 58, 91, 0, 38, 33, 21, 67, 78, 87, 76, 65, 54,
211, 201, 192, 7, 42, 233, 2, 240, 200, 115, 150, 240, 218, 88, 106, 45, 208, 134,
238, 119, 85, 22, 14, 88, 166, 195, 154, 73, 64, 10, 44, 59, 13, 22, 47, 12, 99, 8,
252, 96, 230, 187, 38, 29
]
);
}
#[test]
fn uint512_to_le_bytes_works() {
assert_eq!(
Uint512::zero().to_le_bytes(),
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
]
);
assert_eq!(
Uint512::MAX.to_le_bytes(),
[
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
]
);
assert_eq!(
Uint512::from(1u128).to_le_bytes(),
[
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
]
);
// Python: `[b for b in (240282366920938463463374607431768124608).to_bytes(64, "little")]`
assert_eq!(
Uint512::from(240282366920938463463374607431768124608u128).to_le_bytes(),
[
192, 172, 254, 255, 191, 221, 117, 246, 133, 59, 121, 165, 87, 179, 196, 180, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
);
assert_eq!(
Uint512::from_be_bytes([
17, 4, 23, 32, 87, 67, 123, 200, 58, 91, 0, 38, 33, 21, 67, 78, 87, 76, 65, 54,
211, 201, 192, 7, 42, 233, 2, 240, 200, 115, 150, 240, 218, 88, 106, 45, 208, 134,
238, 119, 85, 22, 14, 88, 166, 195, 154, 73, 64, 10, 44, 59, 13, 22, 47, 12, 99, 8,
252, 96, 230, 187, 38, 29
])
.to_le_bytes(),
[
29, 38, 187, 230, 96, 252, 8, 99, 12, 47, 22, 13, 59, 44, 10, 64, 73, 154, 195,
166, 88, 14, 22, 85, 119, 238, 134, 208, 45, 106, 88, 218, 240, 150, 115, 200, 240,
2, 233, 42, 7, 192, 201, 211, 54, 65, 76, 87, 78, 67, 21, 33, 38, 0, 91, 58, 200,
123, 67, 87, 32, 23, 4, 17
]
);
}
#[test]
fn uint512_is_zero_works() {
assert!(Uint512::zero().is_zero());
assert!(Uint512(U512::from(0)).is_zero());
assert!(!Uint512::from(1u32).is_zero());
assert!(!Uint512::from(123u32).is_zero());
}
#[test]
fn uint512_json() {
    // Uint512 serializes to a JSON string literal and round-trips losslessly.
    let original = Uint512::from(1234567890987654321u128);
    let encoded = to_vec(&original).unwrap();
    assert_eq!(encoded.as_slice(), b"\"1234567890987654321\"");
    let decoded: Uint512 = from_slice(&encoded).unwrap();
    assert_eq!(decoded, original);
}
#[test]
fn uint512_compare() {
    // Ordering and equality follow the numeric value.
    let small = Uint512::from(12345u32);
    let large = Uint512::from(23456u32);
    assert!(small < large);
    assert!(large > small);
    assert_eq!(small, Uint512::from(12345u32));
}
#[test]
#[allow(clippy::op_ref)]
fn uint512_math() {
    let x = Uint512::from(12345u32);
    let y = Uint512::from(23456u32);

    // Addition with owned and borrowed right-hand sides.
    let sum = Uint512::from(35801u32);
    assert_eq!(x + y, sum);
    assert_eq!(x + &y, sum);

    // Subtraction with owned and borrowed right-hand sides.
    let diff = Uint512::from(11111u32);
    assert_eq!(y - x, diff);
    assert_eq!(y - &x, diff);

    // += with owned and borrowed right-hand sides.
    let mut acc = Uint512::from(300000u32);
    acc += y;
    assert_eq!(acc, Uint512::from(323456u32));
    let mut acc = Uint512::from(300000u32);
    acc += &y;
    assert_eq!(acc, Uint512::from(323456u32));

    // -= with owned and borrowed right-hand sides.
    let mut acc = Uint512::from(300000u32);
    acc -= y;
    assert_eq!(acc, Uint512::from(276544u32));
    let mut acc = Uint512::from(300000u32);
    acc -= &y;
    assert_eq!(acc, Uint512::from(276544u32));

    // Underflow (a - b with a < b) errors and carries both operands.
    let OverflowError {
        operand1, operand2, ..
    } = x.checked_sub(y).unwrap_err();
    assert_eq!((operand1, operand2), (x.to_string(), y.to_string()));
}
#[test]
#[should_panic]
fn uint512_add_overflow_panics() {
    // All-ones is the maximum 512-bit value; any further addition must panic.
    let all_ones = Uint512::new([255u8; 64]);
    let _ = all_ones + Uint512::from(12u32);
}
#[test]
#[allow(clippy::op_ref)]
fn uint512_sub_works() {
    // Basic subtraction: positive result, identity, and zero result.
    assert_eq!(
        Uint512::from(2u32) - Uint512::from(1u32),
        Uint512::from(1u32)
    );
    assert_eq!(
        Uint512::from(2u32) - Uint512::from(0u32),
        Uint512::from(2u32)
    );
    assert_eq!(
        Uint512::from(2u32) - Uint512::from(2u32),
        Uint512::from(0u32)
    );

    // All four owned/borrowed operand combinations behave alike.
    let minuend = Uint512::from(10u32);
    let subtrahend = Uint512::from(3u32);
    let difference = Uint512::from(7u32);
    assert_eq!(minuend - subtrahend, difference);
    assert_eq!(minuend - &subtrahend, difference);
    assert_eq!(&minuend - subtrahend, difference);
    assert_eq!(&minuend - &subtrahend, difference);
}
#[test]
#[should_panic]
fn uint512_sub_overflow_panics() {
    // Unsigned subtraction below zero must panic.
    let _ = Uint512::from(1u32) - Uint512::from(2u32);
}
#[test]
fn uint512_sub_assign_works() {
    // -= with an owned right-hand side.
    let mut value = Uint512::from(14u32);
    value -= Uint512::from(2u32);
    assert_eq!(value, Uint512::from(12u32));

    // -= with a borrowed right-hand side.
    let mut value = Uint512::from(10u32);
    let delta = Uint512::from(3u32);
    value -= &delta;
    assert_eq!(value, Uint512::from(7u32));
}
#[test]
#[allow(clippy::op_ref)]
fn uint512_mul_works() {
    // Basic products, including multiplication by zero.
    assert_eq!(
        Uint512::from(2u32) * Uint512::from(3u32),
        Uint512::from(6u32)
    );
    assert_eq!(Uint512::from(2u32) * Uint512::zero(), Uint512::zero());

    // All four owned/borrowed operand combinations behave alike.
    let lhs = Uint512::from(11u32);
    let rhs = Uint512::from(3u32);
    let product = Uint512::from(33u32);
    assert_eq!(lhs * rhs, product);
    assert_eq!(lhs * &rhs, product);
    assert_eq!(&lhs * rhs, product);
    assert_eq!(&lhs * &rhs, product);
}
#[test]
fn uint512_mul_assign_works() {
    // *= with an owned right-hand side.
    let mut value = Uint512::from(14u32);
    value *= Uint512::from(2u32);
    assert_eq!(value, Uint512::from(28u32));

    // *= with a borrowed right-hand side.
    let mut value = Uint512::from(10u32);
    let factor = Uint512::from(3u32);
    value *= &factor;
    assert_eq!(value, Uint512::from(30u32));
}
#[test]
fn uint512_pow_works() {
    // Small powers of two come out exact.
    let base = Uint512::from(2u32);
    assert_eq!(base.pow(2), Uint512::from(4u32));
    assert_eq!(base.pow(10), Uint512::from(1024u32));
}
#[test]
#[should_panic]
fn uint512_pow_overflow_panics() {
    // Squaring the maximum value cannot fit in 512 bits and must panic.
    Uint512::MAX.pow(2u32);
}
#[test]
fn uint512_shr_works() {
    // Byte array input to `new`; only the trailing bytes are non-zero,
    // presumably interpreted big-endian (so this value is 0x02000402) —
    // consistent with the shifted expectation below.
    let original = Uint512::new([
        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 2u8, 0u8, 4u8, 2u8,
    ]);
    // Expected result of shifting right by two: 0x02000402 >> 2 == 0x00800100.
    let shifted = Uint512::new([
        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 128u8, 1u8, 0u8,
    ]);
    assert_eq!(original >> 2u32, shifted);
}
#[test]
#[should_panic]
fn uint512_shr_overflow_panics() {
    // Shifting by the full bit width (512) must panic.
    let _ = Uint512::from(1u32) >> 512u32;
}
#[test]
fn sum_works() {
    // `Sum` is implemented for both borrowed and owned iterators.
    let values = vec![
        Uint512::from(17u32),
        Uint512::from(123u32),
        Uint512::from(540u32),
        Uint512::from(82u32),
    ];
    let total = Uint512::from(762u32);
    let summed_refs: Uint512 = values.iter().sum();
    assert_eq!(total, summed_refs);
    let summed_owned: Uint512 = values.into_iter().sum();
    assert_eq!(total, summed_owned);
}
#[test]
fn uint512_methods() {
    // checked_* operations: the overflow/underflow/zero-divisor case errors,
    // and the well-defined case returns Ok with the exact value.
    assert!(matches!(
        Uint512::MAX.checked_add(Uint512::from(1u32)),
        Err(OverflowError { .. })
    ));
    assert_eq!(
        Uint512::from(1u32).checked_add(Uint512::from(1u32)),
        Ok(Uint512::from(2u32)),
    );
    assert!(matches!(
        Uint512::from(0u32).checked_sub(Uint512::from(1u32)),
        Err(OverflowError { .. })
    ));
    assert_eq!(
        Uint512::from(2u32).checked_sub(Uint512::from(1u32)),
        Ok(Uint512::from(1u32)),
    );
    assert!(matches!(
        Uint512::MAX.checked_mul(Uint512::from(2u32)),
        Err(OverflowError { .. })
    ));
    assert_eq!(
        Uint512::from(2u32).checked_mul(Uint512::from(2u32)),
        Ok(Uint512::from(4u32)),
    );
    assert!(matches!(
        Uint512::MAX.checked_pow(2u32),
        Err(OverflowError { .. })
    ));
    assert_eq!(
        Uint512::from(2u32).checked_pow(3u32),
        Ok(Uint512::from(8u32)),
    );
    assert!(matches!(
        Uint512::MAX.checked_div(Uint512::from(0u32)),
        Err(DivideByZeroError { .. })
    ));
    assert_eq!(
        Uint512::from(6u32).checked_div(Uint512::from(2u32)),
        Ok(Uint512::from(3u32)),
    );
    assert!(matches!(
        Uint512::MAX.checked_rem(Uint512::from(0u32)),
        Err(DivideByZeroError { .. })
    ));
    // Previously only the error case of checked_rem was covered; pin the
    // success case too, mirroring the other checked_* operations above.
    assert_eq!(
        Uint512::from(7u32).checked_rem(Uint512::from(2u32)),
        Ok(Uint512::from(1u32)),
    );
    // saturating_* operations clamp at the numeric bounds instead of erroring.
    assert_eq!(
        Uint512::MAX.saturating_add(Uint512::from(1u32)),
        Uint512::MAX
    );
    assert_eq!(
        Uint512::from(0u32).saturating_sub(Uint512::from(1u32)),
        Uint512::from(0u32)
    );
    assert_eq!(
        Uint512::MAX.saturating_mul(Uint512::from(2u32)),
        Uint512::MAX
    );
}
#[test]
#[allow(clippy::op_ref)]
fn uint512_implements_rem() {
    let value = Uint512::from(10u32);
    // Remainder is zero for divisors of 10 and for 1, otherwise the leftover.
    assert_eq!(value % Uint512::from(10u32), Uint512::zero());
    assert_eq!(value % Uint512::from(2u32), Uint512::zero());
    assert_eq!(value % Uint512::from(1u32), Uint512::zero());
    assert_eq!(value % Uint512::from(3u32), Uint512::from(1u32));
    assert_eq!(value % Uint512::from(4u32), Uint512::from(2u32));

    // All four owned/borrowed operand combinations behave alike.
    let dividend = Uint512::from(10u32);
    let divisor = Uint512::from(3u32);
    let remainder = Uint512::from(1u32);
    assert_eq!(dividend % divisor, remainder);
    assert_eq!(dividend % &divisor, remainder);
    assert_eq!(&dividend % divisor, remainder);
    assert_eq!(&dividend % &divisor, remainder);
}
#[test]
#[should_panic(expected = "division by zero")]
fn uint512_rem_panics_for_zero() {
    // Remainder by zero must panic with the standard message.
    let _ = Uint512::from(10u32) % Uint512::zero();
}
#[test]
#[allow(clippy::op_ref)]
fn uint512_rem_works() {
    // Non-zero and zero remainders.
    assert_eq!(
        Uint512::from(12u32) % Uint512::from(10u32),
        Uint512::from(2u32)
    );
    assert_eq!(Uint512::from(50u32) % Uint512::from(5u32), Uint512::zero());

    // All four owned/borrowed operand combinations behave alike.
    let dividend = Uint512::from(42u32);
    let divisor = Uint512::from(5u32);
    let remainder = Uint512::from(2u32);
    assert_eq!(dividend % divisor, remainder);
    assert_eq!(dividend % &divisor, remainder);
    assert_eq!(&dividend % divisor, remainder);
    assert_eq!(&dividend % &divisor, remainder);
}
#[test]
fn uint512_rem_assign_works() {
    // %= with an owned right-hand side.
    let mut value = Uint512::from(30u32);
    value %= Uint512::from(4u32);
    assert_eq!(value, Uint512::from(2u32));

    // %= with a borrowed right-hand side.
    let mut value = Uint512::from(25u32);
    let divisor = Uint512::from(6u32);
    value %= &divisor;
    assert_eq!(value, Uint512::from(1u32));
}
}
| 31.930295 | 99 | 0.519731 |
effb452ec4c91b860c6589f830f1d735c4657e23
| 31,100 |
// svd2rust-generated accessor plumbing for the FlexCAN CTRL1 register:
// reader/writer type aliases, reset value, and per-field proxies below.
#[doc = "Reader of register CTRL1"]
pub type R = crate::R<u32, super::CTRL1>;
#[doc = "Writer for register CTRL1"]
pub type W = crate::W<u32, super::CTRL1>;
#[doc = "Register CTRL1 `reset()`'s with value 0"]
impl crate::ResetValue for super::CTRL1 {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
// PROPSEG (bits 0:2): plain numeric field, no enumerated values.
#[doc = "Reader of field `PROPSEG`"]
pub type PROPSEG_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `PROPSEG`"]
pub struct PROPSEG_W<'a> {
    w: &'a mut W,
}
impl<'a> PROPSEG_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Value is masked to the field's 3-bit width before insertion.
        self.w.bits = (self.w.bits & !0x07) | ((value as u32) & 0x07);
        self.w
    }
}
// LOM (bit 3, Listen-Only Mode): enumerated values, reader, and write proxy.
#[doc = "Listen-Only Mode\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum LOM_A {
    #[doc = "0: Listen-Only mode is deactivated."]
    _0,
    #[doc = "1: FlexCAN module operates in Listen-Only mode."]
    _1,
}
impl From<LOM_A> for bool {
    #[inline(always)]
    fn from(variant: LOM_A) -> Self {
        match variant {
            LOM_A::_0 => false,
            LOM_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `LOM`"]
pub type LOM_R = crate::R<bool, LOM_A>;
impl LOM_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> LOM_A {
        match self.bits {
            false => LOM_A::_0,
            true => LOM_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == LOM_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == LOM_A::_1
    }
}
#[doc = "Write proxy for field `LOM`"]
pub struct LOM_W<'a> {
    w: &'a mut W,
}
impl<'a> LOM_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: LOM_A) -> &'a mut W {
        {
            self.bit(variant.into())
        }
    }
    #[doc = "Listen-Only mode is deactivated."]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(LOM_A::_0)
    }
    #[doc = "FlexCAN module operates in Listen-Only mode."]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(LOM_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Read-modify-write of bit 3 only.
        self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
        self.w
    }
}
// LBUF (bit 4, Lowest Buffer Transmitted First): enum, reader, write proxy.
#[doc = "Lowest Buffer Transmitted First\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum LBUF_A {
    #[doc = "0: Buffer with highest priority is transmitted first."]
    _0,
    #[doc = "1: Lowest number buffer is transmitted first."]
    _1,
}
impl From<LBUF_A> for bool {
    #[inline(always)]
    fn from(variant: LBUF_A) -> Self {
        match variant {
            LBUF_A::_0 => false,
            LBUF_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `LBUF`"]
pub type LBUF_R = crate::R<bool, LBUF_A>;
impl LBUF_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> LBUF_A {
        match self.bits {
            false => LBUF_A::_0,
            true => LBUF_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == LBUF_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == LBUF_A::_1
    }
}
#[doc = "Write proxy for field `LBUF`"]
pub struct LBUF_W<'a> {
    w: &'a mut W,
}
impl<'a> LBUF_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: LBUF_A) -> &'a mut W {
        {
            self.bit(variant.into())
        }
    }
    #[doc = "Buffer with highest priority is transmitted first."]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(LBUF_A::_0)
    }
    #[doc = "Lowest number buffer is transmitted first."]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(LBUF_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Read-modify-write of bit 4 only.
        self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
        self.w
    }
}
// TSYN (bit 5, Timer Sync): enum, reader, write proxy.
#[doc = "Timer Sync\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TSYN_A {
    #[doc = "0: Timer Sync feature disabled"]
    _0,
    #[doc = "1: Timer Sync feature enabled"]
    _1,
}
impl From<TSYN_A> for bool {
    #[inline(always)]
    fn from(variant: TSYN_A) -> Self {
        match variant {
            TSYN_A::_0 => false,
            TSYN_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `TSYN`"]
pub type TSYN_R = crate::R<bool, TSYN_A>;
impl TSYN_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TSYN_A {
        match self.bits {
            false => TSYN_A::_0,
            true => TSYN_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == TSYN_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == TSYN_A::_1
    }
}
#[doc = "Write proxy for field `TSYN`"]
pub struct TSYN_W<'a> {
    w: &'a mut W,
}
impl<'a> TSYN_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TSYN_A) -> &'a mut W {
        {
            self.bit(variant.into())
        }
    }
    #[doc = "Timer Sync feature disabled"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(TSYN_A::_0)
    }
    #[doc = "Timer Sync feature enabled"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(TSYN_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Read-modify-write of bit 5 only.
        self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
        self.w
    }
}
// BOFFREC (bit 6, Bus Off Recovery): enum, reader, write proxy.
#[doc = "Bus Off Recovery\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum BOFFREC_A {
    #[doc = "0: Automatic recovering from Bus Off state enabled, according to CAN Spec 2.0 part B."]
    _0,
    #[doc = "1: Automatic recovering from Bus Off state disabled."]
    _1,
}
impl From<BOFFREC_A> for bool {
    #[inline(always)]
    fn from(variant: BOFFREC_A) -> Self {
        match variant {
            BOFFREC_A::_0 => false,
            BOFFREC_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `BOFFREC`"]
pub type BOFFREC_R = crate::R<bool, BOFFREC_A>;
impl BOFFREC_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> BOFFREC_A {
        match self.bits {
            false => BOFFREC_A::_0,
            true => BOFFREC_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == BOFFREC_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == BOFFREC_A::_1
    }
}
#[doc = "Write proxy for field `BOFFREC`"]
pub struct BOFFREC_W<'a> {
    w: &'a mut W,
}
impl<'a> BOFFREC_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: BOFFREC_A) -> &'a mut W {
        {
            self.bit(variant.into())
        }
    }
    #[doc = "Automatic recovering from Bus Off state enabled, according to CAN Spec 2.0 part B."]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(BOFFREC_A::_0)
    }
    #[doc = "Automatic recovering from Bus Off state disabled."]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(BOFFREC_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Read-modify-write of bit 6 only.
        self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6);
        self.w
    }
}
// SMP (bit 7, CAN Bit Sampling): enum, reader, write proxy.
#[doc = "CAN Bit Sampling\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SMP_A {
    #[doc = "0: Just one sample is used to determine the bit value."]
    _0,
    #[doc = "1: Three samples are used to determine the value of the received bit: the regular one (sample point) and 2 preceding samples; a majority rule is used."]
    _1,
}
impl From<SMP_A> for bool {
    #[inline(always)]
    fn from(variant: SMP_A) -> Self {
        match variant {
            SMP_A::_0 => false,
            SMP_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `SMP`"]
pub type SMP_R = crate::R<bool, SMP_A>;
impl SMP_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> SMP_A {
        match self.bits {
            false => SMP_A::_0,
            true => SMP_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == SMP_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == SMP_A::_1
    }
}
#[doc = "Write proxy for field `SMP`"]
pub struct SMP_W<'a> {
    w: &'a mut W,
}
impl<'a> SMP_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: SMP_A) -> &'a mut W {
        {
            self.bit(variant.into())
        }
    }
    #[doc = "Just one sample is used to determine the bit value."]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(SMP_A::_0)
    }
    #[doc = "Three samples are used to determine the value of the received bit: the regular one (sample point) and 2 preceding samples; a majority rule is used."]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(SMP_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Read-modify-write of bit 7 only.
        self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
        self.w
    }
}
// RWRNMSK (bit 10, Rx Warning Interrupt Mask): enum, reader, write proxy.
#[doc = "Rx Warning Interrupt Mask\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RWRNMSK_A {
    #[doc = "0: Rx Warning Interrupt disabled."]
    _0,
    #[doc = "1: Rx Warning Interrupt enabled."]
    _1,
}
impl From<RWRNMSK_A> for bool {
    #[inline(always)]
    fn from(variant: RWRNMSK_A) -> Self {
        match variant {
            RWRNMSK_A::_0 => false,
            RWRNMSK_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `RWRNMSK`"]
pub type RWRNMSK_R = crate::R<bool, RWRNMSK_A>;
impl RWRNMSK_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> RWRNMSK_A {
        match self.bits {
            false => RWRNMSK_A::_0,
            true => RWRNMSK_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == RWRNMSK_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == RWRNMSK_A::_1
    }
}
#[doc = "Write proxy for field `RWRNMSK`"]
pub struct RWRNMSK_W<'a> {
    w: &'a mut W,
}
impl<'a> RWRNMSK_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: RWRNMSK_A) -> &'a mut W {
        {
            self.bit(variant.into())
        }
    }
    #[doc = "Rx Warning Interrupt disabled."]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(RWRNMSK_A::_0)
    }
    #[doc = "Rx Warning Interrupt enabled."]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(RWRNMSK_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Read-modify-write of bit 10 only.
        self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
        self.w
    }
}
// TWRNMSK (bit 11, Tx Warning Interrupt Mask): enum, reader, write proxy.
#[doc = "Tx Warning Interrupt Mask\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TWRNMSK_A {
    #[doc = "0: Tx Warning Interrupt disabled."]
    _0,
    #[doc = "1: Tx Warning Interrupt enabled."]
    _1,
}
impl From<TWRNMSK_A> for bool {
    #[inline(always)]
    fn from(variant: TWRNMSK_A) -> Self {
        match variant {
            TWRNMSK_A::_0 => false,
            TWRNMSK_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `TWRNMSK`"]
pub type TWRNMSK_R = crate::R<bool, TWRNMSK_A>;
impl TWRNMSK_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TWRNMSK_A {
        match self.bits {
            false => TWRNMSK_A::_0,
            true => TWRNMSK_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == TWRNMSK_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == TWRNMSK_A::_1
    }
}
#[doc = "Write proxy for field `TWRNMSK`"]
pub struct TWRNMSK_W<'a> {
    w: &'a mut W,
}
impl<'a> TWRNMSK_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TWRNMSK_A) -> &'a mut W {
        {
            self.bit(variant.into())
        }
    }
    #[doc = "Tx Warning Interrupt disabled."]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(TWRNMSK_A::_0)
    }
    #[doc = "Tx Warning Interrupt enabled."]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(TWRNMSK_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Read-modify-write of bit 11 only.
        self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11);
        self.w
    }
}
// LPB (bit 12, Loop Back Mode): enum, reader, write proxy.
#[doc = "Loop Back Mode\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum LPB_A {
    #[doc = "0: Loop Back disabled."]
    _0,
    #[doc = "1: Loop Back enabled."]
    _1,
}
impl From<LPB_A> for bool {
    #[inline(always)]
    fn from(variant: LPB_A) -> Self {
        match variant {
            LPB_A::_0 => false,
            LPB_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `LPB`"]
pub type LPB_R = crate::R<bool, LPB_A>;
impl LPB_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> LPB_A {
        match self.bits {
            false => LPB_A::_0,
            true => LPB_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == LPB_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == LPB_A::_1
    }
}
#[doc = "Write proxy for field `LPB`"]
pub struct LPB_W<'a> {
    w: &'a mut W,
}
impl<'a> LPB_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: LPB_A) -> &'a mut W {
        {
            self.bit(variant.into())
        }
    }
    #[doc = "Loop Back disabled."]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(LPB_A::_0)
    }
    #[doc = "Loop Back enabled."]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(LPB_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Read-modify-write of bit 12 only.
        self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12);
        self.w
    }
}
// CLKSRC (bit 13, CAN Engine Clock Source): enum, reader, write proxy.
#[doc = "CAN Engine Clock Source\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CLKSRC_A {
    #[doc = "0: The CAN engine clock source is the oscillator clock. Under this condition, the oscillator clock frequency must be lower than the bus clock."]
    _0,
    #[doc = "1: The CAN engine clock source is the peripheral clock."]
    _1,
}
impl From<CLKSRC_A> for bool {
    #[inline(always)]
    fn from(variant: CLKSRC_A) -> Self {
        match variant {
            CLKSRC_A::_0 => false,
            CLKSRC_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `CLKSRC`"]
pub type CLKSRC_R = crate::R<bool, CLKSRC_A>;
impl CLKSRC_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> CLKSRC_A {
        match self.bits {
            false => CLKSRC_A::_0,
            true => CLKSRC_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == CLKSRC_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == CLKSRC_A::_1
    }
}
#[doc = "Write proxy for field `CLKSRC`"]
pub struct CLKSRC_W<'a> {
    w: &'a mut W,
}
impl<'a> CLKSRC_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: CLKSRC_A) -> &'a mut W {
        {
            self.bit(variant.into())
        }
    }
    #[doc = "The CAN engine clock source is the oscillator clock. Under this condition, the oscillator clock frequency must be lower than the bus clock."]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(CLKSRC_A::_0)
    }
    #[doc = "The CAN engine clock source is the peripheral clock."]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(CLKSRC_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Read-modify-write of bit 13 only.
        self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13);
        self.w
    }
}
// ERRMSK (bit 14, Error Mask): enum, reader, write proxy.
#[doc = "Error Mask\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERRMSK_A {
    #[doc = "0: Error interrupt disabled."]
    _0,
    #[doc = "1: Error interrupt enabled."]
    _1,
}
impl From<ERRMSK_A> for bool {
    #[inline(always)]
    fn from(variant: ERRMSK_A) -> Self {
        match variant {
            ERRMSK_A::_0 => false,
            ERRMSK_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `ERRMSK`"]
pub type ERRMSK_R = crate::R<bool, ERRMSK_A>;
impl ERRMSK_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ERRMSK_A {
        match self.bits {
            false => ERRMSK_A::_0,
            true => ERRMSK_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == ERRMSK_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == ERRMSK_A::_1
    }
}
#[doc = "Write proxy for field `ERRMSK`"]
pub struct ERRMSK_W<'a> {
    w: &'a mut W,
}
impl<'a> ERRMSK_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: ERRMSK_A) -> &'a mut W {
        {
            self.bit(variant.into())
        }
    }
    #[doc = "Error interrupt disabled."]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(ERRMSK_A::_0)
    }
    #[doc = "Error interrupt enabled."]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(ERRMSK_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Read-modify-write of bit 14 only.
        self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14);
        self.w
    }
}
// BOFFMSK (bit 15, Bus Off Mask): enum, reader, write proxy.
#[doc = "Bus Off Mask\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum BOFFMSK_A {
    #[doc = "0: Bus Off interrupt disabled."]
    _0,
    #[doc = "1: Bus Off interrupt enabled."]
    _1,
}
impl From<BOFFMSK_A> for bool {
    #[inline(always)]
    fn from(variant: BOFFMSK_A) -> Self {
        match variant {
            BOFFMSK_A::_0 => false,
            BOFFMSK_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `BOFFMSK`"]
pub type BOFFMSK_R = crate::R<bool, BOFFMSK_A>;
impl BOFFMSK_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> BOFFMSK_A {
        match self.bits {
            false => BOFFMSK_A::_0,
            true => BOFFMSK_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == BOFFMSK_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == BOFFMSK_A::_1
    }
}
#[doc = "Write proxy for field `BOFFMSK`"]
pub struct BOFFMSK_W<'a> {
    w: &'a mut W,
}
impl<'a> BOFFMSK_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: BOFFMSK_A) -> &'a mut W {
        {
            self.bit(variant.into())
        }
    }
    #[doc = "Bus Off interrupt disabled."]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(BOFFMSK_A::_0)
    }
    #[doc = "Bus Off interrupt enabled."]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(BOFFMSK_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Read-modify-write of bit 15 only.
        self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15);
        self.w
    }
}
// Multi-bit timing fields: PSEG2 (16:18), PSEG1 (19:21), RJW (22:23),
// PRESDIV (24:31). Each write proxy masks the value to the field width
// before inserting it at the field's bit offset.
#[doc = "Reader of field `PSEG2`"]
pub type PSEG2_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `PSEG2`"]
pub struct PSEG2_W<'a> {
    w: &'a mut W,
}
impl<'a> PSEG2_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x07 << 16)) | (((value as u32) & 0x07) << 16);
        self.w
    }
}
#[doc = "Reader of field `PSEG1`"]
pub type PSEG1_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `PSEG1`"]
pub struct PSEG1_W<'a> {
    w: &'a mut W,
}
impl<'a> PSEG1_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x07 << 19)) | (((value as u32) & 0x07) << 19);
        self.w
    }
}
#[doc = "Reader of field `RJW`"]
pub type RJW_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `RJW`"]
pub struct RJW_W<'a> {
    w: &'a mut W,
}
impl<'a> RJW_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x03 << 22)) | (((value as u32) & 0x03) << 22);
        self.w
    }
}
#[doc = "Reader of field `PRESDIV`"]
pub type PRESDIV_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `PRESDIV`"]
pub struct PRESDIV_W<'a> {
    w: &'a mut W,
}
impl<'a> PRESDIV_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0xff << 24)) | (((value as u32) & 0xff) << 24);
        self.w
    }
}
// Read accessors: each extracts its field from the cached register value
// by shifting to the field's offset and masking to its width.
impl R {
    #[doc = "Bits 0:2 - Propagation Segment"]
    #[inline(always)]
    pub fn propseg(&self) -> PROPSEG_R {
        PROPSEG_R::new((self.bits & 0x07) as u8)
    }
    #[doc = "Bit 3 - Listen-Only Mode"]
    #[inline(always)]
    pub fn lom(&self) -> LOM_R {
        LOM_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 4 - Lowest Buffer Transmitted First"]
    #[inline(always)]
    pub fn lbuf(&self) -> LBUF_R {
        LBUF_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 5 - Timer Sync"]
    #[inline(always)]
    pub fn tsyn(&self) -> TSYN_R {
        TSYN_R::new(((self.bits >> 5) & 0x01) != 0)
    }
    #[doc = "Bit 6 - Bus Off Recovery"]
    #[inline(always)]
    pub fn boffrec(&self) -> BOFFREC_R {
        BOFFREC_R::new(((self.bits >> 6) & 0x01) != 0)
    }
    #[doc = "Bit 7 - CAN Bit Sampling"]
    #[inline(always)]
    pub fn smp(&self) -> SMP_R {
        SMP_R::new(((self.bits >> 7) & 0x01) != 0)
    }
    #[doc = "Bit 10 - Rx Warning Interrupt Mask"]
    #[inline(always)]
    pub fn rwrnmsk(&self) -> RWRNMSK_R {
        RWRNMSK_R::new(((self.bits >> 10) & 0x01) != 0)
    }
    #[doc = "Bit 11 - Tx Warning Interrupt Mask"]
    #[inline(always)]
    pub fn twrnmsk(&self) -> TWRNMSK_R {
        TWRNMSK_R::new(((self.bits >> 11) & 0x01) != 0)
    }
    #[doc = "Bit 12 - Loop Back Mode"]
    #[inline(always)]
    pub fn lpb(&self) -> LPB_R {
        LPB_R::new(((self.bits >> 12) & 0x01) != 0)
    }
    #[doc = "Bit 13 - CAN Engine Clock Source"]
    #[inline(always)]
    pub fn clksrc(&self) -> CLKSRC_R {
        CLKSRC_R::new(((self.bits >> 13) & 0x01) != 0)
    }
    #[doc = "Bit 14 - Error Mask"]
    #[inline(always)]
    pub fn errmsk(&self) -> ERRMSK_R {
        ERRMSK_R::new(((self.bits >> 14) & 0x01) != 0)
    }
    #[doc = "Bit 15 - Bus Off Mask"]
    #[inline(always)]
    pub fn boffmsk(&self) -> BOFFMSK_R {
        BOFFMSK_R::new(((self.bits >> 15) & 0x01) != 0)
    }
    #[doc = "Bits 16:18 - Phase Segment 2"]
    #[inline(always)]
    pub fn pseg2(&self) -> PSEG2_R {
        PSEG2_R::new(((self.bits >> 16) & 0x07) as u8)
    }
    #[doc = "Bits 19:21 - Phase Segment 1"]
    #[inline(always)]
    pub fn pseg1(&self) -> PSEG1_R {
        PSEG1_R::new(((self.bits >> 19) & 0x07) as u8)
    }
    #[doc = "Bits 22:23 - Resync Jump Width"]
    #[inline(always)]
    pub fn rjw(&self) -> RJW_R {
        RJW_R::new(((self.bits >> 22) & 0x03) as u8)
    }
    #[doc = "Bits 24:31 - Prescaler Division Factor"]
    #[inline(always)]
    pub fn presdiv(&self) -> PRESDIV_R {
        PRESDIV_R::new(((self.bits >> 24) & 0xff) as u8)
    }
}
impl W {
    // Write-proxy accessors (svd2rust-generated): each returns a field writer
    // that borrows the whole register writer, so several field updates can be
    // chained before the final register write.
    #[doc = "Bits 0:2 - Propagation Segment"]
    #[inline(always)]
    pub fn propseg(&mut self) -> PROPSEG_W {
        PROPSEG_W { w: self }
    }
    #[doc = "Bit 3 - Listen-Only Mode"]
    #[inline(always)]
    pub fn lom(&mut self) -> LOM_W {
        LOM_W { w: self }
    }
    #[doc = "Bit 4 - Lowest Buffer Transmitted First"]
    #[inline(always)]
    pub fn lbuf(&mut self) -> LBUF_W {
        LBUF_W { w: self }
    }
    #[doc = "Bit 5 - Timer Sync"]
    #[inline(always)]
    pub fn tsyn(&mut self) -> TSYN_W {
        TSYN_W { w: self }
    }
    #[doc = "Bit 6 - Bus Off Recovery"]
    #[inline(always)]
    pub fn boffrec(&mut self) -> BOFFREC_W {
        BOFFREC_W { w: self }
    }
    #[doc = "Bit 7 - CAN Bit Sampling"]
    #[inline(always)]
    pub fn smp(&mut self) -> SMP_W {
        SMP_W { w: self }
    }
    #[doc = "Bit 10 - Rx Warning Interrupt Mask"]
    #[inline(always)]
    pub fn rwrnmsk(&mut self) -> RWRNMSK_W {
        RWRNMSK_W { w: self }
    }
    #[doc = "Bit 11 - Tx Warning Interrupt Mask"]
    #[inline(always)]
    pub fn twrnmsk(&mut self) -> TWRNMSK_W {
        TWRNMSK_W { w: self }
    }
    #[doc = "Bit 12 - Loop Back Mode"]
    #[inline(always)]
    pub fn lpb(&mut self) -> LPB_W {
        LPB_W { w: self }
    }
    #[doc = "Bit 13 - CAN Engine Clock Source"]
    #[inline(always)]
    pub fn clksrc(&mut self) -> CLKSRC_W {
        CLKSRC_W { w: self }
    }
    #[doc = "Bit 14 - Error Mask"]
    #[inline(always)]
    pub fn errmsk(&mut self) -> ERRMSK_W {
        ERRMSK_W { w: self }
    }
    #[doc = "Bit 15 - Bus Off Mask"]
    #[inline(always)]
    pub fn boffmsk(&mut self) -> BOFFMSK_W {
        BOFFMSK_W { w: self }
    }
    #[doc = "Bits 16:18 - Phase Segment 2"]
    #[inline(always)]
    pub fn pseg2(&mut self) -> PSEG2_W {
        PSEG2_W { w: self }
    }
    #[doc = "Bits 19:21 - Phase Segment 1"]
    #[inline(always)]
    pub fn pseg1(&mut self) -> PSEG1_W {
        PSEG1_W { w: self }
    }
    #[doc = "Bits 22:23 - Resync Jump Width"]
    #[inline(always)]
    pub fn rjw(&mut self) -> RJW_W {
        RJW_W { w: self }
    }
    #[doc = "Bits 24:31 - Prescaler Division Factor"]
    #[inline(always)]
    pub fn presdiv(&mut self) -> PRESDIV_W {
        PRESDIV_W { w: self }
    }
}
| 28.144796 | 165 | 0.532379 |
e6dcd0f5c6acef5ba6fa0cabcde895e812b05dd2
| 18,283 |
extern crate time;
pub mod merkletree;
pub mod types;
pub mod constants;
use self::types::{
AttribOperation,
GetAttribOperation,
GetNymOperation,
GetSchemaOperationData,
GetSchemaOperation,
NymOperation,
Request,
SchemaOperation,
SchemaOperationData,
ClaimDefOperation,
ClaimDefOperationData,
GetClaimDefOperation,
GetDdoOperation,
NodeOperation,
NodeOperationData,
Role,
GetTxnOperation
};
use errors::common::CommonError;
use utils::json::{JsonEncodable, JsonDecodable};
use utils::crypto::base58::Base58;
/// Serialization contract for ledger entities.
/// NOTE(review): no type in this chunk implements it — confirm it is still used.
trait LedgerSerializer {
    fn serialize(&self) -> String;
}
/// Stateless builder of Indy ledger transaction requests (serialized JSON).
pub struct LedgerService {}
impl LedgerService {
    /// Creates a stateless request builder.
    pub fn new() -> LedgerService {
        LedgerService {}
    }

    /// Builds a NYM request (creates or updates a DID on the ledger).
    ///
    /// `identifier` and `dest` must be valid base58 strings. `role` accepts
    /// only "STEWARD" or "TRUSTEE"; any other value yields `InvalidStructure`.
    pub fn build_nym_request(&self, identifier: &str, dest: &str, verkey: Option<&str>,
                             alias: Option<&str>, role: Option<&str>) -> Result<String, CommonError> {
        //TODO: check identifier, dest, verkey
        Base58::decode(&identifier)?;
        Base58::decode(&dest)?;

        let req_id = LedgerService::get_req_id();

        // Translate the textual role into the numeric code the ledger expects.
        // (`r` is a `&str`, so no clone is needed to match on it.)
        let role = match role {
            Some(r) =>
                match r {
                    "STEWARD" => Some(Role::STEWARD as i32),
                    "TRUSTEE" => Some(Role::TRUSTEE as i32),
                    other => return Err(CommonError::InvalidStructure(format!("Invalid role: {}", other)))
                },
            _ => None
        };

        let operation = NymOperation::new(dest.to_string(),
                                          verkey.as_ref().map(|s| s.to_string()),
                                          alias.as_ref().map(|s| s.to_string()),
                                          role.as_ref().map(|s| s.to_string()));
        let request = Request::new(req_id,
                                   identifier.to_string(),
                                   operation);
        let request_json = Request::to_json(&request)
            .map_err(|err| CommonError::InvalidState(format!("Invalid nym request json: {}", err.to_string())))?;
        Ok(request_json)
    }

    /// Builds a GET_NYM request that looks up the DID `dest`.
    pub fn build_get_nym_request(&self, identifier: &str, dest: &str) -> Result<String, CommonError> {
        Base58::decode(&identifier)?;
        Base58::decode(&dest)?;

        let req_id = LedgerService::get_req_id();
        let operation = GetNymOperation::new(dest.to_string());
        let request = Request::new(req_id,
                                   identifier.to_string(),
                                   operation);
        let request_json = Request::to_json(&request)
            .map_err(|err| CommonError::InvalidState(format!("Invalid get_nym request json: {}", err.to_string())))?;
        Ok(request_json)
    }

    /// Builds a GET_DDO request for the DID document of `dest`.
    pub fn build_get_ddo_request(&self, identifier: &str, dest: &str) -> Result<String, CommonError> {
        Base58::decode(&identifier)?;
        Base58::decode(&dest)?;

        let req_id = LedgerService::get_req_id();
        let operation = GetDdoOperation::new(dest.to_string());
        let request = Request::new(req_id,
                                   identifier.to_string(),
                                   operation);
        let request_json = Request::to_json(&request)
            .map_err(|err| CommonError::InvalidState(format!("Invalid get_ddo request json: {}", err.to_string())))?;
        Ok(request_json)
    }

    /// Builds an ATTRIB request attaching an attribute to the DID `dest`.
    /// At least one of `hash`, `raw`, `enc` must be provided.
    pub fn build_attrib_request(&self, identifier: &str, dest: &str, hash: Option<&str>,
                                raw: Option<&str>, enc: Option<&str>) -> Result<String, CommonError> {
        Base58::decode(&identifier)?;
        Base58::decode(&dest)?;

        if raw.is_none() && hash.is_none() && enc.is_none() {
            // Static message: no interpolation needed, so avoid `format!`.
            return Err(CommonError::InvalidStructure("Either raw or hash or enc must be specified".to_string()))
        }

        let req_id = LedgerService::get_req_id();
        let operation = AttribOperation::new(dest.to_string(),
                                             hash.as_ref().map(|s| s.to_string()),
                                             raw.as_ref().map(|s| s.to_string()),
                                             enc.as_ref().map(|s| s.to_string()));
        let request = Request::new(req_id,
                                   identifier.to_string(),
                                   operation);
        let request_json = Request::to_json(&request)
            .map_err(|err| CommonError::InvalidState(format!("Invalid attrib request json: {}", err.to_string())))?;
        Ok(request_json)
    }

    /// Builds a GET_ATTRIB request for the raw attribute `raw` of `dest`.
    pub fn build_get_attrib_request(&self, identifier: &str, dest: &str, raw: &str) -> Result<String, CommonError> {
        Base58::decode(&identifier)?;
        Base58::decode(&dest)?;

        let req_id = LedgerService::get_req_id();
        let operation = GetAttribOperation::new(dest.to_string(),
                                                raw.to_string());
        let request = Request::new(req_id,
                                   identifier.to_string(),
                                   operation);
        let request_json = Request::to_json(&request)
            .map_err(|err| CommonError::InvalidState(format!("Invalid get_attrib request json: {}", err.to_string())))?;
        Ok(request_json)
    }

    /// Builds a SCHEMA request. `data` must deserialize as `SchemaOperationData`
    /// (validated here, but the original string is embedded verbatim).
    pub fn build_schema_request(&self, identifier: &str, data: &str) -> Result<String, CommonError> {
        Base58::decode(&identifier)?;

        let req_id = LedgerService::get_req_id();
        // Parse only to validate; the raw string is what gets sent.
        SchemaOperationData::from_json(&data)
            .map_err(|err| CommonError::InvalidStructure(format!("Invalid data json: {}", err.to_string())))?;
        let operation = SchemaOperation::new(data.to_string());
        let request = Request::new(req_id,
                                   identifier.to_string(),
                                   operation);
        let request_json = Request::to_json(&request)
            .map_err(|err| CommonError::InvalidState(format!("Invalid schema request json: {}", err.to_string())))?;
        Ok(request_json)
    }

    /// Builds a GET_SCHEMA request; `data` must deserialize as
    /// `GetSchemaOperationData` and is sent in parsed form.
    pub fn build_get_schema_request(&self, identifier: &str, dest: &str, data: &str) -> Result<String, CommonError> {
        Base58::decode(&identifier)?;
        Base58::decode(&dest)?;

        let req_id = LedgerService::get_req_id();
        let data = GetSchemaOperationData::from_json(data)
            .map_err(|err| CommonError::InvalidStructure(format!("Invalid data json: {}", err.to_string())))?;
        let operation = GetSchemaOperation::new(dest.to_string(), data);
        let request = Request::new(req_id,
                                   identifier.to_string(),
                                   operation);
        let request_json = Request::to_json(&request)
            .map_err(|err| CommonError::InvalidState(format!("Invalid get_schema request json: {}", err.to_string())))?;
        Ok(request_json)
    }

    /// Builds a CLAIM_DEF request referencing schema sequence number `_ref`.
    pub fn build_claim_def_request(&self, identifier: &str, _ref: i32, signature_type: &str, data: &str) -> Result<String, CommonError> {
        Base58::decode(&identifier)?;

        let req_id = LedgerService::get_req_id();
        // Parse only to validate; the raw string is what gets sent.
        ClaimDefOperationData::from_json(&data)
            .map_err(|err| CommonError::InvalidStructure(format!("Invalid data json: {}", err.to_string())))?;
        let operation = ClaimDefOperation::new(_ref, signature_type.to_string(), data.to_string());
        let request = Request::new(req_id,
                                   identifier.to_string(),
                                   operation);
        let request_json = Request::to_json(&request)
            .map_err(|err| CommonError::InvalidState(format!("Invalid claim_def request json: {}", err.to_string())))?;
        Ok(request_json)
    }

    /// Builds a GET_CLAIM_DEF request for the definition issued by `origin`.
    pub fn build_get_claim_def_request(&self, identifier: &str, _ref: i32, signature_type: &str, origin: &str) -> Result<String, CommonError> {
        Base58::decode(&identifier)?;
        Base58::decode(&origin)?;

        let req_id = LedgerService::get_req_id();
        let operation = GetClaimDefOperation::new(_ref,
                                                  signature_type.to_string(),
                                                  origin.to_string());
        let request = Request::new(req_id,
                                   identifier.to_string(),
                                   operation);
        let request_json = Request::to_json(&request)
            .map_err(|err| CommonError::InvalidState(format!("Invalid get_claim_def request json: {}", err.to_string())))?;
        Ok(request_json)
    }

    /// Builds a NODE request; `data` must deserialize as `NodeOperationData`.
    pub fn build_node_request(&self, identifier: &str, dest: &str, data: &str) -> Result<String, CommonError> {
        Base58::decode(&identifier)?;
        Base58::decode(&dest)?;

        let req_id = LedgerService::get_req_id();
        let data = NodeOperationData::from_json(&data)
            .map_err(|err| CommonError::InvalidStructure(format!("Invalid data json: {}", err.to_string())))?;
        let operation = NodeOperation::new(dest.to_string(), data);
        let request = Request::new(req_id,
                                   identifier.to_string(),
                                   operation);
        let request_json = Request::to_json(&request)
            .map_err(|err| CommonError::InvalidState(format!("Invalid node request json: {}", err.to_string())))?;
        Ok(request_json)
    }

    /// Builds a GET_TXN request for the transaction with sequence number `data`.
    pub fn build_get_txn_request(&self, identifier: &str, data: i32) -> Result<String, CommonError> {
        Base58::decode(&identifier)?;

        let req_id = LedgerService::get_req_id();
        let operation = GetTxnOperation::new(data);
        let request = Request::new(req_id,
                                   identifier.to_string(),
                                   operation);
        let request_json = Request::to_json(&request)
            .map_err(|err| CommonError::InvalidState(format!("Invalid get txn request json: {}", err.to_string())))?;
        Ok(request_json)
    }

    /// Derives a request id from the current wall-clock time, in nanoseconds
    /// since the UNIX epoch.
    fn get_req_id() -> u64 {
        // Read the clock once so `sec` and `nsec` come from the same instant;
        // two separate `get_time()` calls could straddle a second boundary.
        let now = time::get_time();
        now.sec as u64 * 1_000_000_000 + now.nsec as u64
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Tests assert on substrings of the serialized request because the full
    // JSON also contains a time-based reqId that cannot be pinned exactly.
    #[test]
    fn build_nym_request_works_for_only_required_fields() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";
        let dest = "dest";

        let expected_result = r#""identifier":"identifier","operation":{"type":"1","dest":"dest"}"#;

        let nym_request = ledger_service.build_nym_request(identifier, dest, None, None, None);
        assert!(nym_request.is_ok());

        let nym_request = nym_request.unwrap();
        assert!(nym_request.contains(expected_result));
    }

    #[test]
    fn build_nym_request_works_for_optional_fields() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";
        let dest = "dest";
        let verkey = "verkey";
        let alias = "some_alias";

        let expected_result = r#""identifier":"identifier","operation":{"type":"1","dest":"dest","verkey":"verkey","alias":"some_alias"}"#;

        let nym_request = ledger_service.build_nym_request(identifier, dest, Some(verkey), Some(alias), None);
        assert!(nym_request.is_ok());

        let nym_request = nym_request.unwrap();
        assert!(nym_request.contains(expected_result));
    }

    #[test]
    fn build_get_nym_request_works() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";
        let dest = "dest";

        let expected_result = r#""identifier":"identifier","operation":{"type":"105","dest":"dest"}"#;

        let get_nym_request = ledger_service.build_get_nym_request(identifier, dest);
        assert!(get_nym_request.is_ok());

        let get_nym_request = get_nym_request.unwrap();
        assert!(get_nym_request.contains(expected_result));
    }

    #[test]
    fn build_get_ddo_request_works() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";
        let dest = "dest";

        let expected_result = r#""identifier":"identifier","operation":{"type":"120","dest":"dest"}"#;

        let get_ddo_request = ledger_service.build_get_ddo_request(identifier, dest);
        assert!(get_ddo_request.is_ok());

        let get_ddo_request = get_ddo_request.unwrap();
        assert!(get_ddo_request.contains(expected_result));
    }

    // Negative case: ATTRIB requires at least one of hash/raw/enc.
    #[test]
    fn build_attrib_request_works_for_miss_attrib_field() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";
        let dest = "dest";

        let attrib_request = ledger_service.build_attrib_request(identifier, dest, None, None, None);
        assert!(attrib_request.is_err());
    }

    #[test]
    fn build_attrib_request_works_for_hash_field() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";
        let dest = "dest";
        let hash = "hash";

        let expected_result = r#""identifier":"identifier","operation":{"type":"100","dest":"dest","hash":"hash"}"#;

        let attrib_request = ledger_service.build_attrib_request(identifier, dest, Some(hash), None, None);
        assert!(attrib_request.is_ok());

        let attrib_request = attrib_request.unwrap();
        assert!(attrib_request.contains(expected_result));
    }

    #[test]
    fn build_get_attrib_request_works() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";
        let dest = "dest";
        let raw = "raw";

        let expected_result = r#""identifier":"identifier","operation":{"type":"104","dest":"dest","raw":"raw"}"#;

        let get_attrib_request = ledger_service.build_get_attrib_request(identifier, dest, raw);
        assert!(get_attrib_request.is_ok());

        let get_attrib_request = get_attrib_request.unwrap();
        assert!(get_attrib_request.contains(expected_result));
    }

    // Negative case: schema data missing required keys must be rejected.
    #[test]
    fn build_schema_request_works_for_wrong_data() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";
        let data = r#"{"name":"name"}"#;

        let get_attrib_request = ledger_service.build_schema_request(identifier, data);
        assert!(get_attrib_request.is_err());
    }

    #[test]
    fn build_schema_request_works_for_correct_data() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";
        let data = r#"{"name":"name", "version":"1.0", "keys":["name","male"]}"#;

        let expected_result = r#""operation":{"type":"101","data":"{\"name\":\"name\", \"version\":\"1.0\", \"keys\":[\"name\",\"male\"]"#;

        let schema_request = ledger_service.build_schema_request(identifier, data);
        assert!(schema_request.is_ok());

        let schema_request = schema_request.unwrap();
        assert!(schema_request.contains(expected_result));
    }

    #[test]
    fn build_get_schema_request_works_for_wrong_data() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";
        let data = r#"{"name":"name","keys":["name","male"]}"#;

        let get_schema_request = ledger_service.build_get_schema_request(identifier, identifier, data);
        assert!(get_schema_request.is_err());
    }

    #[test]
    fn build_get_schema_request_works_for_correct_data() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";
        let data = r#"{"name":"name","version":"1.0"}"#;

        let expected_result = r#""identifier":"identifier","operation":{"type":"107","dest":"identifier","data":{"name":"name","version":"1.0"}}"#;

        let get_schema_request = ledger_service.build_get_schema_request(identifier, identifier, data);
        assert!(get_schema_request.is_ok());

        let get_schema_request = get_schema_request.unwrap();
        assert!(get_schema_request.contains(expected_result));
    }

    #[test]
    fn build_get_claim_def_request_works() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";
        let _ref = 1;
        let signature_type = "signature_type";
        let origin = "origin";

        let expected_result = r#""identifier":"identifier","operation":{"type":"108","ref":1,"signature_type":"signature_type","origin":"origin"}"#;

        let get_claim_def_request = ledger_service.build_get_claim_def_request(identifier, _ref, signature_type, origin);
        assert!(get_claim_def_request.is_ok());

        let get_claim_def_request = get_claim_def_request.unwrap();
        assert!(get_claim_def_request.contains(expected_result));
    }

    #[test]
    fn build_node_request_works() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";
        let dest = "dest";
        let data = r#"{"node_ip":"ip", "node_port": 1, "client_ip": "ip", "client_port": 1, "alias":"some", "services": ["VALIDATOR"]}"#;

        let expected_result = r#""identifier":"identifier","operation":{"type":"0","dest":"dest","data":{"node_ip":"ip","node_port":1,"client_ip":"ip","client_port":1,"alias":"some","services":["VALIDATOR"]}}"#;

        let node_request = ledger_service.build_node_request(identifier, dest, data);
        assert!(node_request.is_ok());

        let node_request = node_request.unwrap();
        assert!(node_request.contains(expected_result));
    }

    // Negative case: node data missing alias/services must be rejected.
    #[test]
    fn build_node_request_works_for_wrong_data() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";
        let dest = "dest";
        let data = r#"{"node_ip":"ip", "node_port": 1, "client_ip": "ip", "client_port": 1}"#;

        let node_request = ledger_service.build_node_request(identifier, dest, data);
        assert!(node_request.is_err());
    }

    #[test]
    fn build_get_txn_request_works() {
        let ledger_service = LedgerService::new();
        let identifier = "identifier";

        let expected_result = r#""identifier":"identifier","operation":{"type":"106","data":1}"#;

        let get_txn_request = ledger_service.build_get_txn_request(identifier, 1);
        assert!(get_txn_request.is_ok());

        let get_txn_request = get_txn_request.unwrap();
        assert!(get_txn_request.contains(expected_result));
    }
}
| 41.837529 | 211 | 0.596948 |
22496c41bab3bf57152d742665668731e94e841b
| 16,012 |
// Copyright 2021 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
use crate::types::*;
use chrono::prelude::{Local, TimeZone};
use iota::{Address as IotaAddress, MessageId as RustMessageId};
use iota_wallet::{
address::AddressWrapper as RustAddressWrapper,
message::{
Message as RustWalletMessage, MessageType as RustMessageType,
RemainderValueStrategy as RustRemainderValueStrategy, Transfer as RustTransfer,
},
signing::SignerType as RustSingerType,
};
use pyo3::prelude::*;
use std::{
convert::{Into, TryInto},
num::NonZeroU64,
str::FromStr,
};
#[pymethods]
impl AccountSynchronizer {
    /// Number of address indexes that are generated.
    fn gap_limit(&mut self, limit: usize) {
        // Each setter takes the inner builder out of the Option, reconfigures
        // it, and stores it back.
        self.account_synchronizer = Some(self.account_synchronizer.take().unwrap().gap_limit(limit));
    }
    /// Skip saving new messages and addresses on the account object.
    /// The found data is returned on the `execute` call but won't be persisted on the database.
    fn skip_persistence(&mut self) {
        self.account_synchronizer = Some(self.account_synchronizer.take().unwrap().skip_persistence());
    }
    /// Initial address index to start syncing.
    fn address_index(&mut self, address_index: usize) {
        self.account_synchronizer = Some(self.account_synchronizer.take().unwrap().address_index(address_index));
    }
    /// Syncs account with the tangle.
    /// The account syncing process ensures that the latest metadata (balance, transactions)
    /// associated with an account is fetched from the tangle and is stored locally.
    fn execute(&mut self) -> Result<SyncedAccount> {
        // `take()` leaves the Option empty: this wrapper is single-use, and
        // calling any method after `execute` panics on the unwrap.
        let synced_account = crate::block_on(async { self.account_synchronizer.take().unwrap().execute().await })?;
        Ok(SyncedAccount { synced_account })
    }
}
#[pymethods]
impl Transfer {
    #[new]
    fn new(
        amount: u64,
        address: &str, // IotaAddress
        bench32_hrp: &str,
        indexation: Option<Indexation>,
        remainder_value_strategy: &str,
    ) -> Result<Self> {
        // Decode the bech32 target address and tag it with the given HRP.
        let address_wrapper = RustAddressWrapper::new(IotaAddress::try_from_bech32(address)?, bench32_hrp.to_string());
        // NOTE(review): `NonZeroU64::new(amount).unwrap()` panics when
        // amount == 0; consider returning an Err instead — confirm intent.
        let mut builder = RustTransfer::builder(address_wrapper, NonZeroU64::new(amount).unwrap());
        // Any strategy string other than the two named ones falls back to
        // sending the remainder to the *target* address — TODO confirm this
        // fallback is intended rather than an error.
        let strategy = match remainder_value_strategy {
            "ReuseAddress" => RustRemainderValueStrategy::ReuseAddress,
            "ChangeAddress" => RustRemainderValueStrategy::ChangeAddress,
            _ => RustRemainderValueStrategy::AccountAddress(RustAddressWrapper::new(
                IotaAddress::try_from_bech32(address)?,
                bench32_hrp.to_string(),
            )),
        };
        builder = builder.with_remainder_value_strategy(strategy);
        if let Some(indexation) = indexation {
            builder = builder.with_indexation(indexation.try_into()?);
        }
        Ok(Transfer {
            transfer: builder.finish(),
        })
    }
}
#[pymethods]
impl AccountHandle {
    /// Returns the builder to setup the process to synchronize this account with the Tangle.
    fn sync(&self) -> AccountSynchronizer {
        let account_synchronizer = crate::block_on(async { self.account_handle.sync().await });
        AccountSynchronizer {
            account_synchronizer: Some(account_synchronizer),
        }
    }
    /// Send messages.
    /// Blocks the calling (Python) thread until the transfer completes.
    fn transfer(&self, transfer_obj: Transfer) -> Result<WalletMessage> {
        crate::block_on(async { self.account_handle.transfer(transfer_obj.transfer).await?.try_into() })
    }
    /// Retry message.
    /// `message_id` is parsed from its hex string form.
    fn retry(&self, message_id: &str) -> Result<WalletMessage> {
        crate::block_on(async {
            self.account_handle
                .retry(&RustMessageId::from_str(&message_id)?)
                .await?
                .try_into()
        })
    }
    /// Promote message.
    fn promote(&self, message_id: &str) -> Result<WalletMessage> {
        crate::block_on(async {
            self.account_handle
                .promote(&RustMessageId::from_str(&message_id)?)
                .await?
                .try_into()
        })
    }
    /// Reattach message.
    fn reattach(&self, message_id: &str) -> Result<WalletMessage> {
        crate::block_on(async {
            self.account_handle
                .reattach(&RustMessageId::from_str(&message_id)?)
                .await?
                .try_into()
        })
    }
}
#[pymethods]
impl AccountHandle {
    /// Bridge to [Account#id](struct.Account.html#method.id).
    /// Returns the account ID.
    fn id(&self) -> String {
        crate::block_on(async { self.account_handle.id().await })
    }
    /// Bridge to [Account#signer_type](struct.Account.html#method.signer_type).
    /// Return the singer type of this account.
    fn signer_type(&self) -> String {
        match crate::block_on(async { self.account_handle.signer_type().await }) {
            RustSingerType::Stronghold => "Stronghold".to_string(),
            RustSingerType::LedgerNano => "LedgerNano".to_string(),
            RustSingerType::LedgerNanoSimulator => "LedgerNanoSimulator".to_string(),
            RustSingerType::Custom(s) => s,
        }
    }
    /// Bridge to [Account#index](struct.Account.html#method.index).
    /// Return the account index.
    fn index(&self) -> usize {
        crate::block_on(async { self.account_handle.index().await })
    }
    /// Bridge to [Account#alias](struct.Account.html#method.alias).
    /// Return the account alias.
    fn alias(&self) -> String {
        crate::block_on(async { self.account_handle.alias().await })
    }
    /// Bridge to [Account#created_at](struct.Account.html#method.created_at).
    /// Return the created UNIX timestamp
    fn created_at(&self) -> i64 {
        crate::block_on(async { self.account_handle.created_at().await }).timestamp()
    }
    /// Bridge to [Account#last_synced_at](struct.Account.html#method.last_synced_at).
    /// Return the last synced UNIX timestamp
    fn last_synced_at(&self) -> Option<i64> {
        crate::block_on(async { self.account_handle.last_synced_at().await }).map(|t| t.timestamp())
    }
    /// Bridge to [Account#client_options](struct.Account.html#method.client_options).
    /// Return the client options of this account
    fn client_options(&self) -> ClientOptions {
        crate::block_on(async { self.account_handle.client_options().await }).into()
    }
    /// Bridge to [Account#bech32_hrp](struct.Account.html#method.bech32_hrp).
    fn bech32_hrp(&self) -> String {
        crate::block_on(async { self.account_handle.bech32_hrp().await })
    }
    /// Consolidate outputs.
    fn consolidate_outputs(&self) -> Result<Vec<WalletMessage>> {
        let rust_messages = crate::block_on(async { self.account_handle.consolidate_outputs().await })?;
        let mut messages = Vec::new();
        for message in rust_messages {
            messages.push(message.try_into()?);
        }
        Ok(messages)
    }
    /// Gets a new unused address and links it to this account.
    fn generate_address(&self) -> Result<Address> {
        Ok(crate::block_on(async { self.account_handle.generate_address().await })?.into())
    }
    /// Synchronizes the account addresses with the Tangle and returns the latest address in the account,
    /// which is an address without balance.
    fn get_unused_address(&self) -> Result<Address> {
        Ok(crate::block_on(async { self.account_handle.get_unused_address().await })?.into())
    }
    /// Syncs the latest address with the Tangle and determines whether it's unused or not.
    /// An unused address is an address without balance and associated message history.
    /// Note that such address might have been used in the past, because the message history might have been pruned by
    /// the node.
    fn is_latest_address_unused(&self) -> Result<bool> {
        Ok(crate::block_on(async {
            self.account_handle.is_latest_address_unused().await
        })?)
    }
    /// Bridge to [Account#latest_address](struct.Account.html#method.latest_address).
    fn latest_address(&self) -> Address {
        crate::block_on(async { self.account_handle.latest_address().await }).into()
    }
    /// Bridge to [Account#addresses](struct.Account.html#method.addresses).
    fn addresses(&self) -> Vec<Address> {
        let addresses = crate::block_on(async { self.account_handle.addresses().await });
        addresses.into_iter().map(|address| address.into()).collect()
    }
    /// Bridge to [Account#balance](struct.Account.html#method.balance).
    fn balance(&self) -> AccountBalance {
        crate::block_on(async { self.account_handle.balance().await }).into()
    }
    /// Bridge to [Account#set_alias](struct.Account.html#method.set_alias).
    fn set_alias(&self, alias: &str) -> Result<()> {
        Ok(crate::block_on(async { self.account_handle.set_alias(alias).await })?)
    }
    /// Bridge to [Account#set_client_options](struct.Account.html#method.set_client_options).
    fn set_client_options(&self, options: ClientOptions) -> Result<()> {
        Ok(crate::block_on(async {
            self.account_handle.set_client_options(options.into()).await
        })?)
    }
    /// The number of messages associated with the account.
    /// `message_type` accepts "Received", "Sent", "Failed", "Unconfirmed" or
    /// "Value"; anything else (including None) counts all messages.
    fn message_count(&self, message_type: Option<&str>) -> usize {
        let message_type = match message_type {
            Some("Received") => Some(RustMessageType::Received),
            Some("Sent") => Some(RustMessageType::Sent),
            Some("Failed") => Some(RustMessageType::Failed),
            Some("Unconfirmed") => Some(RustMessageType::Unconfirmed),
            Some("Value") => Some(RustMessageType::Value),
            _ => None,
        };
        crate::block_on(async {
            self.account_handle
                .read()
                .await
                .list_messages(0, 0, message_type)
                // Count the returned collection directly; the previous
                // `.iter().len()` built an iterator only to take its length.
                .len()
        })
    }
    /// Bridge to [Account#list_messages](struct.Account.html#method.list_messages).
    /// This method clones the account's messages so when querying a large list of messages
    /// prefer using the `read` method to access the account instance.
    fn list_messages(
        &self,
        count: Option<usize>,
        from: Option<usize>,
        message_type: Option<&str>,
    ) -> Result<Vec<WalletMessage>> {
        // Same string-to-enum mapping as `message_count`; unknown strings
        // deliberately fall back to "no filter".
        let message_type = match message_type {
            Some("Received") => Some(RustMessageType::Received),
            Some("Sent") => Some(RustMessageType::Sent),
            Some("Failed") => Some(RustMessageType::Failed),
            Some("Unconfirmed") => Some(RustMessageType::Unconfirmed),
            Some("Value") => Some(RustMessageType::Value),
            _ => None,
        };
        let messages = crate::block_on(async {
            self.account_handle
                .list_messages(count.unwrap_or(0), from.unwrap_or(0), message_type)
                .await
        });
        let mut parsed_messages = Vec::new();
        for message in messages {
            parsed_messages.push(message.try_into()?);
        }
        Ok(parsed_messages)
    }
    /// Bridge to [Account#list_spent_addresses](struct.Account.html#method.list_spent_addresses).
    /// This method clones the account's addresses so when querying a large list of addresses
    /// prefer using the `read` method to access the account instance.
    fn list_spent_addresses(&self) -> Vec<Address> {
        let addresses = crate::block_on(async { self.account_handle.list_spent_addresses().await });
        addresses.into_iter().map(|addr| addr.into()).collect()
    }
    /// Bridge to [Account#list_unspent_addresses](struct.Account.html#method.list_unspent_addresses).
    /// This method clones the account's addresses so when querying a large list of addresses
    /// prefer using the `read` method to access the account instance.
    fn list_unspent_addresses(&self) -> Vec<Address> {
        let addresses = crate::block_on(async { self.account_handle.list_unspent_addresses().await });
        addresses.into_iter().map(|addr| addr.into()).collect()
    }
    /// Bridge to [Account#get_message](struct.Account.html#method.get_message).
    fn get_message(&self, message_id: &str) -> Result<Option<WalletMessage>> {
        let res: Result<Option<RustWalletMessage>> = crate::block_on(async {
            Ok(self
                .account_handle
                .get_message(&RustMessageId::from_str(&message_id)?)
                .await)
        });
        if let Some(message) = res? {
            Ok(Some(message.try_into()?))
        } else {
            Ok(None)
        }
    }
}
#[pymethods]
impl AccountInitialiser {
    /// Sets the account type.
    fn signer_type(&mut self, signer_type: &str) {
        // Unknown strings become a Custom signer rather than an error.
        let signer_type = match signer_type {
            "Stronghold" => RustSingerType::Stronghold,
            "LedgerNano" => RustSingerType::LedgerNano,
            "LedgerNanoSimulator" => RustSingerType::LedgerNanoSimulator,
            _ => RustSingerType::Custom(signer_type.to_string()),
        };
        self.account_initialiser = Some(self.account_initialiser.take().unwrap().signer_type(signer_type));
    }
    /// Defines the account alias. If not defined, we'll generate one.
    fn alias(&mut self, alias: &str) {
        self.account_initialiser = Some(self.account_initialiser.take().unwrap().alias(alias));
    }
    /// Time of account creation.
    /// `created_at` is a UNIX timestamp in seconds, interpreted in local time.
    fn created_at(&mut self, created_at: i64) {
        self.account_initialiser = Some(
            self.account_initialiser
                .take()
                .unwrap()
                .created_at(Local.timestamp(created_at, 0)),
        );
    }
    /// Messages associated with the seed.
    /// The account can be initialised with locally stored messages.
    fn messages(&mut self, messages: Vec<WalletMessage>) {
        let mut account_initialiser = self.account_initialiser.take().unwrap();
        let messages = messages
            .into_iter()
            .map(|msg| {
                // we use an empty bech32 HRP here because we update it later on wallet.rs
                crate::block_on(to_rust_message(
                    msg,
                    "".to_string(),
                    self.accounts.clone(),
                    "",
                    &self.addresses,
                    &account_initialiser.client_options,
                ))
                // NOTE(review): the closure argument here is the *error* value
                // from the conversion, despite being named `msg`.
                .unwrap_or_else(|msg| panic!("AccountInitialiser: Message {:?} is invalid", msg))
            })
            .collect();
        account_initialiser = account_initialiser.messages(messages);
        self.account_initialiser = Some(account_initialiser);
    }
    /// Address history associated with the seed.
    /// The account can be initialised with locally stored address history.
    ///
    /// NOTE(review): each address is converted twice — the first loop
    /// propagates conversion errors via `?`, while the second conversion of
    /// the same data panics on failure (unreachable if the first succeeded).
    fn addresses(&mut self, addresses: Vec<WalletAddress>) -> Result<()> {
        for address in &addresses {
            self.addresses.push(address.clone().try_into()?);
        }
        self.account_initialiser = Some(
            self.account_initialiser.take().unwrap().addresses(
                addresses
                    .into_iter()
                    .map(|address| {
                        address
                            .try_into()
                            .unwrap_or_else(|msg| panic!("AccountInitialiser: Address {:?} is invalid", msg))
                    })
                    .collect(),
            ),
        );
        Ok(())
    }
    /// Skips storing the account to the database.
    fn skip_persistence(&mut self) {
        self.account_initialiser = Some(self.account_initialiser.take().unwrap().skip_persistence());
    }
    /// Initialises the account.
    /// Consumes the inner builder: calling this twice panics on the unwrap.
    fn initialise(&mut self) -> Result<AccountHandle> {
        let account_handle = crate::block_on(async { self.account_initialiser.take().unwrap().initialise().await })?;
        Ok(AccountHandle { account_handle })
    }
}
| 39.633663 | 119 | 0.622471 |
1ad9d1f67cd8583e4a278698b0a35687f13e2ed3
| 1,931 |
/* _ _ _
* __| |_ _ ___ _ __( |_)_ _
* / _` | '_/ _ \ '_ \/| | ' \
* \__,_|_| \___/ .__/ |_|_||_| dropin-compiler - WebAssembly
* |_|
* Copyright © 2019-2022 Blue Forest
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use serde_derive::{Deserialize, Serialize};
use std::fs::{read_to_string, write};
use std::path::{Path, PathBuf};
/// Handle on the on-disk configuration: remembers where the file lives and
/// its parsed content.
pub struct Config {
  path: PathBuf,     // location of config.toml under the project root
  content: Content,  // parsed TOML payload
}
/// Serializable body of `config.toml`; both keys are optional.
#[derive(Deserialize, Serialize)]
struct Content {
  owner: Option<String>,
  model: Option<String>,
}
impl Config {
  /// Loads `<root>/config.toml`, falling back to an empty configuration when
  /// the file does not exist yet. Panics if the file exists but cannot be
  /// read or parsed.
  pub fn new(root: &Path) -> Self {
    let path = root.join("config.toml");
    let content = if path.exists() {
      let raw = read_to_string(&path).unwrap();
      toml::from_str(&raw).unwrap()
    } else {
      Content {
        owner: None,
        model: None,
      }
    };
    Self { path, content }
  }

  /// Records the owner and persists the file immediately.
  pub fn set_owner(&mut self, owner: String) {
    self.content.owner = Some(owner);
    self.save();
  }

  /// Currently configured owner, if any.
  pub fn owner(&self) -> &Option<String> {
    &self.content.owner
  }

  /// Records the model and persists the file immediately.
  pub fn set_model(&mut self, model: String) {
    self.content.model = Some(model);
    self.save();
  }

  /// Currently configured model, if any.
  pub fn model(&self) -> &Option<String> {
    &self.content.model
  }

  /// Serializes the current content back to disk; panics on I/O or
  /// serialization errors.
  fn save(&self) {
    let serialized = toml::to_string(&self.content).unwrap();
    write(&self.path, serialized).unwrap();
  }
}
| 25.407895 | 75 | 0.656137 |
c11563b3539e8139c387631c45d1f2f73e3d19ed
| 9,821 |
use clap::{App, Arg};
use csv::{ReaderBuilder, StringRecord, WriterBuilder};
use regex::Regex;
use std::error::Error;
use std::fs::File;
use std::io::{self, BufRead, BufReader};
// Convenience alias: every fallible function in this program boxes its error.
type MyResult<T> = Result<T, Box<dyn Error>>;
// Zero-based positions selected by --fields / --bytes / --chars.
type PositionList = Vec<usize>;
/// Parsed and validated command-line options.
#[derive(Debug)]
pub struct Config {
    // Input file paths; "-" means stdin.
    files: Vec<String>,
    // Single-byte field delimiter (defaults to tab).
    delimiter: u8,
    // At most one of the following three is Some (enforced in get_args).
    fields: Option<PositionList>,
    bytes: Option<PositionList>,
    chars: Option<PositionList>,
}
// --------------------------------------------------
/// Parses command-line arguments into a validated `Config`.
///
/// # Errors
/// Returns an error when the delimiter is not exactly one byte, when any
/// position list is malformed, or when none of `--fields`, `--bytes`, or
/// `--chars` was given.
pub fn get_args() -> MyResult<Config> {
    let matches = App::new("cutr")
        .version("0.1.0")
        .author("Ken Youens-Clark <[email protected]>")
        .about("Rust cut")
        .arg(
            Arg::with_name("files")
                .value_name("FILE")
                .help("Input file(s)")
                .required(true)
                .default_value("-")
                .min_values(1),
        )
        .arg(
            Arg::with_name("delimiter")
                .value_name("DELIMITER")
                .help("Field delimiter")
                .short("d")
                .long("delim")
                .default_value("\t"),
        )
        .arg(
            Arg::with_name("fields")
                .value_name("FIELDS")
                .help("Selected fields")
                .short("f")
                .long("fields")
                .conflicts_with_all(&["chars", "bytes"]),
        )
        .arg(
            Arg::with_name("bytes")
                .value_name("BYTES")
                .help("Selected bytes")
                .short("b")
                .long("bytes")
                .conflicts_with_all(&["fields", "chars"]),
        )
        .arg(
            Arg::with_name("chars")
                .value_name("CHARS")
                .help("Selected characters")
                .short("c")
                .long("chars")
                .conflicts_with_all(&["fields", "bytes"]),
        )
        .get_matches();
    let delim = matches.value_of("delimiter").unwrap_or("\t");
    let delim_bytes = delim.as_bytes();
    // Require exactly one byte: the old `> 1` check let an empty string
    // through, and `delim_bytes[0]` below would then panic.
    if delim_bytes.len() != 1 {
        return Err(From::from(format!(
            "--delim \"{}\" must be a single byte",
            delim
        )));
    }
    let fields = parse_positions(matches.value_of("fields"))?;
    let bytes = parse_positions(matches.value_of("bytes"))?;
    let chars = parse_positions(matches.value_of("chars"))?;
    // Exactly one selection mode must be present (clap enforces mutual
    // exclusion; this enforces "at least one").
    if [&fields, &bytes, &chars].iter().all(|v| v.is_none()) {
        return Err(From::from("Must have --fields, --bytes, or --chars"));
    }
    Ok(Config {
        files: matches.values_of_lossy("files").unwrap(),
        delimiter: delim_bytes[0],
        fields,
        bytes,
        chars,
    })
}
// --------------------------------------------------
/// Entry point: cuts every input file, reporting per-file errors on stderr
/// without aborting the remaining files.
pub fn run(config: Config) -> MyResult<()> {
    for filename in &config.files {
        match cut(filename, &config) {
            Ok(()) => {}
            Err(err) => eprintln!("{}: {}", filename, err),
        }
    }
    Ok(())
}
// --------------------------------------------------
/// Cuts a single input ("-" = stdin) according to whichever selection mode
/// is set in `config`, writing the result to stdout.
pub fn cut(filename: &str, config: &Config) -> MyResult<()> {
    let reader: Box<dyn BufRead> = if filename == "-" {
        Box::new(BufReader::new(io::stdin()))
    } else {
        Box::new(BufReader::new(File::open(filename)?))
    };
    if let Some(positions) = &config.fields {
        // Field mode: parse as delimited records and re-emit selected columns.
        let mut rdr = ReaderBuilder::new()
            .delimiter(config.delimiter)
            .has_headers(false)
            .from_reader(reader);
        let mut writer = WriterBuilder::new()
            .delimiter(config.delimiter)
            .from_writer(io::stdout());
        for result in rdr.records() {
            let rec = result?;
            writer.write_record(extract_fields(&rec, positions))?;
        }
    } else if let Some(positions) = &config.bytes {
        for line in reader.lines() {
            println!("{}", extract_bytes(&line?, positions));
        }
    } else if let Some(positions) = &config.chars {
        for line in reader.lines() {
            println!("{}", extract_chars(&line?, positions));
        }
    }
    Ok(())
}
// --------------------------------------------------
/// Parses a comma-separated list of one-based positions and ranges
/// (e.g. "1,3-5") into zero-based indices.
///
/// # Errors
/// Rejects empty or non-numeric values, the position 0 (which previously
/// underflowed in the 1-based → 0-based conversion), half-open ranges such
/// as "1-" or "-3" (which previously panicked on a missing capture group),
/// and ranges whose first number is not lower than the second.
fn parse_positions(range: Option<&str>) -> MyResult<Option<PositionList>> {
    let range_val = match range {
        Some(val) => val,
        None => return Ok(None),
    };
    let mut fields: Vec<usize> = vec![];
    let range_re = Regex::new(r"(\d+)?-(\d+)?").unwrap();
    for val in range_val.split(',') {
        if let Some(cap) = range_re.captures(val) {
            // Both endpoints are optional in the regex; require them here
            // instead of indexing (`cap[1]`/`cap[2]` panic when absent).
            let (n1, n2) = match (cap.get(1), cap.get(2)) {
                (Some(m1), Some(m2)) => (
                    m1.as_str().parse::<usize>()?,
                    m2.as_str().parse::<usize>()?,
                ),
                _ => {
                    return Err(From::from(format!(
                        "illegal list value: \"{}\"",
                        val
                    )))
                }
            };
            // Positions are one-based on the command line.
            if n1 == 0 || n2 == 0 {
                return Err(From::from(format!(
                    "illegal list value: \"{}\"",
                    val
                )));
            }
            if n1 < n2 {
                fields.extend(n1..=n2);
            } else {
                return Err(From::from(format!(
                    concat!(
                        "First number in range ({}) ",
                        "must be lower than second number ({})"
                    ),
                    n1, n2
                )));
            }
        } else {
            match val.parse::<usize>() {
                Ok(0) => {
                    return Err(From::from(format!(
                        "illegal list value: \"{}\"",
                        val
                    )))
                }
                Ok(n) => fields.push(n),
                Err(_) => {
                    return Err(From::from(format!(
                        "illegal list value: \"{}\"",
                        val
                    )))
                }
            }
        }
    }
    // Convert from one-based (CLI) to zero-based (internal) indexing.
    Ok(Some(fields.into_iter().map(|i| i - 1).collect()))
}
// --------------------------------------------------
// Returns the record's fields at the given zero-based positions; positions
// past the end of the record are silently skipped.
fn extract_fields<'a>(
    record: &'a StringRecord,
    field_pos: &'a PositionList,
) -> Vec<&'a str> {
    let mut selected = Vec::with_capacity(field_pos.len());
    for &pos in field_pos.iter() {
        if let Some(field) = record.get(pos) {
            selected.push(field);
        }
    }
    selected
}
// --------------------------------------------------
//fn extract_fields(
// record: &StringRecord,
// field_pos: &PositionList,
//) -> Vec<String> {
// field_pos
// .iter()
// .filter_map(|i| record.get(*i))
// .map(|v| v.to_string())
// .collect()
//}
// --------------------------------------------------
// Returns the line's bytes at the given zero-based positions as a string;
// out-of-range positions are skipped and invalid UTF-8 sequences become
// U+FFFD replacement characters.
fn extract_bytes(line: &str, byte_pos: &[usize]) -> String {
    let all = line.as_bytes();
    let mut picked: Vec<u8> = Vec::with_capacity(byte_pos.len());
    for &pos in byte_pos {
        if let Some(&byte) = all.get(pos) {
            picked.push(byte);
        }
    }
    String::from_utf8_lossy(&picked).into_owned()
}
// --------------------------------------------------
// Returns the line's characters at the given zero-based positions;
// out-of-range positions are skipped. Takes `&[usize]` (rather than
// `&PositionList`) for consistency with `extract_bytes`; existing callers
// passing `&PositionList` still compile via deref coercion.
fn extract_chars(line: &str, char_pos: &[usize]) -> String {
    let chars: Vec<char> = line.chars().collect();
    char_pos
        .iter()
        .filter_map(|i| chars.get(*i))
        .collect::<String>()
}
// --------------------------------------------------
#[cfg(test)]
mod tests {
    use super::{
        extract_bytes, extract_chars, extract_fields, parse_positions,
    };
    use csv::StringRecord;
    // Positions are one-based on the command line but zero-based in the
    // returned PositionList; ranges must be strictly increasing.
    #[test]
    fn test_parse_positions() {
        let res1 = parse_positions(None);
        assert!(res1.is_ok());
        if let Ok(val1) = res1 {
            assert!(val1.is_none());
        }
        assert!(parse_positions(Some("")).is_err());
        assert!(parse_positions(Some("a")).is_err());
        assert!(parse_positions(Some("1,a")).is_err());
        assert!(parse_positions(Some("2-1")).is_err());
        let res2 = parse_positions(Some("1"));
        assert!(res2.is_ok());
        if let Some(val2) = res2.unwrap() {
            assert_eq!(val2, vec![0]);
        }
        let res3 = parse_positions(Some("1,3"));
        assert!(res3.is_ok());
        if let Some(val3) = res3.unwrap() {
            assert_eq!(val3, vec![0, 2]);
        }
        let res4 = parse_positions(Some("1-3"));
        assert!(res4.is_ok());
        if let Some(val4) = res4.unwrap() {
            assert_eq!(val4, vec![0, 1, 2]);
        }
        // Order of the input list is preserved in the output.
        let res5 = parse_positions(Some("1,7,3-5"));
        assert!(res5.is_ok());
        if let Some(val5) = res5.unwrap() {
            assert_eq!(val5, vec![0, 6, 2, 3, 4]);
        }
    }
    // Out-of-range positions are silently dropped; selection order is kept.
    #[test]
    fn test_extract_fields() {
        let rec = StringRecord::from(vec!["Captain", "Sham", "12345"]);
        assert_eq!(extract_fields(&rec, &vec![0]), vec!["Captain"]);
        assert_eq!(extract_fields(&rec, &vec![1]), vec!["Sham"]);
        assert_eq!(
            extract_fields(&rec, &vec![0, 2]),
            vec!["Captain", "12345"]
        );
        assert_eq!(extract_fields(&rec, &vec![0, 3]), vec!["Captain"]);
        assert_eq!(
            extract_fields(&rec, &vec![1, 0]),
            vec!["Sham", "Captain"]
        );
    }
    // Character selection is by Unicode scalar value, not by byte.
    #[test]
    fn test_extract_chars() {
        assert_eq!(extract_chars("", &vec![0]), "".to_string());
        assert_eq!(extract_chars("ábc", &vec![0]), "á".to_string());
        assert_eq!(extract_chars("ábc", &vec![0, 2]), "ác".to_string());
        assert_eq!(extract_chars("ábc", &vec![0, 1, 2]), "ábc".to_string());
        assert_eq!(extract_chars("ábc", &vec![2, 1]), "cb".to_string());
        assert_eq!(extract_chars("ábc", &vec![0, 1, 4]), "áb".to_string());
    }
    // Byte selection can split a multi-byte character, producing U+FFFD.
    #[test]
    fn test_extract_bytes() {
        assert_eq!(extract_bytes("ábc", &vec![0]), "�".to_string());
        assert_eq!(extract_bytes("ábc", &vec![0, 1]), "á".to_string());
        assert_eq!(extract_bytes("ábc", &vec![0, 1, 2]), "áb".to_string());
        assert_eq!(
            extract_bytes("ábc", &vec![0, 1, 2, 3]),
            "ábc".to_string()
        );
        assert_eq!(extract_bytes("ábc", &vec![3, 2]), "cb".to_string());
        assert_eq!(extract_bytes("ábc", &vec![0, 1, 5]), "á".to_string());
    }
}
| 31.680645 | 76 | 0.455656 |
fc68ca7d64a0bd6917a739dc5a23f63120c6bf91
| 8,242 |
use crate::{
candidates :: { CandidateSet, traits :: CandidatesRead },
bitsets :: traits::BitSet,
indices,
};
use self::{ value_key::*, traits::* };
pub type LocationSet = std::collections::HashSet<usize>;
pub mod traits {
    use super::*;
    /// Bundles everything the value-tracking routines need: read/write access
    /// to the value records plus read access to cell candidate sets.
    pub trait ValueBase: ValuesRead + ValuesModify + CandidatesRead { }
    /// Read-only access to the per-(value, unit) location records.
    pub trait ValuesRead {
        // Locations currently recorded for `key`, if the record exists.
        fn get(&self, key: &ValueKey) -> Option<&LocationSet>;
    }
    /// Mutating access to the per-(value, unit) location records.
    pub trait ValuesModify {
        // Replaces the whole location set stored for `key`.
        fn insert(&mut self, key: ValueKey, values: LocationSet) -> Option<LocationSet>;
        // Adds a single location (`value` is a cell index here) to `key`'s set.
        fn insert_into(&mut self, key: ValueKey, value: usize) -> Option<&LocationSet>;
        // Drops the whole record for `key`.
        fn remove(&mut self, key: &ValueKey) -> Option<LocationSet>;
        // Removes a single location from `key`'s set.
        fn remove_from(&mut self, key: &ValueKey, value: usize) -> Option<&LocationSet>;
    }
    /// High-level maintenance operations, blanket-implemented for `ValueBase`.
    pub trait Values {
        fn init_values(&mut self);
        fn update_values(&mut self, index: usize, new_value: u8, old_value: u8);
    }
}
impl<T> Values for T where T: ValueBase {
    // Builds the value records from scratch by scanning all nine rows,
    // columns and boxes of the grid.
    fn init_values(&mut self) {
        for i in 0..9 {
            populate(self, SetType::Row, i);
            populate(self, SetType::Col, i);
            populate(self, SetType::Box, i);
        }
    }
    // Incrementally maintains the value records after the cell at `index`
    // changes from `old_value` to `new_value` (0 appears to mean "cleared").
    fn update_values(&mut self, index: usize, new_value: u8, old_value: u8) {
        if new_value == 0 {
            // For each allowed candidate at this index insert the index to the corresponding value map
            insert_all(self, index);
            // The remaining cells in this row, column, and box can now potentially contain the value that is being
            // removed. But this depends on the other values in the rows, columns, and boxes that the other cells are
            // associated with. Never the less, we must handle this:
            for &key in rcb_value_key(index, old_value).iter() {
                insert_for(self, key);
            }
        }
        else {
            // Since every index is associated with three value records (Row, Column and Box), get the appropriate keys
            // and perform updates for each of them
            for &key in rcb_value_key(index, new_value).iter() {
                remove_for(self, key);
            }
            // All the remaining values stored at the given index should be removed from the Row, Column and Box records
            // that contain this index. This is done as the last step because the list of locations that were stored
            // within the RCB records for this value were needed to clean up associated cells. Since that is now done,
            // the remaining data can be removed without exceptions.
            remove_all(self, index);
        }
    }
}
// Records, for every cell of one row/column/box, each of the cell's
// candidates as a possible location of that value within the unit.
fn populate<T: ValueBase>(context: &mut T, set_type: SetType, set_index: usize) {
    // Resolve the nine cell indices that make up this unit.
    let cells = match set_type {
        SetType::Row => indices::row_at(set_index),
        SetType::Col => indices::col_at(set_index),
        SetType::Box => indices::box_at(set_index),
    };
    for &cell in cells.iter() {
        // Copy the candidate set out so the immutable borrow of `context`
        // ends before the mutations below.
        let candidates = if let Some(set) = CandidatesRead::get(context, cell) {
            *set
        } else {
            continue;
        };
        for candidate in candidates.iter() {
            let key = ValueKey { value: candidate.into(), set_type, set_index };
            context.insert_into(key, cell);
        }
    }
}
// Re-inserts `key.value` as a possibility for every cell of `key`'s unit
// whose candidate set still allows it, mirroring each insertion into the
// records of the other two unit kinds that contain that cell.
fn insert_for<T: ValueBase>(context: &mut T, key: ValueKey) {
    // affected.0: the cells of this unit; affected.1: the other two unit
    // kinds whose records must be kept in sync with this one.
    let affected = match key.set_type {
        SetType::Row => (indices::row_at(key.set_index), [SetType::Col, SetType::Box]),
        SetType::Col => (indices::col_at(key.set_index), [SetType::Row, SetType::Box]),
        SetType::Box => (indices::box_at(key.set_index), [SetType::Row, SetType::Col]),
    };
    for &index in affected.0.iter() {
        // Copy the candidate set; skips cells with no candidate record.
        let candidate_set = match CandidatesRead::get(context, index) {
            Some(set) => *set,
            None => continue
        };
        // Only cells that still allow this value get (re-)registered.
        if !candidate_set.contains(CandidateSet::from(key.value)) {
            continue;
        }
        context.insert_into(key, index);
        // Mirror the insertion into the associated row/col/box records.
        for &set_type in affected.1.iter() {
            let associated_key = ValueKey {
                value: key.value,
                set_type,
                set_index: match set_type {
                    SetType::Row => indices::row_index(index),
                    SetType::Col => indices::col_index(index),
                    SetType::Box => indices::box_index(index)
                }
            };
            context.insert_into(associated_key, index);
        }
    }
}
// Removes `key`'s whole record and then scrubs each of its former locations
// from the associated records of the other two unit kinds, dropping any
// record that ends up empty.
fn remove_for<T: ValueBase>(context: &mut T, key: ValueKey) {
    // Start by removing the value, along with all of its locations, from within whichever collection the given key
    // refers to
    let location_set = match context.remove(&key) {
        Some(set) => set,
        None => return
    };
    // Next, for each of the locations where the value could have been present within this collection, we need to
    // update the other two types of collections and remove those specific indices.
    let associated_sets = match key.set_type {
        SetType::Row => [SetType::Col, SetType::Box],
        SetType::Col => [SetType::Row, SetType::Box],
        SetType::Box => [SetType::Row, SetType::Col]
    };
    for &cell_index in location_set.iter() {
        for &set_type in associated_sets.iter() {
            let key = ValueKey {
                value: key.value,
                set_type,
                set_index: match set_type {
                    SetType::Row => indices::row_index(cell_index),
                    SetType::Col => indices::col_index(cell_index),
                    SetType::Box => indices::box_index(cell_index)
                }
            };
            // Drop the associated record entirely once its last location is gone.
            match context.remove_from(&key, cell_index) {
                Some(remaining) => if remaining.is_empty() { context.remove(&key); },
                None => continue
            }
        }
    }
}
// Registers `index` as a possible location, in its row, column and box
// records, for every candidate currently allowed at that cell.
fn insert_all<T: ValueBase>(context: &mut T, index: usize) {
    // Copy the candidate set out so the immutable borrow ends before mutation.
    let candidates = if let Some(set) = CandidatesRead::get(context, index) {
        *set
    } else {
        return;
    };
    for candidate in candidates.iter() {
        let value: u8 = candidate.into();
        for &key in rcb_value_key(index, value).iter() {
            context.insert_into(key, index);
        }
    }
}
// Removes `index` from the row/column/box records of every candidate still
// stored at that cell, dropping any record that ends up empty.
fn remove_all<T: ValueBase>(context: &mut T, index: usize) {
    // Copy the candidate set out so the immutable borrow ends before mutation.
    let candidates = if let Some(set) = CandidatesRead::get(context, index) {
        *set
    } else {
        return;
    };
    for candidate in candidates.iter() {
        let value: u8 = candidate.into();
        for &key in rcb_value_key(index, value).iter() {
            if let Some(remaining) = context.remove_from(&key, index) {
                if remaining.is_empty() {
                    context.remove(&key);
                }
            }
        }
    }
}
// Builds the three value-record keys (row, column, box) that the cell at
// `cell_index` participates in for the given value.
#[inline]
fn rcb_value_key(cell_index: usize, value: u8) -> [ValueKey; 3] {
    let row = ValueKey { value, set_type: SetType::Row, set_index: indices::row_index(cell_index) };
    let col = ValueKey { value, set_type: SetType::Col, set_index: indices::col_index(cell_index) };
    let boxed = ValueKey { value, set_type: SetType::Box, set_index: indices::box_index(cell_index) };
    [row, col, boxed]
}
pub mod value_key {
    /// Which kind of unit a value record belongs to.
    #[repr(u8)]
    #[derive(PartialEq, Eq, Ord, PartialOrd, Hash, Copy, Clone, Debug)]
    pub enum SetType { Row, Col, Box }
    impl ::core::fmt::Display for SetType {
        // Single-letter label: "R", "C" or "B".
        fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
            let label = match self {
                SetType::Row => "R",
                SetType::Col => "C",
                SetType::Box => "B",
            };
            write!(f, "{}", label)
        }
    }
    /// Identifies one value within one unit: e.g. "value 5 in row 3".
    #[derive(PartialEq, Eq, Ord, PartialOrd, Hash, Copy, Clone, Debug)]
    pub struct ValueKey {
        pub value: u8,
        pub set_type: SetType,
        pub set_index: usize
    }
    impl ::core::fmt::Display for ValueKey {
        // NOTE: deliberately formats `set_type` with Debug ("Row"/"Col"/"Box"),
        // not its Display impl, to preserve the original output format.
        fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
            write!(f, "{} ({:?}:{:?})", self.value, self.set_type, self.set_index)
        }
    }
}
| 36.469027 | 120 | 0.578743 |
89fd23b7031455788bfe30f9acf15407508ecb96
| 238,616 |
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Paginator for [`DescribeActivations`](crate::operation::DescribeActivations)
pub struct DescribeActivationsPaginator {
    /// Shared client handle (connection + config) used to send each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    /// Input builder whose `next_token` field is advanced between pages.
    builder: crate::input::describe_activations_input::Builder,
}
impl DescribeActivationsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_activations_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `activation_list`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeActivationsPaginatorItems {
        crate::paginator::DescribeActivationsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeActivationsOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeActivationsError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                // Fetch pages until the response carries no next token (or the receiver is dropped).
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_activations_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // An unchanged non-empty token would loop forever; bail out instead.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeAssociationExecutions`](crate::operation::DescribeAssociationExecutions)
pub struct DescribeAssociationExecutionsPaginator {
    /// Shared client handle (connection + config) used to send each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    /// Input builder whose `next_token` field is advanced between pages.
    builder: crate::input::describe_association_executions_input::Builder,
}
impl DescribeAssociationExecutionsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_association_executions_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `association_executions`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeAssociationExecutionsPaginatorItems {
        crate::paginator::DescribeAssociationExecutionsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeAssociationExecutionsOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeAssociationExecutionsError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                // Fetch pages until the response carries no next token (or the receiver is dropped).
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_association_executions_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // An unchanged non-empty token would loop forever; bail out instead.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeAssociationExecutionTargets`](crate::operation::DescribeAssociationExecutionTargets)
pub struct DescribeAssociationExecutionTargetsPaginator {
    /// Shared client handle (connection + config) used to send each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    /// Input builder whose `next_token` field is advanced between pages.
    builder: crate::input::describe_association_execution_targets_input::Builder,
}
impl DescribeAssociationExecutionTargetsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_association_execution_targets_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `association_execution_targets`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeAssociationExecutionTargetsPaginatorItems {
        crate::paginator::DescribeAssociationExecutionTargetsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeAssociationExecutionTargetsOutput,
            aws_smithy_http::result::SdkError<
                crate::error::DescribeAssociationExecutionTargetsError,
            >,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                // Fetch pages until the response carries no next token (or the receiver is dropped).
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_association_execution_targets_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // An unchanged non-empty token would loop forever; bail out instead.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeAutomationExecutions`](crate::operation::DescribeAutomationExecutions)
pub struct DescribeAutomationExecutionsPaginator {
    /// Shared client handle (connection + config) used to send each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    /// Input builder whose `next_token` field is advanced between pages.
    builder: crate::input::describe_automation_executions_input::Builder,
}
impl DescribeAutomationExecutionsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_automation_executions_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `automation_execution_metadata_list`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeAutomationExecutionsPaginatorItems {
        crate::paginator::DescribeAutomationExecutionsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeAutomationExecutionsOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeAutomationExecutionsError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                // Fetch pages until the response carries no next token (or the receiver is dropped).
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_automation_executions_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // An unchanged non-empty token would loop forever; bail out instead.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeAutomationStepExecutions`](crate::operation::DescribeAutomationStepExecutions)
pub struct DescribeAutomationStepExecutionsPaginator {
    /// Shared client handle (connection + config) used to send each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    /// Input builder whose `next_token` field is advanced between pages.
    builder: crate::input::describe_automation_step_executions_input::Builder,
}
impl DescribeAutomationStepExecutionsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_automation_step_executions_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `step_executions`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeAutomationStepExecutionsPaginatorItems {
        crate::paginator::DescribeAutomationStepExecutionsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeAutomationStepExecutionsOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeAutomationStepExecutionsError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                // Fetch pages until the response carries no next token (or the receiver is dropped).
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_automation_step_executions_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // An unchanged non-empty token would loop forever; bail out instead.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeAvailablePatches`](crate::operation::DescribeAvailablePatches)
pub struct DescribeAvailablePatchesPaginator {
    /// Shared client handle (connection + config) used to send each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    /// Input builder whose `next_token` field is advanced between pages.
    builder: crate::input::describe_available_patches_input::Builder,
}
impl DescribeAvailablePatchesPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_available_patches_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `patches`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeAvailablePatchesPaginatorItems {
        crate::paginator::DescribeAvailablePatchesPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeAvailablePatchesOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeAvailablePatchesError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                // Fetch pages until the response carries no next token (or the receiver is dropped).
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_available_patches_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // An unchanged non-empty token would loop forever; bail out instead.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeEffectiveInstanceAssociations`](crate::operation::DescribeEffectiveInstanceAssociations)
pub struct DescribeEffectiveInstanceAssociationsPaginator {
    // Shared client state (HTTP client + config) used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` field is advanced between pages.
    builder: crate::input::describe_effective_instance_associations_input::Builder,
}
impl DescribeEffectiveInstanceAssociationsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_effective_instance_associations_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `associations`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeEffectiveInstanceAssociationsPaginatorItems {
        crate::paginator::DescribeEffectiveInstanceAssociationsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeEffectiveInstanceAssociationsOutput,
            aws_smithy_http::result::SdkError<
                crate::error::DescribeEffectiveInstanceAssociationsError,
            >,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_effective_instance_associations_output_next_token(resp);
                            // An absent or empty next token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // Guard against an infinite loop: an unchanged, non-empty token indicates an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        // Errors terminate pagination after being forwarded below.
                        Err(_) => true,
                    };
                    // Forward the page (or error); stop if the receiver was dropped.
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeEffectivePatchesForPatchBaseline`](crate::operation::DescribeEffectivePatchesForPatchBaseline)
pub struct DescribeEffectivePatchesForPatchBaselinePaginator {
    // Shared client state (HTTP client + config) used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` field is advanced between pages.
    builder: crate::input::describe_effective_patches_for_patch_baseline_input::Builder,
}
impl DescribeEffectivePatchesForPatchBaselinePaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_effective_patches_for_patch_baseline_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `effective_patches`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeEffectivePatchesForPatchBaselinePaginatorItems {
        crate::paginator::DescribeEffectivePatchesForPatchBaselinePaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeEffectivePatchesForPatchBaselineOutput,
            aws_smithy_http::result::SdkError<
                crate::error::DescribeEffectivePatchesForPatchBaselineError,
            >,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_effective_patches_for_patch_baseline_output_next_token(resp);
                            // An absent or empty next token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // Guard against an infinite loop: an unchanged, non-empty token indicates an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        // Errors terminate pagination after being forwarded below.
                        Err(_) => true,
                    };
                    // Forward the page (or error); stop if the receiver was dropped.
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeInstanceAssociationsStatus`](crate::operation::DescribeInstanceAssociationsStatus)
pub struct DescribeInstanceAssociationsStatusPaginator {
    // Shared client state (HTTP client + config) used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` field is advanced between pages.
    builder: crate::input::describe_instance_associations_status_input::Builder,
}
impl DescribeInstanceAssociationsStatusPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_instance_associations_status_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `instance_association_status_infos`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeInstanceAssociationsStatusPaginatorItems {
        crate::paginator::DescribeInstanceAssociationsStatusPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeInstanceAssociationsStatusOutput,
            aws_smithy_http::result::SdkError<
                crate::error::DescribeInstanceAssociationsStatusError,
            >,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_instance_associations_status_output_next_token(resp);
                            // An absent or empty next token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // Guard against an infinite loop: an unchanged, non-empty token indicates an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        // Errors terminate pagination after being forwarded below.
                        Err(_) => true,
                    };
                    // Forward the page (or error); stop if the receiver was dropped.
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeInstanceInformation`](crate::operation::DescribeInstanceInformation)
pub struct DescribeInstanceInformationPaginator {
    // Shared client state (HTTP client + config) used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` field is advanced between pages.
    builder: crate::input::describe_instance_information_input::Builder,
}
impl DescribeInstanceInformationPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_instance_information_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `instance_information_list`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeInstanceInformationPaginatorItems {
        crate::paginator::DescribeInstanceInformationPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeInstanceInformationOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeInstanceInformationError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_instance_information_output_next_token(resp);
                            // An absent or empty next token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // Guard against an infinite loop: an unchanged, non-empty token indicates an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        // Errors terminate pagination after being forwarded below.
                        Err(_) => true,
                    };
                    // Forward the page (or error); stop if the receiver was dropped.
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeInstancePatches`](crate::operation::DescribeInstancePatches)
pub struct DescribeInstancePatchesPaginator {
    // Shared client state (HTTP client + config) used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` field is advanced between pages.
    builder: crate::input::describe_instance_patches_input::Builder,
}
impl DescribeInstancePatchesPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_instance_patches_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `patches`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeInstancePatchesPaginatorItems {
        crate::paginator::DescribeInstancePatchesPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeInstancePatchesOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeInstancePatchesError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_instance_patches_output_next_token(resp);
                            // An absent or empty next token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // Guard against an infinite loop: an unchanged, non-empty token indicates an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        // Errors terminate pagination after being forwarded below.
                        Err(_) => true,
                    };
                    // Forward the page (or error); stop if the receiver was dropped.
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeInstancePatchStates`](crate::operation::DescribeInstancePatchStates)
pub struct DescribeInstancePatchStatesPaginator {
    // Shared client state (HTTP client + config) used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` field is advanced between pages.
    builder: crate::input::describe_instance_patch_states_input::Builder,
}
impl DescribeInstancePatchStatesPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_instance_patch_states_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `instance_patch_states`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeInstancePatchStatesPaginatorItems {
        crate::paginator::DescribeInstancePatchStatesPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeInstancePatchStatesOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeInstancePatchStatesError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_instance_patch_states_output_next_token(resp);
                            // An absent or empty next token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // Guard against an infinite loop: an unchanged, non-empty token indicates an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        // Errors terminate pagination after being forwarded below.
                        Err(_) => true,
                    };
                    // Forward the page (or error); stop if the receiver was dropped.
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeInstancePatchStatesForPatchGroup`](crate::operation::DescribeInstancePatchStatesForPatchGroup)
pub struct DescribeInstancePatchStatesForPatchGroupPaginator {
    // Shared client state (HTTP client + config) used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` field is advanced between pages.
    builder: crate::input::describe_instance_patch_states_for_patch_group_input::Builder,
}
impl DescribeInstancePatchStatesForPatchGroupPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_instance_patch_states_for_patch_group_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `instance_patch_states`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeInstancePatchStatesForPatchGroupPaginatorItems {
        crate::paginator::DescribeInstancePatchStatesForPatchGroupPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeInstancePatchStatesForPatchGroupOutput,
            aws_smithy_http::result::SdkError<
                crate::error::DescribeInstancePatchStatesForPatchGroupError,
            >,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_instance_patch_states_for_patch_group_output_next_token(resp);
                            // An absent or empty next token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // Guard against an infinite loop: an unchanged, non-empty token indicates an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        // Errors terminate pagination after being forwarded below.
                        Err(_) => true,
                    };
                    // Forward the page (or error); stop if the receiver was dropped.
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeInventoryDeletions`](crate::operation::DescribeInventoryDeletions)
pub struct DescribeInventoryDeletionsPaginator {
    // Shared client state (HTTP client + config) used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` field is advanced between pages.
    builder: crate::input::describe_inventory_deletions_input::Builder,
}
impl DescribeInventoryDeletionsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_inventory_deletions_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `inventory_deletions`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeInventoryDeletionsPaginatorItems {
        crate::paginator::DescribeInventoryDeletionsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeInventoryDeletionsOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeInventoryDeletionsError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_inventory_deletions_output_next_token(resp);
                            // An absent or empty next token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // Guard against an infinite loop: an unchanged, non-empty token indicates an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        // Errors terminate pagination after being forwarded below.
                        Err(_) => true,
                    };
                    // Forward the page (or error); stop if the receiver was dropped.
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeMaintenanceWindowExecutions`](crate::operation::DescribeMaintenanceWindowExecutions)
pub struct DescribeMaintenanceWindowExecutionsPaginator {
    // Shared client state (HTTP client + config) used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` field is advanced between pages.
    builder: crate::input::describe_maintenance_window_executions_input::Builder,
}
impl DescribeMaintenanceWindowExecutionsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_maintenance_window_executions_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `window_executions`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeMaintenanceWindowExecutionsPaginatorItems {
        crate::paginator::DescribeMaintenanceWindowExecutionsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeMaintenanceWindowExecutionsOutput,
            aws_smithy_http::result::SdkError<
                crate::error::DescribeMaintenanceWindowExecutionsError,
            >,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_maintenance_window_executions_output_next_token(resp);
                            // An absent or empty next token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // Guard against an infinite loop: an unchanged, non-empty token indicates an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        // Errors terminate pagination after being forwarded below.
                        Err(_) => true,
                    };
                    // Forward the page (or error); stop if the receiver was dropped.
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeMaintenanceWindowExecutionTaskInvocations`](crate::operation::DescribeMaintenanceWindowExecutionTaskInvocations)
pub struct DescribeMaintenanceWindowExecutionTaskInvocationsPaginator {
    // Shared client state (HTTP client + config) used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` field is advanced between pages.
    builder: crate::input::describe_maintenance_window_execution_task_invocations_input::Builder,
}
impl DescribeMaintenanceWindowExecutionTaskInvocationsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_maintenance_window_execution_task_invocations_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `window_execution_task_invocation_identities`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(
        self,
    ) -> crate::paginator::DescribeMaintenanceWindowExecutionTaskInvocationsPaginatorItems {
        crate::paginator::DescribeMaintenanceWindowExecutionTaskInvocationsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeMaintenanceWindowExecutionTaskInvocationsOutput,
            aws_smithy_http::result::SdkError<
                crate::error::DescribeMaintenanceWindowExecutionTaskInvocationsError,
            >,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_maintenance_window_execution_task_invocations_output_next_token(resp);
                            // An absent or empty next token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // Guard against an infinite loop: an unchanged, non-empty token indicates an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        // Errors terminate pagination after being forwarded below.
                        Err(_) => true,
                    };
                    // Forward the page (or error); stop if the receiver was dropped.
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeMaintenanceWindowExecutionTasks`](crate::operation::DescribeMaintenanceWindowExecutionTasks)
pub struct DescribeMaintenanceWindowExecutionTasksPaginator {
    // Shared client state (HTTP client + config) used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` field is advanced between pages.
    builder: crate::input::describe_maintenance_window_execution_tasks_input::Builder,
}
impl DescribeMaintenanceWindowExecutionTasksPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_maintenance_window_execution_tasks_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `window_execution_task_identities`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeMaintenanceWindowExecutionTasksPaginatorItems {
        crate::paginator::DescribeMaintenanceWindowExecutionTasksPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeMaintenanceWindowExecutionTasksOutput,
            aws_smithy_http::result::SdkError<
                crate::error::DescribeMaintenanceWindowExecutionTasksError,
            >,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_maintenance_window_execution_tasks_output_next_token(resp);
                            // An absent or empty next token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // Guard against an infinite loop: an unchanged, non-empty token indicates an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        // Errors terminate pagination after being forwarded below.
                        Err(_) => true,
                    };
                    // Forward the page (or error); stop if the receiver was dropped.
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeMaintenanceWindows`](crate::operation::DescribeMaintenanceWindows)
pub struct DescribeMaintenanceWindowsPaginator {
    // Shared connection/config used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` is advanced between pages.
    builder: crate::input::describe_maintenance_windows_input::Builder,
}
impl DescribeMaintenanceWindowsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_maintenance_windows_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `window_identities`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeMaintenanceWindowsPaginatorItems {
        crate::paginator::DescribeMaintenanceWindowsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeMaintenanceWindowsOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeMaintenanceWindowsError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    // Convert the input into a dispatchable operation for this page.
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_maintenance_windows_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the previous one would loop forever; bail out.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the token; an empty/absent token marks the final page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeMaintenanceWindowSchedule`](crate::operation::DescribeMaintenanceWindowSchedule)
pub struct DescribeMaintenanceWindowSchedulePaginator {
    // Shared connection/config used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` is advanced between pages.
    builder: crate::input::describe_maintenance_window_schedule_input::Builder,
}
impl DescribeMaintenanceWindowSchedulePaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_maintenance_window_schedule_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `scheduled_window_executions`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeMaintenanceWindowSchedulePaginatorItems {
        crate::paginator::DescribeMaintenanceWindowSchedulePaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeMaintenanceWindowScheduleOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeMaintenanceWindowScheduleError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    // Convert the input into a dispatchable operation for this page.
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_maintenance_window_schedule_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the previous one would loop forever; bail out.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the token; an empty/absent token marks the final page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeMaintenanceWindowsForTarget`](crate::operation::DescribeMaintenanceWindowsForTarget)
pub struct DescribeMaintenanceWindowsForTargetPaginator {
    // Shared connection/config used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` is advanced between pages.
    builder: crate::input::describe_maintenance_windows_for_target_input::Builder,
}
impl DescribeMaintenanceWindowsForTargetPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_maintenance_windows_for_target_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `window_identities`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeMaintenanceWindowsForTargetPaginatorItems {
        crate::paginator::DescribeMaintenanceWindowsForTargetPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeMaintenanceWindowsForTargetOutput,
            aws_smithy_http::result::SdkError<
                crate::error::DescribeMaintenanceWindowsForTargetError,
            >,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    // Convert the input into a dispatchable operation for this page.
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_maintenance_windows_for_target_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the previous one would loop forever; bail out.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the token; an empty/absent token marks the final page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeMaintenanceWindowTargets`](crate::operation::DescribeMaintenanceWindowTargets)
pub struct DescribeMaintenanceWindowTargetsPaginator {
    // Shared connection/config used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` is advanced between pages.
    builder: crate::input::describe_maintenance_window_targets_input::Builder,
}
impl DescribeMaintenanceWindowTargetsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_maintenance_window_targets_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `targets`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeMaintenanceWindowTargetsPaginatorItems {
        crate::paginator::DescribeMaintenanceWindowTargetsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeMaintenanceWindowTargetsOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeMaintenanceWindowTargetsError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    // Convert the input into a dispatchable operation for this page.
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_maintenance_window_targets_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the previous one would loop forever; bail out.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the token; an empty/absent token marks the final page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeMaintenanceWindowTasks`](crate::operation::DescribeMaintenanceWindowTasks)
pub struct DescribeMaintenanceWindowTasksPaginator {
    // Shared connection/config used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` is advanced between pages.
    builder: crate::input::describe_maintenance_window_tasks_input::Builder,
}
impl DescribeMaintenanceWindowTasksPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_maintenance_window_tasks_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `tasks`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeMaintenanceWindowTasksPaginatorItems {
        crate::paginator::DescribeMaintenanceWindowTasksPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeMaintenanceWindowTasksOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeMaintenanceWindowTasksError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    // Convert the input into a dispatchable operation for this page.
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_maintenance_window_tasks_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the previous one would loop forever; bail out.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the token; an empty/absent token marks the final page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeOpsItems`](crate::operation::DescribeOpsItems)
pub struct DescribeOpsItemsPaginator {
    // Shared connection/config used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` is advanced between pages.
    builder: crate::input::describe_ops_items_input::Builder,
}
impl DescribeOpsItemsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_ops_items_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `ops_item_summaries`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeOpsItemsPaginatorItems {
        crate::paginator::DescribeOpsItemsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeOpsItemsOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeOpsItemsError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    // Convert the input into a dispatchable operation for this page.
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_ops_items_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the previous one would loop forever; bail out.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the token; an empty/absent token marks the final page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeParameters`](crate::operation::DescribeParameters)
pub struct DescribeParametersPaginator {
    // Shared connection/config used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` is advanced between pages.
    builder: crate::input::describe_parameters_input::Builder,
}
impl DescribeParametersPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_parameters_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeParametersOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeParametersError>,
        >,
    > + Unpin {
        // Destructure so the async block can take ownership of each field separately.
        let Self { handle, builder } = self;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the initial input; missing required fields surface as an early error.
                let mut input = match builder
                    .build()
                    .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))
                {
                    Ok(built) => built,
                    Err(failure) => {
                        let _ = tx.send(Err(failure)).await;
                        return;
                    }
                };
                loop {
                    // Turn the current input into a dispatchable operation.
                    let operation = match input
                        .make_operation(&handle.conf)
                        .await
                        .map_err(|err| {
                            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                        }) {
                        Ok(operation) => operation,
                        Err(failure) => {
                            let _ = tx.send(Err(failure)).await;
                            return;
                        }
                    };
                    let page = handle.client.call(operation).await;
                    // Decide whether this page ends the stream: service errors end it,
                    // and so does an empty/absent pagination token.
                    let finished = match &page {
                        Err(_) => true,
                        Ok(output) => {
                            let token = crate::lens::reflens_structure_crate_output_describe_parameters_output_next_token(output);
                            let exhausted = token.map_or(true, |t| t.is_empty());
                            // Guard against a token that never changes, which would loop forever.
                            if !exhausted && token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = token.cloned();
                            exhausted
                        }
                    };
                    // Forward the page; stop when the receiver is gone or pagination is done.
                    if tx.send(page).await.is_err() || finished {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribePatchBaselines`](crate::operation::DescribePatchBaselines)
pub struct DescribePatchBaselinesPaginator {
    // Shared connection/config used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` is advanced between pages.
    builder: crate::input::describe_patch_baselines_input::Builder,
}
impl DescribePatchBaselinesPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_patch_baselines_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `baseline_identities`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribePatchBaselinesPaginatorItems {
        crate::paginator::DescribePatchBaselinesPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribePatchBaselinesOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribePatchBaselinesError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    // Convert the input into a dispatchable operation for this page.
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_patch_baselines_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the previous one would loop forever; bail out.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the token; an empty/absent token marks the final page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribePatchGroups`](crate::operation::DescribePatchGroups)
pub struct DescribePatchGroupsPaginator {
    // Shared connection/config used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` is advanced between pages.
    builder: crate::input::describe_patch_groups_input::Builder,
}
impl DescribePatchGroupsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_patch_groups_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `mappings`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribePatchGroupsPaginatorItems {
        crate::paginator::DescribePatchGroupsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribePatchGroupsOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribePatchGroupsError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    // Convert the input into a dispatchable operation for this page.
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_patch_groups_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the previous one would loop forever; bail out.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the token; an empty/absent token marks the final page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribePatchProperties`](crate::operation::DescribePatchProperties)
pub struct DescribePatchPropertiesPaginator {
    // Shared connection/config used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` is advanced between pages.
    builder: crate::input::describe_patch_properties_input::Builder,
}
impl DescribePatchPropertiesPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_patch_properties_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `properties`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribePatchPropertiesPaginatorItems {
        crate::paginator::DescribePatchPropertiesPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribePatchPropertiesOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribePatchPropertiesError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                loop {
                    // Convert the input into a dispatchable operation for this page.
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_describe_patch_properties_output_next_token(resp);
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the previous one would loop forever; bail out.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the token; an empty/absent token marks the final page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`DescribeSessions`](crate::operation::DescribeSessions)
pub struct DescribeSessionsPaginator {
    // Shared connection/config used to dispatch each page request.
    handle: std::sync::Arc<crate::client::Handle>,
    // Request builder; its `next_token` is advanced between pages.
    builder: crate::input::describe_sessions_input::Builder,
}
impl DescribeSessionsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::describe_sessions_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `sessions`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::DescribeSessionsPaginatorItems {
        crate::paginator::DescribeSessionsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::DescribeSessionsOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeSessionsError>,
        >,
    > + Unpin {
        // Destructure so the async block can take ownership of each field separately.
        let Self { handle, builder } = self;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the initial input; missing required fields surface as an early error.
                let mut input = match builder
                    .build()
                    .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))
                {
                    Ok(built) => built,
                    Err(failure) => {
                        let _ = tx.send(Err(failure)).await;
                        return;
                    }
                };
                loop {
                    // Turn the current input into a dispatchable operation.
                    let operation = match input
                        .make_operation(&handle.conf)
                        .await
                        .map_err(|err| {
                            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                        }) {
                        Ok(operation) => operation,
                        Err(failure) => {
                            let _ = tx.send(Err(failure)).await;
                            return;
                        }
                    };
                    let page = handle.client.call(operation).await;
                    // Decide whether this page ends the stream: service errors end it,
                    // and so does an empty/absent pagination token.
                    let finished = match &page {
                        Err(_) => true,
                        Ok(output) => {
                            let token = crate::lens::reflens_structure_crate_output_describe_sessions_output_next_token(output);
                            let exhausted = token.map_or(true, |t| t.is_empty());
                            // Guard against a token that never changes, which would loop forever.
                            if !exhausted && token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            input.next_token = token.cloned();
                            exhausted
                        }
                    };
                    // Forward the page; stop when the receiver is gone or pagination is done.
                    if tx.send(page).await.is_err() || finished {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`GetInventory`](crate::operation::GetInventory)
pub struct GetInventoryPaginator {
    handle: std::sync::Arc<crate::client::Handle>,
    builder: crate::input::get_inventory_input::Builder,
}
impl GetInventoryPaginator {
    /// Wrap a client handle and an input builder in a new paginator.
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::get_inventory_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// The returned paginator yields the elements of `entities` one at a time. Requests to the
    /// underlying service are dispatched lazily.
    pub fn items(self) -> crate::paginator::GetInventoryPaginatorItems {
        crate::paginator::GetInventoryPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::GetInventoryOutput,
            aws_smithy_http::result::SdkError<crate::error::GetInventoryError>,
        >,
    > + Unpin {
        // Destructure so the async closure owns each field independently.
        let Self { handle, builder } = self;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the first request; builder validation failures surface here as an
                // early construction error on the stream.
                let mut page_input = match builder.build() {
                    Ok(built) => built,
                    Err(build_err) => {
                        let _ = tx
                            .send(Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                                build_err.into(),
                            )))
                            .await;
                        return;
                    }
                };
                loop {
                    // Convert the current input into a dispatchable operation.
                    let operation = match page_input.make_operation(&handle.conf).await {
                        Ok(made) => made,
                        Err(op_err) => {
                            let _ = tx
                                .send(Err(
                                    aws_smithy_http::result::SdkError::ConstructionFailure(
                                        op_err.into(),
                                    ),
                                ))
                                .await;
                            return;
                        }
                    };
                    let page = handle.client.call(operation).await;
                    // Work out whether this page is the final one *before* the response is
                    // moved into the channel below.
                    let finished = match &page {
                        Ok(output) => {
                            let token = crate::lens::reflens_structure_crate_output_get_inventory_output_next_token(output);
                            let token_absent = token.map(|t| t.is_empty()).unwrap_or(true);
                            if !token_absent && token == page_input.next_token.as_ref() {
                                // An unchanged token would make us loop forever; abort instead.
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            page_input.next_token = token.cloned();
                            token_absent
                        }
                        // Errors always terminate pagination after being forwarded.
                        Err(_) => true,
                    };
                    if tx.send(page).await.is_err() {
                        // The receiving side of the stream was dropped.
                        return;
                    }
                    if finished {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`GetInventorySchema`](crate::operation::GetInventorySchema)
pub struct GetInventorySchemaPaginator {
    handle: std::sync::Arc<crate::client::Handle>,
    builder: crate::input::get_inventory_schema_input::Builder,
}
impl GetInventorySchemaPaginator {
    /// Wrap a client handle and an input builder in a new paginator.
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::get_inventory_schema_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// The returned paginator yields the elements of `schemas` one at a time. Requests to the
    /// underlying service are dispatched lazily.
    pub fn items(self) -> crate::paginator::GetInventorySchemaPaginatorItems {
        crate::paginator::GetInventorySchemaPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::GetInventorySchemaOutput,
            aws_smithy_http::result::SdkError<crate::error::GetInventorySchemaError>,
        >,
    > + Unpin {
        // Destructure so the async closure owns each field independently.
        let Self { handle, builder } = self;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the first request; builder validation failures surface here as an
                // early construction error on the stream.
                let mut page_input = match builder.build() {
                    Ok(built) => built,
                    Err(build_err) => {
                        let _ = tx
                            .send(Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                                build_err.into(),
                            )))
                            .await;
                        return;
                    }
                };
                loop {
                    // Convert the current input into a dispatchable operation.
                    let operation = match page_input.make_operation(&handle.conf).await {
                        Ok(made) => made,
                        Err(op_err) => {
                            let _ = tx
                                .send(Err(
                                    aws_smithy_http::result::SdkError::ConstructionFailure(
                                        op_err.into(),
                                    ),
                                ))
                                .await;
                            return;
                        }
                    };
                    let page = handle.client.call(operation).await;
                    // Work out whether this page is the final one *before* the response is
                    // moved into the channel below.
                    let finished = match &page {
                        Ok(output) => {
                            let token = crate::lens::reflens_structure_crate_output_get_inventory_schema_output_next_token(output);
                            let token_absent = token.map(|t| t.is_empty()).unwrap_or(true);
                            if !token_absent && token == page_input.next_token.as_ref() {
                                // An unchanged token would make us loop forever; abort instead.
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            page_input.next_token = token.cloned();
                            token_absent
                        }
                        // Errors always terminate pagination after being forwarded.
                        Err(_) => true,
                    };
                    if tx.send(page).await.is_err() {
                        // The receiving side of the stream was dropped.
                        return;
                    }
                    if finished {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`GetOpsSummary`](crate::operation::GetOpsSummary)
pub struct GetOpsSummaryPaginator {
    handle: std::sync::Arc<crate::client::Handle>,
    builder: crate::input::get_ops_summary_input::Builder,
}
impl GetOpsSummaryPaginator {
    /// Wrap a client handle and an input builder in a new paginator.
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::get_ops_summary_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// The returned paginator yields the elements of `entities` one at a time. Requests to the
    /// underlying service are dispatched lazily.
    pub fn items(self) -> crate::paginator::GetOpsSummaryPaginatorItems {
        crate::paginator::GetOpsSummaryPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::GetOpsSummaryOutput,
            aws_smithy_http::result::SdkError<crate::error::GetOpsSummaryError>,
        >,
    > + Unpin {
        // Destructure so the async closure owns each field independently.
        let Self { handle, builder } = self;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the first request; builder validation failures surface here as an
                // early construction error on the stream.
                let mut page_input = match builder.build() {
                    Ok(built) => built,
                    Err(build_err) => {
                        let _ = tx
                            .send(Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                                build_err.into(),
                            )))
                            .await;
                        return;
                    }
                };
                loop {
                    // Convert the current input into a dispatchable operation.
                    let operation = match page_input.make_operation(&handle.conf).await {
                        Ok(made) => made,
                        Err(op_err) => {
                            let _ = tx
                                .send(Err(
                                    aws_smithy_http::result::SdkError::ConstructionFailure(
                                        op_err.into(),
                                    ),
                                ))
                                .await;
                            return;
                        }
                    };
                    let page = handle.client.call(operation).await;
                    // Work out whether this page is the final one *before* the response is
                    // moved into the channel below.
                    let finished = match &page {
                        Ok(output) => {
                            let token = crate::lens::reflens_structure_crate_output_get_ops_summary_output_next_token(output);
                            let token_absent = token.map(|t| t.is_empty()).unwrap_or(true);
                            if !token_absent && token == page_input.next_token.as_ref() {
                                // An unchanged token would make us loop forever; abort instead.
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            page_input.next_token = token.cloned();
                            token_absent
                        }
                        // Errors always terminate pagination after being forwarded.
                        Err(_) => true,
                    };
                    if tx.send(page).await.is_err() {
                        // The receiving side of the stream was dropped.
                        return;
                    }
                    if finished {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`GetParameterHistory`](crate::operation::GetParameterHistory)
pub struct GetParameterHistoryPaginator {
    handle: std::sync::Arc<crate::client::Handle>,
    builder: crate::input::get_parameter_history_input::Builder,
}
impl GetParameterHistoryPaginator {
    /// Wrap a client handle and an input builder in a new paginator.
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::get_parameter_history_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::GetParameterHistoryOutput,
            aws_smithy_http::result::SdkError<crate::error::GetParameterHistoryError>,
        >,
    > + Unpin {
        // Destructure so the async closure owns each field independently.
        let Self { handle, builder } = self;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the first request; builder validation failures surface here as an
                // early construction error on the stream.
                let mut page_input = match builder.build() {
                    Ok(built) => built,
                    Err(build_err) => {
                        let _ = tx
                            .send(Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                                build_err.into(),
                            )))
                            .await;
                        return;
                    }
                };
                loop {
                    // Convert the current input into a dispatchable operation.
                    let operation = match page_input.make_operation(&handle.conf).await {
                        Ok(made) => made,
                        Err(op_err) => {
                            let _ = tx
                                .send(Err(
                                    aws_smithy_http::result::SdkError::ConstructionFailure(
                                        op_err.into(),
                                    ),
                                ))
                                .await;
                            return;
                        }
                    };
                    let page = handle.client.call(operation).await;
                    // Work out whether this page is the final one *before* the response is
                    // moved into the channel below.
                    let finished = match &page {
                        Ok(output) => {
                            let token = crate::lens::reflens_structure_crate_output_get_parameter_history_output_next_token(output);
                            let token_absent = token.map(|t| t.is_empty()).unwrap_or(true);
                            if !token_absent && token == page_input.next_token.as_ref() {
                                // An unchanged token would make us loop forever; abort instead.
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            page_input.next_token = token.cloned();
                            token_absent
                        }
                        // Errors always terminate pagination after being forwarded.
                        Err(_) => true,
                    };
                    if tx.send(page).await.is_err() {
                        // The receiving side of the stream was dropped.
                        return;
                    }
                    if finished {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`GetParametersByPath`](crate::operation::GetParametersByPath)
pub struct GetParametersByPathPaginator {
    handle: std::sync::Arc<crate::client::Handle>,
    builder: crate::input::get_parameters_by_path_input::Builder,
}
impl GetParametersByPathPaginator {
    /// Wrap a client handle and an input builder in a new paginator.
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::get_parameters_by_path_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::GetParametersByPathOutput,
            aws_smithy_http::result::SdkError<crate::error::GetParametersByPathError>,
        >,
    > + Unpin {
        // Destructure so the async closure owns each field independently.
        let Self { handle, builder } = self;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the first request; builder validation failures surface here as an
                // early construction error on the stream.
                let mut page_input = match builder.build() {
                    Ok(built) => built,
                    Err(build_err) => {
                        let _ = tx
                            .send(Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                                build_err.into(),
                            )))
                            .await;
                        return;
                    }
                };
                loop {
                    // Convert the current input into a dispatchable operation.
                    let operation = match page_input.make_operation(&handle.conf).await {
                        Ok(made) => made,
                        Err(op_err) => {
                            let _ = tx
                                .send(Err(
                                    aws_smithy_http::result::SdkError::ConstructionFailure(
                                        op_err.into(),
                                    ),
                                ))
                                .await;
                            return;
                        }
                    };
                    let page = handle.client.call(operation).await;
                    // Work out whether this page is the final one *before* the response is
                    // moved into the channel below.
                    let finished = match &page {
                        Ok(output) => {
                            let token = crate::lens::reflens_structure_crate_output_get_parameters_by_path_output_next_token(output);
                            let token_absent = token.map(|t| t.is_empty()).unwrap_or(true);
                            if !token_absent && token == page_input.next_token.as_ref() {
                                // An unchanged token would make us loop forever; abort instead.
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            page_input.next_token = token.cloned();
                            token_absent
                        }
                        // Errors always terminate pagination after being forwarded.
                        Err(_) => true,
                    };
                    if tx.send(page).await.is_err() {
                        // The receiving side of the stream was dropped.
                        return;
                    }
                    if finished {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`ListAssociations`](crate::operation::ListAssociations)
pub struct ListAssociationsPaginator {
    handle: std::sync::Arc<crate::client::Handle>,
    builder: crate::input::list_associations_input::Builder,
}
impl ListAssociationsPaginator {
    /// Wrap a client handle and an input builder in a new paginator.
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::list_associations_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// The returned paginator yields the elements of `associations` one at a time. Requests to the
    /// underlying service are dispatched lazily.
    pub fn items(self) -> crate::paginator::ListAssociationsPaginatorItems {
        crate::paginator::ListAssociationsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::ListAssociationsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListAssociationsError>,
        >,
    > + Unpin {
        // Destructure so the async closure owns each field independently.
        let Self { handle, builder } = self;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the first request; builder validation failures surface here as an
                // early construction error on the stream.
                let mut page_input = match builder.build() {
                    Ok(built) => built,
                    Err(build_err) => {
                        let _ = tx
                            .send(Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                                build_err.into(),
                            )))
                            .await;
                        return;
                    }
                };
                loop {
                    // Convert the current input into a dispatchable operation.
                    let operation = match page_input.make_operation(&handle.conf).await {
                        Ok(made) => made,
                        Err(op_err) => {
                            let _ = tx
                                .send(Err(
                                    aws_smithy_http::result::SdkError::ConstructionFailure(
                                        op_err.into(),
                                    ),
                                ))
                                .await;
                            return;
                        }
                    };
                    let page = handle.client.call(operation).await;
                    // Work out whether this page is the final one *before* the response is
                    // moved into the channel below.
                    let finished = match &page {
                        Ok(output) => {
                            let token = crate::lens::reflens_structure_crate_output_list_associations_output_next_token(output);
                            let token_absent = token.map(|t| t.is_empty()).unwrap_or(true);
                            if !token_absent && token == page_input.next_token.as_ref() {
                                // An unchanged token would make us loop forever; abort instead.
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            page_input.next_token = token.cloned();
                            token_absent
                        }
                        // Errors always terminate pagination after being forwarded.
                        Err(_) => true,
                    };
                    if tx.send(page).await.is_err() {
                        // The receiving side of the stream was dropped.
                        return;
                    }
                    if finished {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`ListAssociationVersions`](crate::operation::ListAssociationVersions)
pub struct ListAssociationVersionsPaginator {
    handle: std::sync::Arc<crate::client::Handle>,
    builder: crate::input::list_association_versions_input::Builder,
}
impl ListAssociationVersionsPaginator {
    /// Wrap a client handle and an input builder in a new paginator.
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::list_association_versions_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// The returned paginator yields the elements of `association_versions` one at a time. Requests
    /// to the underlying service are dispatched lazily.
    pub fn items(self) -> crate::paginator::ListAssociationVersionsPaginatorItems {
        crate::paginator::ListAssociationVersionsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::ListAssociationVersionsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListAssociationVersionsError>,
        >,
    > + Unpin {
        // Destructure so the async closure owns each field independently.
        let Self { handle, builder } = self;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the first request; builder validation failures surface here as an
                // early construction error on the stream.
                let mut page_input = match builder.build() {
                    Ok(built) => built,
                    Err(build_err) => {
                        let _ = tx
                            .send(Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                                build_err.into(),
                            )))
                            .await;
                        return;
                    }
                };
                loop {
                    // Convert the current input into a dispatchable operation.
                    let operation = match page_input.make_operation(&handle.conf).await {
                        Ok(made) => made,
                        Err(op_err) => {
                            let _ = tx
                                .send(Err(
                                    aws_smithy_http::result::SdkError::ConstructionFailure(
                                        op_err.into(),
                                    ),
                                ))
                                .await;
                            return;
                        }
                    };
                    let page = handle.client.call(operation).await;
                    // Work out whether this page is the final one *before* the response is
                    // moved into the channel below.
                    let finished = match &page {
                        Ok(output) => {
                            let token = crate::lens::reflens_structure_crate_output_list_association_versions_output_next_token(output);
                            let token_absent = token.map(|t| t.is_empty()).unwrap_or(true);
                            if !token_absent && token == page_input.next_token.as_ref() {
                                // An unchanged token would make us loop forever; abort instead.
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            page_input.next_token = token.cloned();
                            token_absent
                        }
                        // Errors always terminate pagination after being forwarded.
                        Err(_) => true,
                    };
                    if tx.send(page).await.is_err() {
                        // The receiving side of the stream was dropped.
                        return;
                    }
                    if finished {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`ListCommandInvocations`](crate::operation::ListCommandInvocations)
pub struct ListCommandInvocationsPaginator {
    handle: std::sync::Arc<crate::client::Handle>,
    builder: crate::input::list_command_invocations_input::Builder,
}
impl ListCommandInvocationsPaginator {
    /// Wrap a client handle and an input builder in a new paginator.
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::list_command_invocations_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// The returned paginator yields the elements of `command_invocations` one at a time. Requests
    /// to the underlying service are dispatched lazily.
    pub fn items(self) -> crate::paginator::ListCommandInvocationsPaginatorItems {
        crate::paginator::ListCommandInvocationsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::ListCommandInvocationsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListCommandInvocationsError>,
        >,
    > + Unpin {
        // Destructure so the async closure owns each field independently.
        let Self { handle, builder } = self;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the first request; builder validation failures surface here as an
                // early construction error on the stream.
                let mut page_input = match builder.build() {
                    Ok(built) => built,
                    Err(build_err) => {
                        let _ = tx
                            .send(Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                                build_err.into(),
                            )))
                            .await;
                        return;
                    }
                };
                loop {
                    // Convert the current input into a dispatchable operation.
                    let operation = match page_input.make_operation(&handle.conf).await {
                        Ok(made) => made,
                        Err(op_err) => {
                            let _ = tx
                                .send(Err(
                                    aws_smithy_http::result::SdkError::ConstructionFailure(
                                        op_err.into(),
                                    ),
                                ))
                                .await;
                            return;
                        }
                    };
                    let page = handle.client.call(operation).await;
                    // Work out whether this page is the final one *before* the response is
                    // moved into the channel below.
                    let finished = match &page {
                        Ok(output) => {
                            let token = crate::lens::reflens_structure_crate_output_list_command_invocations_output_next_token(output);
                            let token_absent = token.map(|t| t.is_empty()).unwrap_or(true);
                            if !token_absent && token == page_input.next_token.as_ref() {
                                // An unchanged token would make us loop forever; abort instead.
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            page_input.next_token = token.cloned();
                            token_absent
                        }
                        // Errors always terminate pagination after being forwarded.
                        Err(_) => true,
                    };
                    if tx.send(page).await.is_err() {
                        // The receiving side of the stream was dropped.
                        return;
                    }
                    if finished {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`ListCommands`](crate::operation::ListCommands)
pub struct ListCommandsPaginator {
    handle: std::sync::Arc<crate::client::Handle>,
    builder: crate::input::list_commands_input::Builder,
}
impl ListCommandsPaginator {
    /// Wrap a client handle and an input builder in a new paginator.
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::list_commands_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// The returned paginator yields the elements of `commands` one at a time. Requests to the
    /// underlying service are dispatched lazily.
    pub fn items(self) -> crate::paginator::ListCommandsPaginatorItems {
        crate::paginator::ListCommandsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::ListCommandsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListCommandsError>,
        >,
    > + Unpin {
        // Destructure so the async closure owns each field independently.
        let Self { handle, builder } = self;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the first request; builder validation failures surface here as an
                // early construction error on the stream.
                let mut page_input = match builder.build() {
                    Ok(built) => built,
                    Err(build_err) => {
                        let _ = tx
                            .send(Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                                build_err.into(),
                            )))
                            .await;
                        return;
                    }
                };
                loop {
                    // Convert the current input into a dispatchable operation.
                    let operation = match page_input.make_operation(&handle.conf).await {
                        Ok(made) => made,
                        Err(op_err) => {
                            let _ = tx
                                .send(Err(
                                    aws_smithy_http::result::SdkError::ConstructionFailure(
                                        op_err.into(),
                                    ),
                                ))
                                .await;
                            return;
                        }
                    };
                    let page = handle.client.call(operation).await;
                    // Work out whether this page is the final one *before* the response is
                    // moved into the channel below.
                    let finished = match &page {
                        Ok(output) => {
                            let token = crate::lens::reflens_structure_crate_output_list_commands_output_next_token(output);
                            let token_absent = token.map(|t| t.is_empty()).unwrap_or(true);
                            if !token_absent && token == page_input.next_token.as_ref() {
                                // An unchanged token would make us loop forever; abort instead.
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            page_input.next_token = token.cloned();
                            token_absent
                        }
                        // Errors always terminate pagination after being forwarded.
                        Err(_) => true,
                    };
                    if tx.send(page).await.is_err() {
                        // The receiving side of the stream was dropped.
                        return;
                    }
                    if finished {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`ListComplianceItems`](crate::operation::ListComplianceItems)
pub struct ListComplianceItemsPaginator {
    handle: std::sync::Arc<crate::client::Handle>,
    builder: crate::input::list_compliance_items_input::Builder,
}
impl ListComplianceItemsPaginator {
    /// Wrap a client handle and an input builder in a new paginator.
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::list_compliance_items_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// The returned paginator yields the elements of `compliance_items` one at a time. Requests to
    /// the underlying service are dispatched lazily.
    pub fn items(self) -> crate::paginator::ListComplianceItemsPaginatorItems {
        crate::paginator::ListComplianceItemsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::ListComplianceItemsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListComplianceItemsError>,
        >,
    > + Unpin {
        // Destructure so the async closure owns each field independently.
        let Self { handle, builder } = self;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the first request; builder validation failures surface here as an
                // early construction error on the stream.
                let mut page_input = match builder.build() {
                    Ok(built) => built,
                    Err(build_err) => {
                        let _ = tx
                            .send(Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                                build_err.into(),
                            )))
                            .await;
                        return;
                    }
                };
                loop {
                    // Convert the current input into a dispatchable operation.
                    let operation = match page_input.make_operation(&handle.conf).await {
                        Ok(made) => made,
                        Err(op_err) => {
                            let _ = tx
                                .send(Err(
                                    aws_smithy_http::result::SdkError::ConstructionFailure(
                                        op_err.into(),
                                    ),
                                ))
                                .await;
                            return;
                        }
                    };
                    let page = handle.client.call(operation).await;
                    // Work out whether this page is the final one *before* the response is
                    // moved into the channel below.
                    let finished = match &page {
                        Ok(output) => {
                            let token = crate::lens::reflens_structure_crate_output_list_compliance_items_output_next_token(output);
                            let token_absent = token.map(|t| t.is_empty()).unwrap_or(true);
                            if !token_absent && token == page_input.next_token.as_ref() {
                                // An unchanged token would make us loop forever; abort instead.
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            page_input.next_token = token.cloned();
                            token_absent
                        }
                        // Errors always terminate pagination after being forwarded.
                        Err(_) => true,
                    };
                    if tx.send(page).await.is_err() {
                        // The receiving side of the stream was dropped.
                        return;
                    }
                    if finished {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`ListComplianceSummaries`](crate::operation::ListComplianceSummaries)
pub struct ListComplianceSummariesPaginator {
    handle: std::sync::Arc<crate::client::Handle>,
    builder: crate::input::list_compliance_summaries_input::Builder,
}
impl ListComplianceSummariesPaginator {
    /// Wrap a client handle and an input builder in a new paginator.
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::list_compliance_summaries_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// The returned paginator yields the elements of `compliance_summary_items` one at a time.
    /// Requests to the underlying service are dispatched lazily.
    pub fn items(self) -> crate::paginator::ListComplianceSummariesPaginatorItems {
        crate::paginator::ListComplianceSummariesPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::ListComplianceSummariesOutput,
            aws_smithy_http::result::SdkError<crate::error::ListComplianceSummariesError>,
        >,
    > + Unpin {
        // Destructure so the async closure owns each field independently.
        let Self { handle, builder } = self;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the first request; builder validation failures surface here as an
                // early construction error on the stream.
                let mut page_input = match builder.build() {
                    Ok(built) => built,
                    Err(build_err) => {
                        let _ = tx
                            .send(Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                                build_err.into(),
                            )))
                            .await;
                        return;
                    }
                };
                loop {
                    // Convert the current input into a dispatchable operation.
                    let operation = match page_input.make_operation(&handle.conf).await {
                        Ok(made) => made,
                        Err(op_err) => {
                            let _ = tx
                                .send(Err(
                                    aws_smithy_http::result::SdkError::ConstructionFailure(
                                        op_err.into(),
                                    ),
                                ))
                                .await;
                            return;
                        }
                    };
                    let page = handle.client.call(operation).await;
                    // Work out whether this page is the final one *before* the response is
                    // moved into the channel below.
                    let finished = match &page {
                        Ok(output) => {
                            let token = crate::lens::reflens_structure_crate_output_list_compliance_summaries_output_next_token(output);
                            let token_absent = token.map(|t| t.is_empty()).unwrap_or(true);
                            if !token_absent && token == page_input.next_token.as_ref() {
                                // An unchanged token would make us loop forever; abort instead.
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            page_input.next_token = token.cloned();
                            token_absent
                        }
                        // Errors always terminate pagination after being forwarded.
                        Err(_) => true,
                    };
                    if tx.send(page).await.is_err() {
                        // The receiving side of the stream was dropped.
                        return;
                    }
                    if finished {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`ListDocuments`](crate::operation::ListDocuments)
pub struct ListDocumentsPaginator {
    handle: std::sync::Arc<crate::client::Handle>,
    builder: crate::input::list_documents_input::Builder,
}
impl ListDocumentsPaginator {
    /// Wrap a client handle and an input builder in a new paginator.
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::list_documents_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// The returned paginator yields the elements of `document_identifiers` one at a time. Requests
    /// to the underlying service are dispatched lazily.
    pub fn items(self) -> crate::paginator::ListDocumentsPaginatorItems {
        crate::paginator::ListDocumentsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::ListDocumentsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListDocumentsError>,
        >,
    > + Unpin {
        // Destructure so the async closure owns each field independently.
        let Self { handle, builder } = self;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the first request; builder validation failures surface here as an
                // early construction error on the stream.
                let mut page_input = match builder.build() {
                    Ok(built) => built,
                    Err(build_err) => {
                        let _ = tx
                            .send(Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                                build_err.into(),
                            )))
                            .await;
                        return;
                    }
                };
                loop {
                    // Convert the current input into a dispatchable operation.
                    let operation = match page_input.make_operation(&handle.conf).await {
                        Ok(made) => made,
                        Err(op_err) => {
                            let _ = tx
                                .send(Err(
                                    aws_smithy_http::result::SdkError::ConstructionFailure(
                                        op_err.into(),
                                    ),
                                ))
                                .await;
                            return;
                        }
                    };
                    let page = handle.client.call(operation).await;
                    // Work out whether this page is the final one *before* the response is
                    // moved into the channel below.
                    let finished = match &page {
                        Ok(output) => {
                            let token = crate::lens::reflens_structure_crate_output_list_documents_output_next_token(output);
                            let token_absent = token.map(|t| t.is_empty()).unwrap_or(true);
                            if !token_absent && token == page_input.next_token.as_ref() {
                                // An unchanged token would make us loop forever; abort instead.
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            page_input.next_token = token.cloned();
                            token_absent
                        }
                        // Errors always terminate pagination after being forwarded.
                        Err(_) => true,
                    };
                    if tx.send(page).await.is_err() {
                        // The receiving side of the stream was dropped.
                        return;
                    }
                    if finished {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`ListDocumentVersions`](crate::operation::ListDocumentVersions)
pub struct ListDocumentVersionsPaginator {
    /// Shared handle to the client and its configuration; every page request is dispatched through it.
    handle: std::sync::Arc<crate::client::Handle>,
    /// Builder for the operation input; it is first built when the stream is polled.
    builder: crate::input::list_document_versions_input::Builder,
}
impl ListDocumentVersionsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::list_document_versions_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `document_versions`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::ListDocumentVersionsPaginatorItems {
        crate::paginator::ListDocumentVersionsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::ListDocumentVersionsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListDocumentVersionsError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                // Request one page per iteration until the service stops returning a next token.
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    // Dispatch the request for the current page.
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_list_document_versions_output_next_token(resp);
                            // An absent or empty token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the one just sent would re-request
                            // the same page forever; abort and report it as an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the input so the next iteration requests the following page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`ListOpsItemEvents`](crate::operation::ListOpsItemEvents)
pub struct ListOpsItemEventsPaginator {
    /// Shared handle to the client and its configuration; every page request is dispatched through it.
    handle: std::sync::Arc<crate::client::Handle>,
    /// Builder for the operation input; it is first built when the stream is polled.
    builder: crate::input::list_ops_item_events_input::Builder,
}
impl ListOpsItemEventsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::list_ops_item_events_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `summaries`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::ListOpsItemEventsPaginatorItems {
        crate::paginator::ListOpsItemEventsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::ListOpsItemEventsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListOpsItemEventsError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                // Request one page per iteration until the service stops returning a next token.
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    // Dispatch the request for the current page.
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_list_ops_item_events_output_next_token(resp);
                            // An absent or empty token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the one just sent would re-request
                            // the same page forever; abort and report it as an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the input so the next iteration requests the following page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`ListOpsItemRelatedItems`](crate::operation::ListOpsItemRelatedItems)
pub struct ListOpsItemRelatedItemsPaginator {
    /// Shared handle to the client and its configuration; every page request is dispatched through it.
    handle: std::sync::Arc<crate::client::Handle>,
    /// Builder for the operation input; it is first built when the stream is polled.
    builder: crate::input::list_ops_item_related_items_input::Builder,
}
impl ListOpsItemRelatedItemsPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::list_ops_item_related_items_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `summaries`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::ListOpsItemRelatedItemsPaginatorItems {
        crate::paginator::ListOpsItemRelatedItemsPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::ListOpsItemRelatedItemsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListOpsItemRelatedItemsError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                // Request one page per iteration until the service stops returning a next token.
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    // Dispatch the request for the current page.
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_list_ops_item_related_items_output_next_token(resp);
                            // An absent or empty token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the one just sent would re-request
                            // the same page forever; abort and report it as an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the input so the next iteration requests the following page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`ListOpsMetadata`](crate::operation::ListOpsMetadata)
pub struct ListOpsMetadataPaginator {
    /// Shared handle to the client and its configuration; every page request is dispatched through it.
    handle: std::sync::Arc<crate::client::Handle>,
    /// Builder for the operation input; it is first built when the stream is polled.
    builder: crate::input::list_ops_metadata_input::Builder,
}
impl ListOpsMetadataPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::list_ops_metadata_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `ops_metadata_list`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::ListOpsMetadataPaginatorItems {
        crate::paginator::ListOpsMetadataPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::ListOpsMetadataOutput,
            aws_smithy_http::result::SdkError<crate::error::ListOpsMetadataError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                // Request one page per iteration until the service stops returning a next token.
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    // Dispatch the request for the current page.
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_list_ops_metadata_output_next_token(resp);
                            // An absent or empty token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the one just sent would re-request
                            // the same page forever; abort and report it as an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the input so the next iteration requests the following page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`ListResourceComplianceSummaries`](crate::operation::ListResourceComplianceSummaries)
pub struct ListResourceComplianceSummariesPaginator {
    /// Shared handle to the client and its configuration; every page request is dispatched through it.
    handle: std::sync::Arc<crate::client::Handle>,
    /// Builder for the operation input; it is first built when the stream is polled.
    builder: crate::input::list_resource_compliance_summaries_input::Builder,
}
impl ListResourceComplianceSummariesPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::list_resource_compliance_summaries_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `resource_compliance_summary_items`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::ListResourceComplianceSummariesPaginatorItems {
        crate::paginator::ListResourceComplianceSummariesPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::ListResourceComplianceSummariesOutput,
            aws_smithy_http::result::SdkError<crate::error::ListResourceComplianceSummariesError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                // Request one page per iteration until the service stops returning a next token.
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    // Dispatch the request for the current page.
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_list_resource_compliance_summaries_output_next_token(resp);
                            // An absent or empty token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the one just sent would re-request
                            // the same page forever; abort and report it as an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the input so the next iteration requests the following page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Paginator for [`ListResourceDataSync`](crate::operation::ListResourceDataSync)
pub struct ListResourceDataSyncPaginator {
    /// Shared handle to the client and its configuration; every page request is dispatched through it.
    handle: std::sync::Arc<crate::client::Handle>,
    /// Builder for the operation input; it is first built when the stream is polled.
    builder: crate::input::list_resource_data_sync_input::Builder,
}
impl ListResourceDataSyncPaginator {
    /// Create a new paginator-wrapper
    pub(crate) fn new(
        handle: std::sync::Arc<crate::client::Handle>,
        builder: crate::input::list_resource_data_sync_input::Builder,
    ) -> Self {
        Self { handle, builder }
    }
    /// Set the page size
    ///
    /// _Note: this method will override any previously set value for `max_results`_
    pub fn page_size(mut self, limit: i32) -> Self {
        self.builder.max_results = Some(limit);
        self
    }
    /// Create a flattened paginator
    ///
    /// This paginator automatically flattens results using `resource_data_sync_items`. Queries to the underlying service
    /// are dispatched lazily.
    pub fn items(self) -> crate::paginator::ListResourceDataSyncPaginatorItems {
        crate::paginator::ListResourceDataSyncPaginatorItems(self)
    }
    /// Create the pagination stream
    ///
    /// _Note:_ No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next)).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::output::ListResourceDataSyncOutput,
            aws_smithy_http::result::SdkError<crate::error::ListResourceDataSyncError>,
        >,
    > + Unpin {
        // Move individual fields out of self for the borrow checker
        let builder = self.builder;
        let handle = self.handle;
        aws_smithy_async::future::fn_stream::FnStream::new(move |tx| {
            Box::pin(async move {
                // Build the input for the first time. If required fields are missing, this is where we'll produce an early error.
                let mut input = match builder.build().map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                }) {
                    Ok(input) => input,
                    Err(e) => {
                        let _ = tx.send(Err(e)).await;
                        return;
                    }
                };
                // Request one page per iteration until the service stops returning a next token.
                loop {
                    let op = match input.make_operation(&handle.conf).await.map_err(|err| {
                        aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                    }) {
                        Ok(op) => op,
                        Err(e) => {
                            let _ = tx.send(Err(e)).await;
                            return;
                        }
                    };
                    // Dispatch the request for the current page.
                    let resp = handle.client.call(op).await;
                    // If the input member is None or it was an error
                    let done = match resp {
                        Ok(ref resp) => {
                            let new_token = crate::lens::reflens_structure_crate_output_list_resource_data_sync_output_next_token(resp);
                            // An absent or empty token marks the final page.
                            let is_empty = new_token.map(|token| token.is_empty()).unwrap_or(true);
                            // A non-empty token identical to the one just sent would re-request
                            // the same page forever; abort and report it as an SDK/service bug.
                            if !is_empty && new_token == input.next_token.as_ref() {
                                let _ = tx.send(Err(aws_smithy_http::result::SdkError::ConstructionFailure("next token did not change, aborting paginator. This indicates an SDK or AWS service bug.".into()))).await;
                                return;
                            }
                            // Advance the input so the next iteration requests the following page.
                            input.next_token = new_token.cloned();
                            is_empty
                        }
                        Err(_) => true,
                    };
                    if tx.send(resp).await.is_err() {
                        // receiving end was dropped
                        return;
                    }
                    if done {
                        return;
                    }
                }
            })
        })
    }
}
/// Flattened paginator for `DescribeActivationsPaginator`
///
/// This is created with [`.items()`](DescribeActivationsPaginator::items)
pub struct DescribeActivationsPaginatorItems(DescribeActivationsPaginator);
impl DescribeActivationsPaginatorItems {
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::Activation,
aws_smithy_http::result::SdkError<crate::error::DescribeActivationsError>,
>,
> + Unpin {
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
crate::lens::lens_structure_crate_output_describe_activations_output_activation_list(
page,
)
.unwrap_or_default()
.into_iter()
})
}
}
/// Flattened paginator for `DescribeAssociationExecutionsPaginator`
///
/// This is created with [`.items()`](DescribeAssociationExecutionsPaginator::items)
pub struct DescribeAssociationExecutionsPaginatorItems(DescribeAssociationExecutionsPaginator);
impl DescribeAssociationExecutionsPaginatorItems {
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::AssociationExecution,
aws_smithy_http::result::SdkError<crate::error::DescribeAssociationExecutionsError>,
>,
> + Unpin {
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_association_executions_output_association_executions(page).unwrap_or_default().into_iter())
}
}
/// Flattened paginator for `DescribeAssociationExecutionTargetsPaginator`
///
/// This is created with [`.items()`](DescribeAssociationExecutionTargetsPaginator::items)
pub struct DescribeAssociationExecutionTargetsPaginatorItems(
DescribeAssociationExecutionTargetsPaginator,
);
impl DescribeAssociationExecutionTargetsPaginatorItems {
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::AssociationExecutionTarget,
aws_smithy_http::result::SdkError<
crate::error::DescribeAssociationExecutionTargetsError,
>,
>,
> + Unpin {
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_association_execution_targets_output_association_execution_targets(page).unwrap_or_default().into_iter())
}
}
/// Flattened paginator for `DescribeAutomationExecutionsPaginator`
///
/// This is created with [`.items()`](DescribeAutomationExecutionsPaginator::items)
pub struct DescribeAutomationExecutionsPaginatorItems(DescribeAutomationExecutionsPaginator);
impl DescribeAutomationExecutionsPaginatorItems {
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::AutomationExecutionMetadata,
aws_smithy_http::result::SdkError<crate::error::DescribeAutomationExecutionsError>,
>,
> + Unpin {
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_automation_executions_output_automation_execution_metadata_list(page).unwrap_or_default().into_iter())
}
}
/// Flattened paginator for `DescribeAutomationStepExecutionsPaginator`
///
/// This is created with [`.items()`](DescribeAutomationStepExecutionsPaginator::items)
pub struct DescribeAutomationStepExecutionsPaginatorItems(
DescribeAutomationStepExecutionsPaginator,
);
impl DescribeAutomationStepExecutionsPaginatorItems {
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::StepExecution,
aws_smithy_http::result::SdkError<crate::error::DescribeAutomationStepExecutionsError>,
>,
> + Unpin {
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_automation_step_executions_output_step_executions(page).unwrap_or_default().into_iter())
}
}
/// Flattened paginator for `DescribeAvailablePatchesPaginator`
///
/// This is created with [`.items()`](DescribeAvailablePatchesPaginator::items)
pub struct DescribeAvailablePatchesPaginatorItems(DescribeAvailablePatchesPaginator);
impl DescribeAvailablePatchesPaginatorItems {
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::Patch,
aws_smithy_http::result::SdkError<crate::error::DescribeAvailablePatchesError>,
>,
> + Unpin {
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
crate::lens::lens_structure_crate_output_describe_available_patches_output_patches(page)
.unwrap_or_default()
.into_iter()
})
}
}
/// Flattened paginator for `DescribeEffectiveInstanceAssociationsPaginator`
///
/// This is created with [`.items()`](DescribeEffectiveInstanceAssociationsPaginator::items)
pub struct DescribeEffectiveInstanceAssociationsPaginatorItems(
DescribeEffectiveInstanceAssociationsPaginator,
);
impl DescribeEffectiveInstanceAssociationsPaginatorItems {
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::InstanceAssociation,
aws_smithy_http::result::SdkError<
crate::error::DescribeEffectiveInstanceAssociationsError,
>,
>,
> + Unpin {
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_effective_instance_associations_output_associations(page).unwrap_or_default().into_iter())
}
}
/// Flattened paginator for `DescribeEffectivePatchesForPatchBaselinePaginator`
///
/// This is created with [`.items()`](DescribeEffectivePatchesForPatchBaselinePaginator::items)
pub struct DescribeEffectivePatchesForPatchBaselinePaginatorItems(
DescribeEffectivePatchesForPatchBaselinePaginator,
);
impl DescribeEffectivePatchesForPatchBaselinePaginatorItems {
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::EffectivePatch,
aws_smithy_http::result::SdkError<
crate::error::DescribeEffectivePatchesForPatchBaselineError,
>,
>,
> + Unpin {
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_effective_patches_for_patch_baseline_output_effective_patches(page).unwrap_or_default().into_iter())
}
}
/// Flattened paginator for `DescribeInstanceAssociationsStatusPaginator`
///
/// This is created with [`.items()`](DescribeInstanceAssociationsStatusPaginator::items)
pub struct DescribeInstanceAssociationsStatusPaginatorItems(
DescribeInstanceAssociationsStatusPaginator,
);
impl DescribeInstanceAssociationsStatusPaginatorItems {
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::InstanceAssociationStatusInfo,
aws_smithy_http::result::SdkError<
crate::error::DescribeInstanceAssociationsStatusError,
>,
>,
> + Unpin {
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_instance_associations_status_output_instance_association_status_infos(page).unwrap_or_default().into_iter())
}
}
/// Flattened paginator for `DescribeInstanceInformationPaginator`
///
/// This is created with [`.items()`](DescribeInstanceInformationPaginator::items)
pub struct DescribeInstanceInformationPaginatorItems(DescribeInstanceInformationPaginator);
impl DescribeInstanceInformationPaginatorItems {
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::InstanceInformation,
aws_smithy_http::result::SdkError<crate::error::DescribeInstanceInformationError>,
>,
> + Unpin {
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_instance_information_output_instance_information_list(page).unwrap_or_default().into_iter())
}
}
/// Flattened paginator for `DescribeInstancePatchesPaginator`
///
/// This is created with [`.items()`](DescribeInstancePatchesPaginator::items)
pub struct DescribeInstancePatchesPaginatorItems(DescribeInstancePatchesPaginator);
impl DescribeInstancePatchesPaginatorItems {
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::PatchComplianceData,
aws_smithy_http::result::SdkError<crate::error::DescribeInstancePatchesError>,
>,
> + Unpin {
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
crate::lens::lens_structure_crate_output_describe_instance_patches_output_patches(page)
.unwrap_or_default()
.into_iter()
})
}
}
/// Flattened paginator for `DescribeInstancePatchStatesPaginator`
///
/// This is created with [`.items()`](DescribeInstancePatchStatesPaginator::items)
pub struct DescribeInstancePatchStatesPaginatorItems(DescribeInstancePatchStatesPaginator);
impl DescribeInstancePatchStatesPaginatorItems {
/// Create the pagination stream
///
/// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
///
/// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
pub fn send(
self,
) -> impl tokio_stream::Stream<
Item = std::result::Result<
crate::model::InstancePatchState,
aws_smithy_http::result::SdkError<crate::error::DescribeInstancePatchStatesError>,
>,
> + Unpin {
aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_instance_patch_states_output_instance_patch_states(page).unwrap_or_default().into_iter())
}
}
/// Flattened paginator for `DescribeInstancePatchStatesForPatchGroupPaginator`
///
/// This is created with [`.items()`](DescribeInstancePatchStatesForPatchGroupPaginator::items)
pub struct DescribeInstancePatchStatesForPatchGroupPaginatorItems(
    DescribeInstancePatchStatesForPatchGroupPaginator,
);
impl DescribeInstancePatchStatesForPatchGroupPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::InstancePatchState,
            aws_smithy_http::result::SdkError<
                crate::error::DescribeInstancePatchStatesForPatchGroupError,
            >,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_instance_patch_states_for_patch_group_output_instance_patch_states(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `DescribeInventoryDeletionsPaginator`
///
/// This is created with [`.items()`](DescribeInventoryDeletionsPaginator::items)
pub struct DescribeInventoryDeletionsPaginatorItems(DescribeInventoryDeletionsPaginator);
impl DescribeInventoryDeletionsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::InventoryDeletionStatusItem,
            aws_smithy_http::result::SdkError<crate::error::DescribeInventoryDeletionsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_inventory_deletions_output_inventory_deletions(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `DescribeMaintenanceWindowExecutionsPaginator`
///
/// This is created with [`.items()`](DescribeMaintenanceWindowExecutionsPaginator::items)
pub struct DescribeMaintenanceWindowExecutionsPaginatorItems(
    DescribeMaintenanceWindowExecutionsPaginator,
);
impl DescribeMaintenanceWindowExecutionsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::MaintenanceWindowExecution,
            aws_smithy_http::result::SdkError<
                crate::error::DescribeMaintenanceWindowExecutionsError,
            >,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_maintenance_window_executions_output_window_executions(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `DescribeMaintenanceWindowExecutionTaskInvocationsPaginator`
///
/// This is created with [`.items()`](DescribeMaintenanceWindowExecutionTaskInvocationsPaginator::items)
pub struct DescribeMaintenanceWindowExecutionTaskInvocationsPaginatorItems(
    DescribeMaintenanceWindowExecutionTaskInvocationsPaginator,
);
impl DescribeMaintenanceWindowExecutionTaskInvocationsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::MaintenanceWindowExecutionTaskInvocationIdentity,
            aws_smithy_http::result::SdkError<
                crate::error::DescribeMaintenanceWindowExecutionTaskInvocationsError,
            >,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_maintenance_window_execution_task_invocations_output_window_execution_task_invocation_identities(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `DescribeMaintenanceWindowExecutionTasksPaginator`
///
/// This is created with [`.items()`](DescribeMaintenanceWindowExecutionTasksPaginator::items)
pub struct DescribeMaintenanceWindowExecutionTasksPaginatorItems(
    DescribeMaintenanceWindowExecutionTasksPaginator,
);
impl DescribeMaintenanceWindowExecutionTasksPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::MaintenanceWindowExecutionTaskIdentity,
            aws_smithy_http::result::SdkError<
                crate::error::DescribeMaintenanceWindowExecutionTasksError,
            >,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_maintenance_window_execution_tasks_output_window_execution_task_identities(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `DescribeMaintenanceWindowsPaginator`
///
/// This is created with [`.items()`](DescribeMaintenanceWindowsPaginator::items)
pub struct DescribeMaintenanceWindowsPaginatorItems(DescribeMaintenanceWindowsPaginator);
impl DescribeMaintenanceWindowsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::MaintenanceWindowIdentity,
            aws_smithy_http::result::SdkError<crate::error::DescribeMaintenanceWindowsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_maintenance_windows_output_window_identities(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `DescribeMaintenanceWindowSchedulePaginator`
///
/// This is created with [`.items()`](DescribeMaintenanceWindowSchedulePaginator::items)
pub struct DescribeMaintenanceWindowSchedulePaginatorItems(
    DescribeMaintenanceWindowSchedulePaginator,
);
impl DescribeMaintenanceWindowSchedulePaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::ScheduledWindowExecution,
            aws_smithy_http::result::SdkError<crate::error::DescribeMaintenanceWindowScheduleError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_maintenance_window_schedule_output_scheduled_window_executions(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `DescribeMaintenanceWindowsForTargetPaginator`
///
/// This is created with [`.items()`](DescribeMaintenanceWindowsForTargetPaginator::items)
pub struct DescribeMaintenanceWindowsForTargetPaginatorItems(
    DescribeMaintenanceWindowsForTargetPaginator,
);
impl DescribeMaintenanceWindowsForTargetPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::MaintenanceWindowIdentityForTarget,
            aws_smithy_http::result::SdkError<
                crate::error::DescribeMaintenanceWindowsForTargetError,
            >,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_maintenance_windows_for_target_output_window_identities(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `DescribeMaintenanceWindowTargetsPaginator`
///
/// This is created with [`.items()`](DescribeMaintenanceWindowTargetsPaginator::items)
pub struct DescribeMaintenanceWindowTargetsPaginatorItems(
    DescribeMaintenanceWindowTargetsPaginator,
);
impl DescribeMaintenanceWindowTargetsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::MaintenanceWindowTarget,
            aws_smithy_http::result::SdkError<crate::error::DescribeMaintenanceWindowTargetsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_maintenance_window_targets_output_targets(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `DescribeMaintenanceWindowTasksPaginator`
///
/// This is created with [`.items()`](DescribeMaintenanceWindowTasksPaginator::items)
pub struct DescribeMaintenanceWindowTasksPaginatorItems(DescribeMaintenanceWindowTasksPaginator);
impl DescribeMaintenanceWindowTasksPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::MaintenanceWindowTask,
            aws_smithy_http::result::SdkError<crate::error::DescribeMaintenanceWindowTasksError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_describe_maintenance_window_tasks_output_tasks(
                page,
            )
            .unwrap_or_default()
            .into_iter()
        })
    }
}
/// Flattened paginator for `DescribeOpsItemsPaginator`
///
/// This is created with [`.items()`](DescribeOpsItemsPaginator::items)
pub struct DescribeOpsItemsPaginatorItems(DescribeOpsItemsPaginator);
impl DescribeOpsItemsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::OpsItemSummary,
            aws_smithy_http::result::SdkError<crate::error::DescribeOpsItemsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_describe_ops_items_output_ops_item_summaries(
                page,
            )
            .unwrap_or_default()
            .into_iter()
        })
    }
}
/// Flattened paginator for `DescribePatchBaselinesPaginator`
///
/// This is created with [`.items()`](DescribePatchBaselinesPaginator::items)
pub struct DescribePatchBaselinesPaginatorItems(DescribePatchBaselinesPaginator);
impl DescribePatchBaselinesPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::PatchBaselineIdentity,
            aws_smithy_http::result::SdkError<crate::error::DescribePatchBaselinesError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_describe_patch_baselines_output_baseline_identities(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `DescribePatchGroupsPaginator`
///
/// This is created with [`.items()`](DescribePatchGroupsPaginator::items)
pub struct DescribePatchGroupsPaginatorItems(DescribePatchGroupsPaginator);
impl DescribePatchGroupsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::PatchGroupPatchBaselineMapping,
            aws_smithy_http::result::SdkError<crate::error::DescribePatchGroupsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_describe_patch_groups_output_mappings(page)
                .unwrap_or_default()
                .into_iter()
        })
    }
}
/// Flattened paginator for `DescribePatchPropertiesPaginator`
///
/// This is created with [`.items()`](DescribePatchPropertiesPaginator::items)
pub struct DescribePatchPropertiesPaginatorItems(DescribePatchPropertiesPaginator);
impl DescribePatchPropertiesPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            std::collections::HashMap<std::string::String, std::string::String>,
            aws_smithy_http::result::SdkError<crate::error::DescribePatchPropertiesError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_describe_patch_properties_output_properties(
                page,
            )
            .unwrap_or_default()
            .into_iter()
        })
    }
}
/// Flattened paginator for `DescribeSessionsPaginator`
///
/// This is created with [`.items()`](DescribeSessionsPaginator::items)
pub struct DescribeSessionsPaginatorItems(DescribeSessionsPaginator);
impl DescribeSessionsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::Session,
            aws_smithy_http::result::SdkError<crate::error::DescribeSessionsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_describe_sessions_output_sessions(page)
                .unwrap_or_default()
                .into_iter()
        })
    }
}
/// Flattened paginator for `GetInventoryPaginator`
///
/// This is created with [`.items()`](GetInventoryPaginator::items)
pub struct GetInventoryPaginatorItems(GetInventoryPaginator);
impl GetInventoryPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::InventoryResultEntity,
            aws_smithy_http::result::SdkError<crate::error::GetInventoryError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_get_inventory_output_entities(page)
                .unwrap_or_default()
                .into_iter()
        })
    }
}
/// Flattened paginator for `GetInventorySchemaPaginator`
///
/// This is created with [`.items()`](GetInventorySchemaPaginator::items)
pub struct GetInventorySchemaPaginatorItems(GetInventorySchemaPaginator);
impl GetInventorySchemaPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::InventoryItemSchema,
            aws_smithy_http::result::SdkError<crate::error::GetInventorySchemaError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_get_inventory_schema_output_schemas(page)
                .unwrap_or_default()
                .into_iter()
        })
    }
}
/// Flattened paginator for `GetOpsSummaryPaginator`
///
/// This is created with [`.items()`](GetOpsSummaryPaginator::items)
pub struct GetOpsSummaryPaginatorItems(GetOpsSummaryPaginator);
impl GetOpsSummaryPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::OpsEntity,
            aws_smithy_http::result::SdkError<crate::error::GetOpsSummaryError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_get_ops_summary_output_entities(page)
                .unwrap_or_default()
                .into_iter()
        })
    }
}
/// Flattened paginator for `ListAssociationsPaginator`
///
/// This is created with [`.items()`](ListAssociationsPaginator::items)
pub struct ListAssociationsPaginatorItems(ListAssociationsPaginator);
impl ListAssociationsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::Association,
            aws_smithy_http::result::SdkError<crate::error::ListAssociationsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_list_associations_output_associations(page)
                .unwrap_or_default()
                .into_iter()
        })
    }
}
/// Flattened paginator for `ListAssociationVersionsPaginator`
///
/// This is created with [`.items()`](ListAssociationVersionsPaginator::items)
pub struct ListAssociationVersionsPaginatorItems(ListAssociationVersionsPaginator);
impl ListAssociationVersionsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::AssociationVersionInfo,
            aws_smithy_http::result::SdkError<crate::error::ListAssociationVersionsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_list_association_versions_output_association_versions(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `ListCommandInvocationsPaginator`
///
/// This is created with [`.items()`](ListCommandInvocationsPaginator::items)
pub struct ListCommandInvocationsPaginatorItems(ListCommandInvocationsPaginator);
impl ListCommandInvocationsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::CommandInvocation,
            aws_smithy_http::result::SdkError<crate::error::ListCommandInvocationsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_list_command_invocations_output_command_invocations(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `ListCommandsPaginator`
///
/// This is created with [`.items()`](ListCommandsPaginator::items)
pub struct ListCommandsPaginatorItems(ListCommandsPaginator);
impl ListCommandsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::Command,
            aws_smithy_http::result::SdkError<crate::error::ListCommandsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_list_commands_output_commands(page)
                .unwrap_or_default()
                .into_iter()
        })
    }
}
/// Flattened paginator for `ListComplianceItemsPaginator`
///
/// This is created with [`.items()`](ListComplianceItemsPaginator::items)
pub struct ListComplianceItemsPaginatorItems(ListComplianceItemsPaginator);
impl ListComplianceItemsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::ComplianceItem,
            aws_smithy_http::result::SdkError<crate::error::ListComplianceItemsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_list_compliance_items_output_compliance_items(
                page,
            )
            .unwrap_or_default()
            .into_iter()
        })
    }
}
/// Flattened paginator for `ListComplianceSummariesPaginator`
///
/// This is created with [`.items()`](ListComplianceSummariesPaginator::items)
pub struct ListComplianceSummariesPaginatorItems(ListComplianceSummariesPaginator);
impl ListComplianceSummariesPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::ComplianceSummaryItem,
            aws_smithy_http::result::SdkError<crate::error::ListComplianceSummariesError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_list_compliance_summaries_output_compliance_summary_items(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `ListDocumentsPaginator`
///
/// This is created with [`.items()`](ListDocumentsPaginator::items)
pub struct ListDocumentsPaginatorItems(ListDocumentsPaginator);
impl ListDocumentsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::DocumentIdentifier,
            aws_smithy_http::result::SdkError<crate::error::ListDocumentsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_list_documents_output_document_identifiers(
                page,
            )
            .unwrap_or_default()
            .into_iter()
        })
    }
}
/// Flattened paginator for `ListDocumentVersionsPaginator`
///
/// This is created with [`.items()`](ListDocumentVersionsPaginator::items)
pub struct ListDocumentVersionsPaginatorItems(ListDocumentVersionsPaginator);
impl ListDocumentVersionsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::DocumentVersionInfo,
            aws_smithy_http::result::SdkError<crate::error::ListDocumentVersionsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_list_document_versions_output_document_versions(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `ListOpsItemEventsPaginator`
///
/// This is created with [`.items()`](ListOpsItemEventsPaginator::items)
pub struct ListOpsItemEventsPaginatorItems(ListOpsItemEventsPaginator);
impl ListOpsItemEventsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::OpsItemEventSummary,
            aws_smithy_http::result::SdkError<crate::error::ListOpsItemEventsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_list_ops_item_events_output_summaries(page)
                .unwrap_or_default()
                .into_iter()
        })
    }
}
/// Flattened paginator for `ListOpsItemRelatedItemsPaginator`
///
/// This is created with [`.items()`](ListOpsItemRelatedItemsPaginator::items)
pub struct ListOpsItemRelatedItemsPaginatorItems(ListOpsItemRelatedItemsPaginator);
impl ListOpsItemRelatedItemsPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::OpsItemRelatedItemSummary,
            aws_smithy_http::result::SdkError<crate::error::ListOpsItemRelatedItemsError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_list_ops_item_related_items_output_summaries(
                page,
            )
            .unwrap_or_default()
            .into_iter()
        })
    }
}
/// Flattened paginator for `ListOpsMetadataPaginator`
///
/// This is created with [`.items()`](ListOpsMetadataPaginator::items)
pub struct ListOpsMetadataPaginatorItems(ListOpsMetadataPaginator);
impl ListOpsMetadataPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::OpsMetadata,
            aws_smithy_http::result::SdkError<crate::error::ListOpsMetadataError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| {
            crate::lens::lens_structure_crate_output_list_ops_metadata_output_ops_metadata_list(
                page,
            )
            .unwrap_or_default()
            .into_iter()
        })
    }
}
/// Flattened paginator for `ListResourceComplianceSummariesPaginator`
///
/// This is created with [`.items()`](ListResourceComplianceSummariesPaginator::items)
pub struct ListResourceComplianceSummariesPaginatorItems(ListResourceComplianceSummariesPaginator);
impl ListResourceComplianceSummariesPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::ResourceComplianceSummaryItem,
            aws_smithy_http::result::SdkError<crate::error::ListResourceComplianceSummariesError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_list_resource_compliance_summaries_output_resource_compliance_summary_items(page).unwrap_or_default().into_iter())
    }
}
/// Flattened paginator for `ListResourceDataSyncPaginator`
///
/// This is created with [`.items()`](ListResourceDataSyncPaginator::items)
pub struct ListResourceDataSyncPaginatorItems(ListResourceDataSyncPaginator);
impl ListResourceDataSyncPaginatorItems {
    /// Create the pagination stream
    ///
    /// _Note: No requests will be dispatched until the stream is used (eg. with [`.next().await`](tokio_stream::StreamExt::next))._
    ///
    /// To read the entirety of the paginator, use [`.collect::<Result<Vec<_>, _>()`](tokio_stream::StreamExt::collect).
    pub fn send(
        self,
    ) -> impl tokio_stream::Stream<
        Item = std::result::Result<
            crate::model::ResourceDataSyncItem,
            aws_smithy_http::result::SdkError<crate::error::ListResourceDataSyncError>,
        >,
    > + Unpin {
        // Flatten: extract each page's item list (empty when absent) and yield its entries in order.
        aws_smithy_async::future::fn_stream::TryFlatMap::new(self.0.send()).flat_map(|page| crate::lens::lens_structure_crate_output_list_resource_data_sync_output_resource_data_sync_items(page).unwrap_or_default().into_iter())
    }
}
| 43.495443 | 277 | 0.553794 |
3915fa7204cde232fd6747c0503b630168466eb7
| 3,557 |
//! utils
use solana_program::{
instruction::{AccountMeta, Instruction},
program_error::ProgramError,
pubkey::Pubkey,
sanitize::SanitizeError,
serialize_utils::{read_pubkey, read_u16, read_u8},
};
/// Deserialize a bool from the single byte at `*current` in `data`
/// (0 = `false`, 1 = `true`), advancing the cursor by one on success.
///
/// Returns `IndexOutOfBounds` when the buffer is exhausted and
/// `InvalidValue` for any byte other than 0 or 1.
pub fn read_bool(current: &mut usize, data: &[u8]) -> Result<bool, SanitizeError> {
    // Same check as the other readers: require at least one unread byte.
    if *current + 1 > data.len() {
        return Err(SanitizeError::IndexOutOfBounds);
    }
    let value = match data[*current] {
        0 => false,
        1 => true,
        _ => return Err(SanitizeError::InvalidValue),
    };
    *current += 1;
    Ok(value)
}
/// Serialize a bool as a single byte (`false` → 0, `true` → 1) at
/// `*current` in `dst`, advancing the cursor by one.
///
/// Returns `InvalidAccountData` when `dst` has no room left.
pub fn write_bool(current: &mut usize, b: bool, dst: &mut [u8]) -> Result<(), ProgramError> {
    let next = *current + 1;
    if next > dst.len() {
        return Err(ProgramError::InvalidAccountData);
    }
    dst[*current] = b.into();
    *current = next;
    Ok(())
}
/// Serialize a `u16` in little-endian byte order at `*current` in `dst`,
/// advancing the cursor by two.
///
/// Returns `InvalidAccountData` when fewer than two bytes remain in `dst`.
pub fn write_u16(current: &mut usize, src: u16, dst: &mut [u8]) -> Result<(), ProgramError> {
    let end = *current + 2;
    if end > dst.len() {
        return Err(ProgramError::InvalidAccountData);
    }
    let bytes = src.to_le_bytes();
    dst[*current..end].copy_from_slice(&bytes);
    *current = end;
    Ok(())
}
/// Serialize a `Pubkey` (32 raw bytes) at `*current` in `dst`, advancing
/// the cursor by 32.
///
/// Returns `InvalidAccountData` when fewer than 32 bytes remain in `dst`.
pub fn write_pubkey(
    current: &mut usize,
    pubkey: &Pubkey,
    dst: &mut [u8],
) -> Result<(), ProgramError> {
    let end = *current + 32;
    if end > dst.len() {
        return Err(ProgramError::InvalidAccountData);
    }
    dst[*current..end].copy_from_slice(pubkey.as_ref());
    *current = end;
    Ok(())
}
/// read an instruction
pub fn read_instruction(current: &mut usize, input: &[u8]) -> Result<Instruction, ProgramError> {
let account_len = usize::from(read_u16(current, &input).unwrap());
let mut accounts = Vec::new();
for _ in 0..account_len {
let account_metadata = read_u8(current, &input).unwrap();
let account_pubkey = read_pubkey(current, &input).unwrap();
let account_meta = AccountMeta {
pubkey: account_pubkey,
is_signer: account_metadata & (1 << 0) != 0,
is_writable: account_metadata & (1 << 1) != 0,
};
accounts.push(account_meta);
}
let program_id = read_pubkey(current, input).unwrap();
let data_len = usize::from(read_u16(current, &input).unwrap());
let data = input[*current..*current + data_len].to_vec();
*current += data_len;
Ok(Instruction {
program_id: program_id,
accounts: accounts,
data: data,
})
}
/// write instruction
pub fn write_instruction(
current: &mut usize,
instruction: &Instruction,
dst: &mut [u8],
) -> Result<(), ProgramError> {
dst[*current..*current + 2].copy_from_slice(&(instruction.accounts.len() as u16).to_le_bytes());
*current += 2;
for account_meta in instruction.accounts.iter() {
let mut meta_byte = 0;
if account_meta.is_signer {
meta_byte |= 1 << 0;
}
if account_meta.is_writable {
meta_byte |= 1 << 1;
}
dst[*current] = meta_byte;
*current += 1;
dst[*current..*current + 32].copy_from_slice(account_meta.pubkey.as_ref());
*current += 32;
}
dst[*current..*current + 32].copy_from_slice(instruction.program_id.as_ref());
*current += 32;
let data_len = instruction.data.len();
dst[*current..*current + 2].copy_from_slice(&(data_len as u16).to_le_bytes());
*current += 2;
dst[*current..*current + data_len].copy_from_slice(instruction.data.as_ref());
*current += data_len;
Ok(())
}
| 28.456 | 100 | 0.595164 |
db423b3807710cd99b54e5709e17000e3d0a00d4
| 661 |
/// **(internal)** Implementation of basic logical operators for `Bdd`s using the `apply` function.
pub mod _impl_boolean_ops;
/// **(internal)** Implementation of extra operations which enable relation-like treatment of BDDs
/// (quantification, selection, projection, partial element picking)
pub mod _impl_relation_ops;
/// **(internal)** Simple export functions for printing `Bdd`s as `.dot` files.
pub mod _impl_export_dot;
/// **(internal)** Implementation of the string and byte serialisation procedures for `Bdd`s.
pub mod _impl_serialisation;
/// **(internal)** Implementation of some basic internal utility methods for `Bdd`s.
pub mod _impl_util;
| 41.3125 | 99 | 0.75643 |
c1ba04f0cbd3db2d1ffd64b5eb0aa4980fe639bc
| 7,245 |
use std::fmt;
#[derive(Clone)]
struct Transition {
    // Source state index.
    from: usize,
    // Destination state index.
    to: usize,
    // Input symbol consumed by this transition; `None` marks an ε-transition.
    on: Option<char>,
    // Output emitted when this transition is taken; `None` emits nothing.
    with: Option<String>,
}
#[derive(Clone)]
pub struct FST {
    // Number of states; states are the indices `0..state_count` and
    // matching starts at state 0 (see `match_string`).
    state_count: usize,
    // Indices of the accepting states.
    accepting_states: Vec<usize>,
    // Flat transition list, scanned linearly during matching.
    transitions: Vec<Transition>,
}
/// Return `transitions` with both endpoint state indices offset by `shift`.
fn shift_transitions(transitions: Vec<Transition>, shift: usize) -> Vec<Transition> {
    let mut shifted = Vec::with_capacity(transitions.len());
    for t in transitions {
        shifted.push(Transition {
            from: t.from + shift,
            to: t.to + shift,
            on: t.on,
            with: t.with,
        });
    }
    shifted
}
/// Return `states` with every index offset by `shift`.
fn shift_states(states: Vec<usize>, shift: usize) -> Vec<usize> {
    let mut shifted = Vec::with_capacity(states.len());
    for s in states {
        shifted.push(s + shift);
    }
    shifted
}
impl FST {
    /// Machine with a single accepting state: accepts only the empty string.
    pub fn empty() -> Self {
        Self {
            state_count: 1,
            accepting_states: vec![0],
            transitions: Vec::new(),
        }
    }
    /// Machine with a single non-accepting state: rejects every string.
    pub fn all_reject() -> Self {
        Self {
            state_count: 1,
            accepting_states: Vec::new(),
            transitions: Vec::new(),
        }
    }
    /// Machine that accepts exactly `symbol`, echoing it as output.
    pub fn symbol(symbol: char) -> Self {
        Self {
            state_count: 2,
            accepting_states: vec![1],
            transitions: vec![Transition {
                from: 0,
                to: 1,
                on: Some(symbol),
                with: Some(symbol.to_string()),
            }],
        }
    }
    /// Machine that accepts exactly the string `s`, echoing it.
    pub fn string(s: &str) -> Self {
        let mut result = Self::empty();
        for c in s.chars() {
            result = FST::and(result, FST::symbol(c));
        }
        result
    }
    /// Machine that accepts any one of `symbols`, echoing the matched one.
    pub fn one_of_symbols(symbols: Vec<char>) -> Self {
        let mut result = Self::all_reject();
        for symbol in symbols {
            result = FST::or(result, FST::symbol(symbol));
        }
        result
    }
    /// Strip all outputs from `m`: same language, but nothing is emitted.
    pub fn consume(mut m: Self) -> Self {
        for t in &mut m.transitions {
            t.with = None;
        }
        m
    }
    /// Concatenation: `m1`'s language followed by `m2`'s.
    ///
    /// `m2`'s states are shifted past `m1`'s, and each accepting state of
    /// `m1` gains an ε-transition into `m2`'s (shifted) start state.
    pub fn and(m1: Self, m2: Self) -> Self {
        let mut new_transitions = m1.transitions;
        new_transitions.extend(shift_transitions(m2.transitions, m1.state_count));
        for m1_accepting in m1.accepting_states {
            new_transitions.push(Transition {
                from: m1_accepting,
                to: m1.state_count,
                on: None,
                with: None,
            });
        }
        Self {
            state_count: m1.state_count + m2.state_count,
            accepting_states: shift_states(m2.accepting_states, m1.state_count),
            transitions: new_transitions,
        }
    }
    /// Like [`FST::and`], but `m2` is optional: `m1`'s accepting states
    /// remain accepting in addition to `m2`'s.
    pub fn and_optionally(m1: Self, m2: Self) -> Self {
        let mut new_transitions = m1.transitions;
        new_transitions.extend(shift_transitions(m2.transitions, m1.state_count));
        for m1_accepting in &m1.accepting_states {
            new_transitions.push(Transition {
                from: *m1_accepting,
                to: m1.state_count,
                on: None,
                with: None,
            });
        }
        let mut new_accepting = m1.accepting_states;
        new_accepting.extend(shift_states(m2.accepting_states, m1.state_count));
        Self {
            state_count: m1.state_count + m2.state_count,
            accepting_states: new_accepting,
            transitions: new_transitions,
        }
    }
    /// Wrap `m` so its output is preceded by `"name "` on entry and followed
    /// by a space on exit; states 0/1 become the new start/accept states.
    pub fn wrap(m: Self, name: &str) -> Self {
        let mut new_transitions = shift_transitions(m.transitions, 2);
        new_transitions.push(Transition {
            from: 0,
            to: 2,
            on: None,
            with: Some(name.to_string() + " "),
        });
        for m_accepting in shift_states(m.accepting_states, 2) {
            new_transitions.push(Transition {
                from: m_accepting,
                to: 1,
                on: None,
                with: Some(" ".to_string()),
            });
        }
        Self {
            state_count: m.state_count + 2,
            accepting_states: vec![1],
            transitions: new_transitions,
        }
    }
    /// Alternation: accept either `m1`'s or `m2`'s language.
    pub fn or(m1: Self, m2: Self) -> Self {
        let mut new_transitions = shift_transitions(m1.transitions, 1);
        new_transitions.extend(shift_transitions(m2.transitions, m1.state_count + 1));
        // New start state 0 branches into both machines via ε.
        new_transitions.push(Transition { from: 0, to: 1, on: None, with: None });
        new_transitions.push(Transition {
            from: 0,
            to: m1.state_count + 1,
            on: None,
            with: None,
        });
        let mut new_accepting = shift_states(m1.accepting_states, 1);
        new_accepting.extend(shift_states(m2.accepting_states, m1.state_count + 1));
        Self {
            state_count: m1.state_count + m2.state_count + 1,
            accepting_states: new_accepting,
            transitions: new_transitions,
        }
    }
    /// Alternation over arbitrarily many machines.
    pub fn one_of(ms: Vec<Self>) -> Self {
        let mut result = Self::all_reject();
        for m in ms {
            result = Self::or(m, result);
        }
        result
    }
    /// Kleene plus: one or more repetitions of `m`'s language, formed by
    /// adding an ε-edge from every accepting state back to the start.
    pub fn at_least_once(mut m: Self) -> Self {
        for accepting in &m.accepting_states {
            m.transitions.push(Transition {
                from: *accepting,
                to: 0,
                on: None,
                with: None,
            });
        }
        m
    }
    /// Kleene star: zero or more repetitions of `m`'s language.
    ///
    /// Fixed: the previous version shifted `m`'s states by one but never
    /// added an ε-edge from the new start state into the shifted machine
    /// (it only pushed a useless `0 -> 0` self-loop) and did not grow
    /// `state_count`, so the inner machine was unreachable and only the
    /// empty string was accepted.
    pub fn repeated(m: Self) -> Self {
        let mut new_transitions = shift_transitions(m.transitions, 1);
        // State 0 is the new start and sole accepting state: enter the old
        // machine via ε, and return from each old accepting state via ε.
        new_transitions.push(Transition { from: 0, to: 1, on: None, with: None });
        for accepting in shift_states(m.accepting_states, 1) {
            new_transitions.push(Transition {
                from: accepting,
                to: 0,
                on: None,
                with: None,
            });
        }
        Self {
            state_count: m.state_count + 1,
            accepting_states: vec![0],
            transitions: new_transitions,
        }
    }
    /// All single transitions out of `state` labelled `on` (`None` = ε),
    /// each extending `path` with the transition's output, if any.
    fn symbol_step(&self, state: usize, path: Vec<String>, on: Option<char>) -> Vec<(usize, Vec<String>)> {
        self.transitions.iter()
            .filter(|t| t.from == state && t.on == on)
            .map(|t| {
                let mut new_path = path.clone();
                new_path.extend(t.with.clone());
                (t.to, new_path)
            })
            .collect()
    }
    /// Every (state, output) pair reachable from `start_states` through
    /// ε-transitions, including the start pairs themselves.
    ///
    /// Fixed: each distinct (state, output) pair is now expanded at most
    /// once. The previous version re-expanded pairs unconditionally, which
    /// duplicated results on converging ε-paths and looped forever on the
    /// ε-cycles produced by `at_least_once`/`repeated`. ε-cycles that emit
    /// output on every pass still diverge, as they denote infinitely many
    /// outputs.
    fn epsilon_closure(&self, start_states: Vec<(usize, Vec<String>)>) -> Vec<(usize, Vec<String>)> {
        use std::collections::HashSet;
        let mut seen: HashSet<(usize, Vec<String>)> = HashSet::new();
        let mut result = Vec::new();
        let mut frontier = start_states;
        while !frontier.is_empty() {
            frontier = frontier.into_iter()
                // Keep only pairs never seen before; record and expand them.
                .filter(|pair| seen.insert(pair.clone()))
                .inspect(|pair| result.push(pair.clone()))
                .flat_map(|(state, path)| self.symbol_step(state, path, None))
                .collect();
        }
        result
    }
    /// Advance every (state, output) pair over `symbol`, then close over ε.
    fn step(&self, start_states: Vec<(usize, Vec<String>)>, symbol: char) -> Vec<(usize, Vec<String>)> {
        self.epsilon_closure(start_states.into_iter()
            .flat_map(|(state, path)| self.symbol_step(state, path, Some(symbol)))
            .collect())
    }
    /// Run the machine over `string` and return the output of every
    /// accepting run. The live (state, output) sets are printed after each
    /// consumed symbol as a debugging trace.
    pub fn match_string(&self, string: &str) -> Vec<Vec<String>> {
        let mut states = self.epsilon_closure(vec![(0, Vec::new())]);
        for symbol in string.chars() {
            states = self.step(states, symbol);
            for (state, path) in &states {
                print!("{} {}; ", state, path.join(""));
            }
            println!();
        }
        states.into_iter()
            .filter(|(state, _)| self.accepting_states.contains(state))
            .map(|(_, path)| path)
            .collect()
    }
}
impl fmt::Display for FST {
    /// Serialize the machine: a `state_count transition_count` header line,
    /// one tab-separated `from to on with` line per transition (ε prints as
    /// the backtick placeholder), then the accepting states on a final line.
    ///
    /// Fixed: the `writeln!` results were previously discarded, silently
    /// swallowing formatter errors; they are now propagated with `?`. The
    /// needless clone of the whole transition list is also gone.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "{} {}", self.state_count, self.transitions.len())?;
        for t in &self.transitions {
            writeln!(
                f,
                "{}\t{}\t{}\t{}",
                t.from,
                t.to,
                t.on.unwrap_or('`'),
                t.with.as_deref().unwrap_or_default()
            )?;
        }
        for state in &self.accepting_states {
            write!(f, "{} ", state)?;
        }
        writeln!(f)
    }
}
| 31.776316 | 112 | 0.550311 |
8a29ba04b09051942297ddd1aa1c2b9a8f47ac35
| 1,671 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
use std::fs::File;
use std::io::BufReader;
use std::path::PathBuf;
use crate::rand::RAND;
mod test_vector_structs;
pub use self::test_vector_structs::*;
/// Print `array` as uppercase hex, two digits per byte, then a newline.
pub fn printbinary(array: &[u8]) {
    for byte in array {
        print!("{:02X}", byte)
    }
    println!()
}
/// Build a deterministic `RAND` seeded with the 100 bytes `0, 1, .., 99`.
pub fn create_rng() -> RAND {
    let mut rng = RAND::new();
    rng.clean();
    let mut seed_material: [u8; 100] = [0; 100];
    for (i, byte) in seed_material.iter_mut().enumerate() {
        *byte = i as u8
    }
    rng.seed(100, &seed_material);
    rng
}
// Reads the json test files.
//
// Opens `src/test_utils/hash_to_curve_vectors/<file_name>.json` relative to
// the crate root and returns a buffered reader over it. Panics via `unwrap`
// if the file is missing — acceptable for test-only support code.
pub fn json_reader(file_name: &str) -> BufReader<File> {
    // CARGO_MANIFEST_DIR is baked in at compile time, so the path resolves
    // against this crate regardless of the process working directory.
    let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    file_path_buf.push("src/test_utils/hash_to_curve_vectors/");
    let mut file_name = String::from(file_name);
    file_name.push_str(".json");
    file_path_buf.push(file_name);
    let file = File::open(file_path_buf).unwrap();
    BufReader::new(file)
}
| 27.393443 | 70 | 0.70018 |
de9f0a4f01738a7d6d8dfb6ff3a80957af3cb36f
| 2,777 |
//! Graph structure used to separate bodies into islands
//! for parallel computation and sleeping.
#[derive(Clone, Copy, Debug)]
pub enum Edge {
    /// Edge from a body (`body_idx`) to a rope node (`rope_node_idx`).
    Rope {
        body_idx: usize,
        rope_node_idx: usize,
    },
    /// Edge from a body to another body through constraint `constr_idx`.
    Constraint {
        body_idx: usize,
        constr_idx: usize,
    },
    /// Edge from a body to another body through contact pair `pair_idx`.
    Contact {
        body_idx: usize,
        pair_idx: usize,
    },
    // marking possible contacts and constraints with static objects as well
    // so that we can get this knowledge into the island solver
    /// Constraint against a static object (no body on the other side).
    StaticConstraint {
        constr_idx: usize,
    },
    /// Contact pair against a static object (no body on the other side).
    StaticContact {
        pair_idx: usize,
    },
}
#[derive(Clone, Copy, Debug)]
pub struct EdgeListNode {
    /// Index (into `ConstraintGraph::nodes`) of the next node in the same
    /// body's list; `None` at the tail.
    pub next: Option<usize>,
    /// The edge payload stored in this node.
    pub edge: Edge,
}
/// A set of single-linked lists stored in a Vec
/// for a memory-efficient graph representation
pub struct ConstraintGraph {
    // indices to the start of the list for reading and end for writing
    pub first_nodes_per_body: Vec<Option<usize>>,
    // last node exists for sure if first does, we always check first first
    pub last_nodes_per_body: Vec<usize>,
    // arena of list nodes; `EdgeListNode::next` indexes into this Vec
    pub nodes: Vec<EdgeListNode>,
}
impl ConstraintGraph {
    /// Drop all stored edges and per-body list bookkeeping.
    pub fn clear(&mut self) {
        self.first_nodes_per_body.clear();
        self.last_nodes_per_body.clear();
        self.nodes.clear();
    }
    /// Resize the per-body bookkeeping to track `new_len` bodies.
    pub fn resize(&mut self, new_len: usize) {
        self.first_nodes_per_body.resize(new_len, None);
        self.last_nodes_per_body.resize(new_len, 0);
    }
    /// Append `edge` to the end of the list belonging to `body_idx`.
    pub fn insert(&mut self, body_idx: usize, edge: Edge) {
        let node_idx = self.nodes.len();
        self.nodes.push(EdgeListNode { next: None, edge });
        if self.first_nodes_per_body[body_idx].is_some() {
            // Link the previous tail to the freshly pushed node.
            let tail_idx = self.last_nodes_per_body[body_idx];
            self.nodes[tail_idx].next = Some(node_idx);
        } else {
            // First edge for this body: it becomes the list head.
            self.first_nodes_per_body[body_idx] = Some(node_idx);
        }
        // Either way the new node is now the tail.
        self.last_nodes_per_body[body_idx] = node_idx;
    }
    /// Iterate over the edges recorded for `body_idx`.
    pub fn iter(&self, body_idx: usize) -> ConstraintGraphIter<'_> {
        ConstraintGraphIter {
            graph: self,
            body_idx,
            curr_node_idx: None,
        }
    }
}
/// Iterator over one body's edges, walking the linked list stored in a
/// `ConstraintGraph`.
pub struct ConstraintGraphIter<'a> {
    graph: &'a ConstraintGraph,
    // Body whose edge list is being walked.
    body_idx: usize,
    // Node most recently yielded; `None` before the first `next()` call.
    curr_node_idx: Option<usize>,
}
impl<'a> Iterator for ConstraintGraphIter<'a> {
    type Item = &'a Edge;
    /// Follow the list: start at the body's head node, then chase `next`
    /// links until the tail is reached.
    fn next(&mut self) -> Option<Self::Item> {
        let next_idx = match self.curr_node_idx {
            None => self.graph.first_nodes_per_body[self.body_idx],
            Some(idx) => self.graph.nodes[idx].next,
        };
        self.curr_node_idx = next_idx;
        next_idx.map(|idx| &self.graph.nodes[idx].edge)
    }
}
| 28.336735 | 85 | 0.616133 |
4bbda5eb195dae7d66c34861c5329201fe9890ed
| 24,415 |
use heck::*;
use oasis_rpc::Interface;
use proc_macro2::{Ident, Literal, TokenStream};
use quote::{format_ident, quote};
// Build a TypeScript identifier from a Rust name, with the casing picked by
// the leading tag:
//   @import -> mixedCase   (module import aliases)
//   @class  -> CamelCase   (types/classes)
//   @var    -> mixedCase   (variables/functions, via `var_name`)
//   @const  -> SHOUTY_SNAKE_CASE (constants)
//   @raw    -> formats the expression verbatim; used by all other arms.
// NOTE(review): the @const arm expands with a trailing semicolon, which only
// works in statement position — confirm if/where it is used.
macro_rules! format_ts_ident {
    (@import, $name:expr) => {
        format_ts_ident!(@raw, $name.to_mixed_case())
    };
    (@class, $name:expr) => {
        format_ts_ident!(@raw, $name.to_camel_case())
    };
    (@var, $name:expr) => {
        format_ts_ident!(@raw, var_name(&$name))
    };
    (@const, $name:expr) => {
        format_ts_ident!(@raw, $name.to_shouty_snake_case());
    };
    (@raw, $name:expr) => {
        format_ident!("{}", $name)
    };
}
/// Generate the complete TypeScript client module for `iface`.
///
/// The emitted module contains one `import * as …` per interface dependency,
/// classes for all declared types, and a service class that embeds `bytecode`
/// (base64) as `BYTECODE` and exposes `connect`, `deploy` and one method per
/// RPC.
pub fn generate(iface: &Interface, bytecode: &[u8]) -> TokenStream {
    let service_ident = format_ts_ident!(@class, iface.name);
    let bytecode_str = base64::encode(bytecode);
    // One `import * as <alias> from "./<module>"` per imported interface.
    let imports = iface.imports.iter().map(|imp| {
        let import_ident = format_ts_ident!(@var, imp.name);
        let import_path = format!("./{}", module_name(&imp.name));
        quote!(import * as #import_ident from #import_path;)
    });
    let type_defs = generate_type_defs(&iface.type_defs);
    let deploy_function = generate_deploy_function(&service_ident, &iface.constructor);
    let rpc_functions = generate_rpc_functions(&service_ident, &iface.functions);
    quote! {
        import { Buffer } from "buffer";
        import * as oasis from "oasis-std";
        #(#imports)*
        #(#type_defs)*
        export class #service_ident {
            public static BYTECODE = #bytecode_str;
            private constructor(readonly address: oasis.Address, private gateway: oasis.Gateway) {}
            public static async connect(
                address: oasis.Address,
                gateway: oasis.Gateway
            ): Promise<#service_ident> {
                return new #service_ident(address, gateway);
            }
            #deploy_function
            #(#rpc_functions)*
        }
    }
}
/// Generate a TypeScript class (or class family) for every type definition:
///
/// * structs become classes with named fields — or positional "tuple"
///   classes when the field names are just indices `0, 1, …`;
/// * enums become a namespace of per-variant classes plus a union type and
///   an `abiDecode` dispatching on the variant id byte;
/// * events become struct classes with a static `subscribe` helper that
///   encodes the indexed fields as topics.
fn generate_type_defs(type_defs: &[oasis_rpc::TypeDef]) -> Vec<TokenStream> {
    type_defs
        .iter()
        .map(|type_def| {
            use oasis_rpc::TypeDef;
            match type_def {
                TypeDef::Struct { name, fields } => {
                    // Numeric field names mean this is a tuple struct in
                    // disguise; emit positional fields instead of named ones.
                    if fields.iter().any(|f| f.name.parse::<u32>().is_ok()) {
                        generate_tuple_class(
                            &name,
                            &fields.iter().map(|f| f.ty.clone()).collect::<Vec<_>>(),
                            quote!(),
                            None,
                        )
                    } else {
                        generate_struct_class(name, fields, quote!(), None)
                    }
                }
                TypeDef::Enum { name, variants } => {
                    let type_ident = format_ts_ident!(@class, name);
                    let variant_idents: Vec<_> = variants
                        .iter()
                        .map(|v| format_ts_ident!(@class, v.name))
                        .collect();
                    // One class per variant; the variant's position `i` is
                    // baked into its encoder as the leading discriminant byte.
                    let variant_classes =
                        variants
                            .iter()
                            .enumerate()
                            .map(|(i, variant)| match &variant.fields {
                                Some(oasis_rpc::EnumFields::Named(fields)) => {
                                    let is_tuple = fields
                                        .iter()
                                        .enumerate()
                                        .all(|(i, field)| field.name == i.to_string());
                                    if !is_tuple {
                                        generate_struct_class(
                                            &variant.name,
                                            fields,
                                            quote!(),
                                            Some(i),
                                        )
                                    } else {
                                        generate_tuple_class(
                                            &variant.name,
                                            &fields
                                                .iter()
                                                .map(|f| f.ty.clone())
                                                .collect::<Vec<_>>(),
                                            quote!(),
                                            Some(i),
                                        )
                                    }
                                }
                                Some(oasis_rpc::EnumFields::Tuple(tys)) => {
                                    generate_tuple_class(&variant.name, &tys, quote!(), Some(i))
                                }
                                None => generate_tuple_class(
                                    &variant.name,
                                    &[], /* no fields */
                                    quote!(),
                                    Some(i),
                                ),
                            });
                    quote! {
                        export module #type_ident {
                            #(#variant_classes)*
                            export function abiDecode(decoder: oasis.Decoder): #type_ident {
                                const variantId = decoder.readU8();
                                return (#type_ident as any).VARIANTS[variantId].abiDecode(decoder);
                            }
                            export const VARIANTS: Function[] = [ #(#variant_idents),* ];
                            export function isVariant(obj: any): obj is #type_ident {
                                for (const variant of #type_ident.VARIANTS) {
                                    if (obj instanceof variant) {
                                        return true;
                                    }
                                }
                                return false;
                            }
                        }
                        export type #type_ident = #(#type_ident.#variant_idents)|*;
                    }
                }
                TypeDef::Event {
                    name,
                    fields: indexed_fields,
                } => {
                    let event_ident = format_ts_ident!(@class, name);
                    let topic_names = indexed_fields.iter().map(|f| var_name(&f.name));
                    let topic_idents: Vec<_> = indexed_fields
                        .iter()
                        .map(|f| format_ts_ident!(@var, &f.name))
                        .collect();
                    let topic_tys = indexed_fields.iter().map(|f| quote_ty(&f.ty));
                    let topic_schema_tys = indexed_fields.iter().map(|f| quote_schema_ty(&f.ty));
                    let topics_arg = if !indexed_fields.is_empty() {
                        quote!(topics?: { #(#topic_idents?: #topic_tys),* })
                    } else {
                        quote!()
                    };
                    let maybe_dot = make_operator("?.");
                    // Static `subscribe` member: the event name is always the
                    // first topic; indexed fields the caller supplied follow.
                    let extra_members = quote! {
                        public static async subscribe(
                            gateway: oasis.Gateway,
                            address: oasis.Address | null,
                            #topics_arg
                        ): Promise<oasis.Subscription<#event_ident>> {
                            const encodedTopics = [
                                oasis.encodeEventTopic("string", #event_ident.name),
                            ];
                            #(
                                if (topics #maybe_dot hasOwnProperty(#topic_names)) {
                                    encodedTopics.push(
                                        oasis.encodeEventTopic(
                                            #topic_schema_tys,
                                            topics.#topic_idents,
                                        )
                                    );
                                }
                            )*
                            return gateway.subscribe(
                                address,
                                encodedTopics,
                                async (payload: Uint8Array) => {
                                    return oasis.abiDecode(#event_ident, payload);
                                }
                            );
                        }
                    };
                    let fields: Vec<_> = indexed_fields
                        .iter()
                        .cloned()
                        .map(|f| oasis_rpc::Field {
                            name: f.name,
                            ty: f.ty,
                        })
                        .collect();
                    generate_struct_class(name, &fields, extra_members, None)
                }
            }
        })
        .collect()
}
/// Emit a TypeScript class with named fields for `struct_name`.
///
/// The class implements `oasis.AbiEncodable` with field-by-field
/// `abiEncode`/`abiDecode`. When `variant_idx` is `Some(i)`, the class is an
/// enum variant and its encoder first writes `i` as the discriminant byte.
/// `extra_members` is spliced verbatim into the class body.
fn generate_struct_class<'a>(
    struct_name: &str,
    fields: &'a [oasis_rpc::Field],
    extra_members: TokenStream,
    variant_idx: Option<usize>,
) -> TokenStream {
    let class_ident = format_ts_ident!(@class, struct_name);
    let field_idents: Vec<_> = fields
        .iter()
        .map(|field| format_ts_ident!(@var, field.name))
        .collect();
    let field_decls: Vec<_> = fields.iter().map(generate_field_decl).collect();
    let field_schema_tys: Vec<_> = fields
        .iter()
        .map(|field| quote_schema_ty(&field.ty))
        .collect();
    // Leading discriminant byte, present only for enum variants.
    let variant_encoder = variant_idx.map(|idx| {
        let idx_lit = Literal::usize_unsuffixed(idx);
        quote!(encoder.writeU8(#idx_lit);)
    });
    quote! {
        export class #class_ident implements oasis.AbiEncodable {
            #(public #field_decls;)*
            public constructor(fields: { #(#field_decls;)* }) {
                #(this.#field_idents = fields.#field_idents;)*
            }
            public abiEncode(encoder: oasis.Encoder) {
                #variant_encoder
                #(oasis.abiEncode(#field_schema_tys as oasis.Schema, this.#field_idents, encoder);)*
            }
            public static abiDecode(decoder: oasis.Decoder): #class_ident {
                return new #class_ident({
                    #(#field_idents: oasis.abiDecode(#field_schema_tys as oasis.Schema, decoder)),*
                });
            }
            #extra_members
        }
    }
}
/// Emit a TypeScript class with positional (numeric) fields for a tuple-like
/// type. Fields are stored as `this[0]`, `this[1]`, … and passed to the
/// constructor positionally. As in `generate_struct_class`, `variant_idx`
/// adds a leading discriminant byte to the encoder and `extra_members` is
/// spliced verbatim into the class body.
fn generate_tuple_class(
    tuple_name: &str,
    tys: &[oasis_rpc::Type],
    extra_members: TokenStream,
    variant_idx: Option<usize>,
) -> TokenStream {
    // Parallel lists: numeric member names (0, 1, …) and ctor arg names
    // (arg0, arg1, …).
    let (field_idents, arg_idents): (Vec<_>, Vec<_>) = (0..tys.len())
        .map(|i| {
            (
                proc_macro2::Literal::usize_unsuffixed(i),
                format_ident!("arg{}", i),
            )
        })
        .unzip();
    let class_ident = format_ts_ident!(@class, tuple_name);
    let field_tys: Vec<_> = tys.iter().map(|ty| quote_ty(ty)).collect();
    let field_schema_tys: Vec<_> = tys.iter().map(quote_schema_ty).collect();
    let variant_encoder = variant_idx.map(|idx| {
        let idx_lit = Literal::usize_unsuffixed(idx);
        quote!(encoder.writeU8(#idx_lit);)
    });
    quote! {
        export class #class_ident implements oasis.AbiEncodable {
            #(public #field_idents: #field_tys;)*
            public constructor(#(#arg_idents: #field_tys),*) {
                #(this[#field_idents] = #arg_idents;)*
            }
            public abiEncode(encoder: oasis.Encoder) {
                #variant_encoder
                #(oasis.abiEncode(#field_schema_tys as oasis.Schema, this[#field_idents], encoder));*
            }
            public static abiDecode(decoder: oasis.Decoder): #class_ident {
                return new #class_ident(
                    #(oasis.abiDecode(#field_schema_tys as oasis.Schema, decoder)),*
                );
            }
            #extra_members
        }
    }
}
/// Emit the static `deploy` member (plus its private payload builder) for
/// the service class: it encodes the constructor arguments after the
/// bytecode-and-separator payload, deploys through the gateway, and returns
/// a connected service instance.
fn generate_deploy_function(service_ident: &Ident, ctor: &oasis_rpc::Constructor) -> TokenStream {
    let arg_idents: Vec<_> = ctor
        .inputs
        .iter()
        .map(|field| format_ts_ident!(@var, field.name))
        .collect();
    let arg_tys: Vec<_> = ctor
        .inputs
        .iter()
        .map(|field| quote_ty(&field.ty))
        .collect();
    let arg_schema_tys = ctor.inputs.iter().map(|field| quote_schema_ty(&field.ty));
    let deploy_try_catch = gen_rpc_err_handler(
        ctor.error.as_ref(),
        quote! {
            const deployedAddr = await gateway.deploy(payload, options);
            return new #service_ident(deployedAddr, gateway);
        },
    );
    let arg_decls = ctor.inputs.iter().map(generate_field_decl);
    // With no constructor inputs, `deploy` takes no args object and the
    // payload builder just finalizes the encoder.
    let (deploy_args, final_encode_call) = if !ctor.inputs.is_empty() {
        (
            quote!({ #(#arg_idents),* }: { #(#arg_decls;)* },),
            quote! {
                oasis.abiEncode(
                    [ #(#arg_schema_tys as oasis.Schema),* ],
                    [ #(#arg_idents),* ],
                    encoder
                );
            },
        )
    } else {
        (quote!(), quote!(encoder.finish();))
    };
    // The magic wasm separator string that oasis-parity encourages callers to provide in the
    // (code || separator || data) payload.
    // It is also a valid WASM section:
    // - 00 = section ID for "custom section"
    // - 19 = section length
    // - 18 = name length
    // - the rest is the section name, followed by 0 bytes of contents
    // This way, old versions of the runtime (that do not know how to use the separator) can
    // still parse and effectively ignore the it.
    let wasm_separator: TokenStream = quote! {"\x00\x19\x18==OasisEndOfWasmMarker=="};
    quote! {
        public static async deploy(
            gateway: oasis.Gateway,
            #deploy_args
            options?: oasis.DeployOptions,
        ): Promise<#service_ident> {
            const payload = #service_ident.makeDeployPayload(#(#arg_idents),*);
            #deploy_try_catch
        }
        private static makeDeployPayload(#(#arg_idents: #arg_tys,)*): Buffer {
            const encoder = new oasis.Encoder();
            encoder.writeU8Array(Buffer.from(#service_ident.BYTECODE, "base64"));
            encoder.writeU8Array(Buffer.from(#wasm_separator, "binary"));
            return #final_encode_call
        }
    }
}
/// Emit one async method per RPC (plus a private payload builder each).
///
/// The payload is the function's index `i` as a single byte followed by the
/// abi-encoded arguments. A `Result` output type is unwrapped: the Ok value
/// is decoded from the response, while the Err side is handled by the
/// catch block from `gen_rpc_err_handler`.
fn generate_rpc_functions<'a>(
    service_ident: &'a Ident,
    rpcs: &'a [oasis_rpc::Function],
) -> impl Iterator<Item = TokenStream> + 'a {
    rpcs.iter().enumerate().map(move |(i, rpc)| {
        let fn_id_lit = Literal::usize_unsuffixed(i);
        let arg_idents: Vec<_> = rpc
            .inputs
            .iter()
            .map(|inp| format_ts_ident!(@var, inp.name))
            .collect();
        let arg_tys: Vec<_> = rpc.inputs.iter().map(|inp| quote_ty(&inp.ty)).collect();
        let arg_schema_tys = rpc.inputs.iter().map(|inp| quote_schema_ty(&inp.ty));
        let fn_ident = format_ts_ident!(@var, rpc.name);
        let make_payload_ident = format_ident!("make{}Payload", rpc.name.to_camel_case());
        // Promise result type: the Ok side of a Result output, or the
        // output itself, or `void` when there is no output.
        let rpc_ret_ty = rpc
            .output
            .as_ref()
            .map(|out_ty| {
                quote_ty(match out_ty {
                    oasis_rpc::Type::Result(box ok_ty, _) => ok_ty,
                    _ => out_ty,
                })
            })
            .unwrap_or_else(|| quote!(void));
        // Statement that decodes the gateway response; empty-tuple outputs
        // decode to nothing and just `return;`.
        let returner = rpc
            .output
            .as_ref()
            .and_then(|output| {
                use oasis_rpc::Type::{Result, Tuple};
                match output {
                    Tuple(tys) | Result(box Tuple(tys), _) if tys.is_empty() => None,
                    oasis_rpc::Type::Result(box ok_ty, _) => {
                        let quot_schema_ty = quote_schema_ty(ok_ty);
                        //^ unwrap one layer of result, as the outer error is derived
                        // from the tx status code.
                        Some(quote! {
                            return oasis.abiDecode(#quot_schema_ty as oasis.Schema, res);
                        })
                    }
                    _ => {
                        let quot_schema_ty = quote_schema_ty(output);
                        Some(quote! {
                            return oasis.abiDecode(#quot_schema_ty as oasis.Schema, res);
                        })
                    }
                }
            })
            .unwrap_or_else(|| quote!(return;));
        let rpc_try_catch = gen_rpc_err_handler(
            rpc.output.as_ref().and_then(|output| {
                if let oasis_rpc::Type::Result(_, box err_ty) = output {
                    Some(err_ty)
                } else {
                    None
                }
            }),
            quote! {
                const res = await this.gateway.rpc(this.address, payload, options);
                #returner
            },
        );
        // Zero-argument RPCs omit the destructured args object entirely.
        if rpc.inputs.is_empty() {
            quote! {
                public async #fn_ident(options?: oasis.RpcOptions): Promise<#rpc_ret_ty> {
                    const payload = #service_ident.#make_payload_ident();
                    #rpc_try_catch
                }
                private static #make_payload_ident(): Buffer {
                    const encoder = new oasis.Encoder();
                    encoder.writeU8(#fn_id_lit);
                    return encoder.finish();
                }
            }
        } else {
            let arg_decls = rpc.inputs.iter().map(generate_field_decl);
            quote! {
                public async #fn_ident(
                    { #(#arg_idents),* }: { #(#arg_decls;)* },
                    options?: oasis.RpcOptions
                ): Promise<#rpc_ret_ty> {
                    const payload = #service_ident.#make_payload_ident(#(#arg_idents),*);
                    #rpc_try_catch
                }
                private static #make_payload_ident(#(#arg_idents: #arg_tys,)*): Buffer {
                    const encoder = new oasis.Encoder();
                    encoder.writeU8(#fn_id_lit);
                    return oasis.abiEncode(
                        [ #(#arg_schema_tys as oasis.Schema),* ],
                        [ #(#arg_idents),* ],
                        encoder
                    );
                }
            }
        }
    })
}
/// Emit a TypeScript field declaration `name: Ty` for `field`, marking
/// `Optional` types with a `?` suffix on the name.
fn generate_field_decl(field: &oasis_rpc::Field) -> TokenStream {
    let field_name = format_ts_ident!(@var, &field.name);
    let field_ty = quote_ty(&field.ty);
    let maybe_optional = match &field.ty {
        oasis_rpc::Type::Optional(_) => Some(quote!(?)),
        _ => None,
    };
    quote! {
        #field_name #maybe_optional: #field_ty
    }
}
/// Map an interface type to the TypeScript *type annotation* used in
/// generated signatures and fields (see `quote_schema_ty` for the runtime
/// schema values). Note numeric lists/arrays map to the corresponding typed
/// array (`Uint8Array`, `Float64Array`, …).
fn quote_ty(ty: &oasis_rpc::Type) -> TokenStream {
    use oasis_rpc::Type::*;
    match ty {
        Bool => quote!(boolean),
        U8 | I8 | U16 | I16 | U32 | I32 | F32 | F64 => quote!(number),
        // 64-bit integers do not fit in `number`; they become `bigint`.
        U64 | I64 => quote!(bigint),
        Bytes => quote!(Uint8Array),
        String => quote!(string),
        Address => quote!(oasis.Address),
        Balance => quote!(oasis.Balance),
        RpcError => quote!(oasis.RpcError),
        // User-defined type, optionally qualified by its import alias.
        Defined { namespace, ty } => {
            let ty_ident = format_ts_ident!(@class, ty);
            if let Some(ns) = namespace {
                let ns_ident = format_ts_ident!(@import, ns);
                quote!(#ns_ident.#ty_ident)
            } else {
                quote!(#ty_ident)
            }
        }
        Tuple(tys) => {
            if tys.is_empty() {
                quote!(void)
            } else {
                let quot_tys = tys.iter().map(quote_ty);
                quote!([ #(#quot_tys),* ])
            }
        }
        List(box U8) | Array(box U8, _) => quote!(Uint8Array),
        List(box I8) | Array(box I8, _) => quote!(Int8Array),
        List(box U16) | Array(box U16, _) => quote!(Uint16Array),
        List(box I16) | Array(box I16, _) => quote!(Int16Array),
        List(box U32) | Array(box U32, _) => quote!(Uint32Array),
        List(box I32) | Array(box I32, _) => quote!(Int32Array),
        List(box U64) | Array(box U64, _) => quote!(BigUint64Array),
        List(box I64) | Array(box I64, _) => quote!(BigInt64Array),
        List(box F32) | Array(box F32, _) => quote!(Float32Array),
        List(box F64) | Array(box F64, _) => quote!(Float64Array),
        List(ty) | Array(ty, _) => {
            let quot_ty = quote_ty(ty);
            quote!(#quot_ty[])
        }
        Set(ty) => {
            let quot_ty = quote_ty(ty);
            quote!(oasis.Set<#quot_ty>)
        }
        Map(k_ty, v_ty) => {
            let quot_k_ty = quote_ty(k_ty);
            let quot_v_ty = quote_ty(v_ty);
            quote!(oasis.Map<#quot_k_ty, #quot_v_ty>)
        }
        Optional(ty) => {
            let quot_ty = quote_ty(ty);
            quote!(#quot_ty | undefined)
        }
        Result(ok_ty, err_ty) => {
            let quot_ok_ty = quote_ty(ok_ty);
            let quot_err_ty = quote_ty(err_ty);
            quote!(oasis.Result<#quot_ok_ty, #quot_err_ty>)
        }
    }
}
/// Map an interface type to the *runtime schema value* passed to
/// `oasis.abiEncode`/`abiDecode` (string tags for primitives, nested arrays
/// for composites). Companion of `quote_ty`, which produces the static type
/// annotation.
fn quote_schema_ty(ty: &oasis_rpc::Type) -> TokenStream {
    use oasis_rpc::Type::*;
    match ty {
        Bool => quote!("boolean"),
        U8 => quote!("u8"),
        I8 => quote!("i8"),
        U16 => quote!("u16"),
        I16 => quote!("i16"),
        U32 => quote!("u32"),
        I32 => quote!("i32"),
        U64 => quote!("u64"),
        I64 => quote!("i64"),
        F32 => quote!("f32"),
        F64 => quote!("f64"),
        // Unbounded byte string: u8 list with infinite length bound.
        Bytes => quote!(["u8", Number.POSITIVE_INFINITY]),
        String => quote!("string"),
        Address => quote!(oasis.Address),
        Balance => quote!(oasis.Balance),
        RpcError => quote!(oasis.RpcError),
        // User-defined type, optionally qualified by its import alias.
        Defined { namespace, ty } => {
            let ty_ident = format_ts_ident!(@class, ty);
            if let Some(ns) = namespace {
                let ns_ident = format_ts_ident!(@import, ns);
                quote!(#ns_ident.#ty_ident)
            } else {
                quote!(#ty_ident)
            }
        }
        Tuple(tys) => {
            let quot_tys = tys.iter().map(quote_schema_ty);
            quote!([ #(#quot_tys),* ])
        }
        Array(ty, len) => {
            let quot_ty = quote_schema_ty(ty);
            let quot_len = Literal::u64_unsuffixed(*len);
            quote!([ #quot_ty, #quot_len ])
        }
        List(ty) => {
            let quot_ty = quote_schema_ty(ty);
            quote!([#quot_ty, Number.POSITIVE_INFINITY])
        }
        Set(ty) => {
            let quot_ty = quote_schema_ty(ty);
            quote!(["Set", #quot_ty])
        }
        Map(k_ty, v_ty) => {
            let quot_k_ty = quote_schema_ty(k_ty);
            let quot_v_ty = quote_schema_ty(v_ty);
            quote!(["Map", #quot_k_ty, #quot_v_ty])
        }
        Optional(ty) => {
            let quot_ty = quote_schema_ty(ty);
            quote!(["Option", #quot_ty])
        }
        Result(ok_ty, err_ty) => {
            let quot_ok_ty = quote_schema_ty(ok_ty);
            let quot_err_ty = quote_schema_ty(err_ty);
            quote!(["Result", #quot_ok_ty, #quot_err_ty])
        }
    }
}
/// Wrap `try_block` in a TS try/catch. When `err_ty` is given, execution
/// errors (`oasis.RpcError.Execution`) have their payload abi-decoded into
/// that error type and rethrown; all other errors are rethrown unchanged.
fn gen_rpc_err_handler(err_ty: Option<&oasis_rpc::Type>, try_block: TokenStream) -> TokenStream {
    let err_handler = err_ty.map(|err_ty| {
        let quot_schema_err_ty = quote_schema_ty(err_ty);
        quote! {
            if (e instanceof oasis.RpcError.Execution) {
                throw oasis.abiDecode(#quot_schema_err_ty as oasis.Schema, e[0]);
            }
        }
    });
    quote! {
        try {
            #try_block
        } catch (e) {
            #err_handler
            throw e;
        }
    }
}
/// File/module name used for a generated interface: kebab-case of its name.
pub fn module_name(iface_name: impl AsRef<str>) -> String {
    iface_name.as_ref().to_kebab_case()
}
/// TypeScript variable/method name convention: mixedCase of the input.
fn var_name(name: &str) -> String {
    name.to_mixed_case()
}
/// Build a `TokenStream` spelling a multi-character operator such as `?.`:
/// every punctuation character is emitted `Joint` except the final one,
/// which is `Alone`, so the tokens print without internal spaces.
pub fn make_operator(chars: &str) -> TokenStream {
    use proc_macro2::{Punct, Spacing, TokenTree};
    // Fix: the previous version compared the *char* index against the *byte*
    // length (`i == chars.len() - 1`), which disagrees for non-ASCII input;
    // peeking detects the true last character regardless of encoding.
    let mut iter = chars.chars().peekable();
    let mut tokens: Vec<TokenTree> = Vec::new();
    while let Some(ch) = iter.next() {
        let spacing = if iter.peek().is_some() {
            Spacing::Joint
        } else {
            Spacing::Alone
        };
        tokens.push(Punct::new(ch, spacing).into());
    }
    tokens.into_iter().collect()
}
| 36.825038 | 101 | 0.455417 |
2fd221b78aba6f725d1dab5f5d4e328a79d2e392
| 8,739 |
// Copyright 2015-2021 Swim Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::*;
use crate::agent::lane::tests::ExactlyOnce;
use stm::transaction::atomically;
#[test]
fn default_summary() {
    // A freshly defaulted summary: id 0, not a clear, no entry changes.
    let summary: TransactionSummary<i32, String> = Default::default();
    assert_eq!(summary.coordination_id, 0);
    assert!(!summary.clear);
    assert!(summary.changes.is_empty());
}
#[test]
fn summary_with_id() {
    // `with_id` sets only the coordination id; everything else stays empty.
    let summary: TransactionSummary<i32, String> = TransactionSummary::with_id(67);
    assert_eq!(summary.coordination_id, 67);
    assert!(!summary.clear);
    assert!(summary.changes.is_empty());
}
#[test]
fn create_clear_summary() {
    // `clear` flags the summary as a map clear without any entry changes.
    let cleared: TransactionSummary<Value, String> = TransactionSummary::clear();
    assert_eq!(cleared.coordination_id, 0);
    assert!(cleared.clear);
    assert!(cleared.changes.is_empty());
}
#[test]
fn make_update() {
    // A single-update summary records exactly one Update entry whose value
    // is the same Arc that was passed in.
    let k = Value::Int32Value(2);
    let v = Arc::new(4);
    let summary = TransactionSummary::make_update(k.clone(), v.clone());
    assert_eq!(summary.coordination_id, 0);
    assert!(!summary.clear);
    assert_eq!(summary.changes.len(), 1);
    assert!(
        matches!(summary.changes.get(&k), Some(EntryModification::Update(value)) if Arc::ptr_eq(value, &v))
    );
}
#[test]
fn make_removal() {
    // A single-removal summary records exactly one Remove entry for the key.
    let k = Value::Int32Value(2);
    let summary: TransactionSummary<Value, i32> = TransactionSummary::make_removal(k.clone());
    assert_eq!(summary.coordination_id, 0);
    assert!(!summary.clear);
    assert_eq!(summary.changes.len(), 1);
    assert!(matches!(
        summary.changes.get(&k),
        Some(EntryModification::Remove)
    ));
}
#[test]
fn update_existing_no_clear() {
    // Updating on a default summary records the entry and keeps `clear` unset.
    let key = Value::Int32Value(2);
    let value = Arc::new(4);
    let summary = TransactionSummary::default();
    let updated = summary.update(key.clone(), value.clone());
    assert_eq!(updated.coordination_id, 0);
    assert!(!updated.clear);
    assert_eq!(updated.changes.len(), 1);
    assert!(
        matches!(updated.changes.get(&key), Some(EntryModification::Update(v)) if Arc::ptr_eq(v, &value))
    );
}
#[test]
fn update_existing_clear() {
    // Updating on a clear summary records the entry and preserves `clear`.
    let key = Value::Int32Value(2);
    let value = Arc::new(4);
    let summary = TransactionSummary::clear();
    let updated = summary.update(key.clone(), value.clone());
    assert_eq!(updated.coordination_id, 0);
    assert!(updated.clear);
    assert_eq!(updated.changes.len(), 1);
    assert!(
        matches!(updated.changes.get(&key), Some(EntryModification::Update(v)) if Arc::ptr_eq(v, &value))
    );
}
#[test]
fn remove_existing_no_clear() {
    // Removing a key that was just updated replaces the Update entry with a
    // single Remove entry; `clear` stays unset.
    let key = Value::Int32Value(2);
    let value = Arc::new(4);
    let summary = TransactionSummary::default();
    let updated = summary.update(key.clone(), value).remove(key.clone());
    assert_eq!(updated.coordination_id, 0);
    assert!(!updated.clear);
    assert_eq!(updated.changes.len(), 1);
    assert!(matches!(
        updated.changes.get(&key),
        Some(EntryModification::Remove)
    ));
}
#[test]
fn remove_existing_clear() {
    // As above but starting from a clear summary; `clear` is preserved.
    let key = Value::Int32Value(2);
    let value = Arc::new(4);
    let summary = TransactionSummary::clear();
    let updated = summary.update(key.clone(), value).remove(key.clone());
    assert_eq!(updated.coordination_id, 0);
    assert!(updated.clear);
    assert_eq!(updated.changes.len(), 1);
    assert!(matches!(
        updated.changes.get(&key),
        Some(EntryModification::Remove)
    ));
}
#[test]
fn remove_non_existent_no_clear() {
    // Removing a key with no prior entry still records a Remove entry.
    let key = Value::Int32Value(2);
    let summary: TransactionSummary<Value, i32> = TransactionSummary::default();
    let updated = summary.remove(key.clone());
    assert_eq!(updated.coordination_id, 0);
    assert!(!updated.clear);
    assert_eq!(updated.changes.len(), 1);
    assert!(matches!(
        updated.changes.get(&key),
        Some(EntryModification::Remove)
    ));
}
#[test]
fn remove_non_existent_clear() {
let key = Value::Int32Value(2);
let summary: TransactionSummary<Value, i32> = TransactionSummary::clear();
let updated = summary.remove(key.clone());
assert_eq!(updated.coordination_id, 0);
assert!(updated.clear);
assert_eq!(updated.changes.len(), 1);
assert!(matches!(
updated.changes.get(&key),
Some(EntryModification::Remove)
));
}
#[test]
fn to_events_checkpoint() {
let with_id: TransactionSummary<i32, String> = TransactionSummary::with_id(67);
let events = with_id.to_events();
assert!(matches!(events.as_slice(), [MapLaneEvent::Checkpoint(67)]));
}
#[test]
fn to_events_no_clear() {
let key1 = Value::Int32Value(2);
let value1 = Arc::new(4);
let key2 = Value::Int32Value(6);
let summary = TransactionSummary::default();
let updated = summary.update(key1, value1.clone()).remove(key2);
let events = updated.to_events();
assert!(matches!(events.as_slice(),
[MapLaneEvent::Update(Value::Int32Value(2), v), MapLaneEvent::Remove(Value::Int32Value(6))] |
[MapLaneEvent::Remove(Value::Int32Value(6)), MapLaneEvent::Update(Value::Int32Value(2), v)]
if Arc::ptr_eq(v, &value1)));
}
#[test]
fn to_events_clear() {
let key1 = Value::Int32Value(2);
let value1 = Arc::new(4);
let key2 = Value::Int32Value(6);
let summary = TransactionSummary::clear();
let updated = summary.update(key1, value1.clone()).remove(key2);
let events = updated.to_events();
assert!(matches!(events.as_slice(),
[MapLaneEvent::Clear, MapLaneEvent::Update(Value::Int32Value(2), v), MapLaneEvent::Remove(Value::Int32Value(6))] |
[MapLaneEvent::Clear, MapLaneEvent::Remove(Value::Int32Value(6)), MapLaneEvent::Update(Value::Int32Value(2), v)]
if Arc::ptr_eq(v, &value1)));
}
#[tokio::test]
async fn clear_summary_transaction() {
let key = Value::Int32Value(2);
let summary: TransactionSummary<Value, i32> = TransactionSummary::default();
let updated = summary.remove(key.clone());
let var = TVar::new(updated);
let result = atomically(&clear_summary(&var), ExactlyOnce).await;
assert!(result.is_ok());
let after = var.load().await;
let TransactionSummary {
coordination_id,
clear,
changes,
} = after.as_ref();
assert_eq!(*coordination_id, 0);
assert!(*clear);
assert!(changes.is_empty());
}
#[tokio::test]
async fn update_summary_transaction() {
let key1 = Value::Int32Value(2);
let value1 = Arc::new(17);
let key2 = Value::Int32Value(12);
let value2 = Arc::new(34);
let summary = TransactionSummary::default().update(key1.clone(), value1.clone());
let var = TVar::new(summary);
let result = atomically(
&update_summary(&var, key2.clone(), value2.clone()),
ExactlyOnce,
)
.await;
assert!(result.is_ok());
let after = var.load().await;
let TransactionSummary {
coordination_id,
clear,
changes,
} = after.as_ref();
assert_eq!(*coordination_id, 0);
assert!(!*clear);
assert_eq!(changes.len(), 2);
assert!(
matches!(changes.get(&key1), Some(EntryModification::Update(v)) if Arc::ptr_eq(v, &value1))
);
assert!(
matches!(changes.get(&key2), Some(EntryModification::Update(v)) if Arc::ptr_eq(v, &value2))
);
}
#[tokio::test]
async fn remove_summary_transaction() {
let key1 = Value::Int32Value(2);
let value1 = Arc::new(17);
let key2 = Value::Int32Value(12);
let value2 = Arc::new(34);
let summary = TransactionSummary::default()
.update(key1.clone(), value1.clone())
.update(key2.clone(), value2.clone());
let var = TVar::new(summary);
let result = atomically(&remove_summary(&var, key2.clone()), ExactlyOnce).await;
assert!(result.is_ok());
let after = var.load().await;
let TransactionSummary {
coordination_id,
clear,
changes,
} = after.as_ref();
assert_eq!(*coordination_id, 0);
assert!(!*clear);
assert_eq!(changes.len(), 2);
assert!(
matches!(changes.get(&key1), Some(EntryModification::Update(v)) if Arc::ptr_eq(v, &value1))
);
assert!(matches!(
changes.get(&key2),
Some(EntryModification::Remove)
));
}
| 29.825939 | 118 | 0.652935 |
916772dc87fd7b709eaf8723f84d869f262e1a05
| 8,910 |
//! Container Runtime Interface server implementation
use crate::cri::{
api::{image_service_server::ImageServiceServer, runtime_service_server::RuntimeServiceServer},
cri_service::CRIServiceBuilder,
};
use anyhow::{bail, Context, Result};
use clap::crate_name;
use common::unix_stream::UnixStream;
pub use config::{Config, LogScope};
use env_logger::fmt::Color;
use futures::TryFutureExt;
use log::{debug, info, trace, LevelFilter};
use network::{
cni::{CNIBuilder, CNI},
Network, NetworkBuilder,
};
use std::{env, io::Write};
use storage::{default_key_value_storage::DefaultKeyValueStorage, KeyValueStorage};
#[cfg(unix)]
use tokio::net::UnixListener;
use tokio::{
fs,
signal::unix::{signal, SignalKind},
};
use tonic::transport;
mod config;
/// Server is the main instance to run the Container Runtime Interface
pub struct Server {
    // Runtime configuration: socket path, log level/scope, storage path
    // and CNI settings (see the `config` module).
    config: Config,
}
impl Server {
    /// Create a new server instance from the given configuration.
    pub fn new(config: Config) -> Self {
        Self { config }
    }

    /// Start a new server with its default values.
    ///
    /// Sets up logging, opens the key/value storage, initializes the CNI
    /// network and then serves the CRI runtime and image services over a
    /// unix domain socket until an interrupt or termination signal arrives,
    /// at which point all resources are cleaned up.
    pub async fn start(self) -> Result<()> {
        self.set_logging_verbosity()
            .context("set logging verbosity")?;

        // Setup the storage and pass it to the service
        let storage =
            DefaultKeyValueStorage::open(&self.config.storage_path().join("cri-service"))?;
        let cri_service = CRIServiceBuilder::default()
            .storage(storage.clone())
            .build()?;

        let network = self.initialize_network().await.context("init network")?;

        // Build a new socket from the config
        let uds = self.unix_domain_listener().await?;

        // Handle shutdown based on signals
        let mut shutdown_terminate = signal(SignalKind::terminate())?;
        let mut shutdown_interrupt = signal(SignalKind::interrupt())?;

        info!(
            "Runtime server listening on {}",
            self.config.sock_path().display()
        );

        // `accept` is retried forever, hence the irrefutable pattern below.
        #[allow(irrefutable_let_patterns)]
        let incoming = async_stream::stream! {
            while let item = uds.accept().map_ok(|(st, _)| UnixStream(st)).await {
                yield item;
            }
        };

        // Serve until either the GRPC server stops or a signal arrives.
        tokio::select! {
            res = transport::Server::builder()
                .add_service(RuntimeServiceServer::new(cri_service.clone()))
                .add_service(ImageServiceServer::new(cri_service))
                .serve_with_incoming(incoming) => {
                res.context("run GRPC server")?
            }
            _ = shutdown_interrupt.recv() => {
                info!("Got interrupt signal, shutting down server");
            }
            _ = shutdown_terminate.recv() => {
                info!("Got termination signal, shutting down server");
            }
        }

        self.cleanup(storage, network).await
    }

    /// Create a new UnixListener from the configs socket path.
    ///
    /// # Errors
    /// Fails if the configured path is not absolute, if a stale socket file
    /// cannot be removed, if the parent directory cannot be created, or if
    /// binding the socket fails.
    async fn unix_domain_listener(&self) -> Result<UnixListener> {
        let sock_path = self.config.sock_path();
        if !sock_path.is_absolute() {
            bail!(
                "specified socket path {} is not absolute",
                sock_path.display()
            )
        }
        if sock_path.exists() {
            // Remove a stale socket from a previous run so `bind` succeeds.
            fs::remove_file(sock_path)
                .await
                .with_context(|| format!("unable to remove socket file {}", sock_path.display()))?;
        } else {
            let sock_dir = sock_path
                .parent()
                .context("unable to get socket path directory")?;
            fs::create_dir_all(sock_dir)
                .await
                .with_context(|| format!("unable to create socket dir {}", sock_dir.display()))?;
        }
        // `context` already yields a `Result`; wrapping it in `Ok(…?)`
        // (as previously done) is redundant.
        UnixListener::bind(sock_path).context("unable to bind socket from path")
    }

    /// Initialize the logger and set the verbosity to the provided level.
    ///
    /// With `LogScope::Global` the level applies to every crate; otherwise it
    /// is scoped to this crate only via `RUST_LOG=<crate>=<level>`.
    fn set_logging_verbosity(&self) -> Result<()> {
        // Set the logging verbosity via the env
        let level = if self.config.log_scope() == LogScope::Global {
            self.config.log_level().to_string()
        } else {
            format!("{}={}", crate_name!(), self.config.log_level())
        };
        env::set_var("RUST_LOG", level);

        // Initialize the logger with the format:
        // [YYYY-MM-DDTHH:MM:SS:MMMZ LEVEL crate::module file:LINE] MSG…
        // The file and line will be only printed when running with debug or trace level.
        let log_level = self.config.log_level();
        env_logger::builder()
            .format(move |buf, r| {
                let mut style = buf.style();
                style.set_color(Color::Black).set_intense(true);
                writeln!(
                    buf,
                    "{}{} {:<5} {}{}{} {}",
                    style.value("["),
                    buf.timestamp_millis(),
                    buf.default_styled_level(r.level()),
                    r.target(),
                    match (log_level >= LevelFilter::Debug, r.file(), r.line()) {
                        (true, Some(file), Some(line)) => format!(" {}:{}", file, line),
                        _ => "".into(),
                    },
                    style.value("]"),
                    r.args()
                )
            })
            .try_init()
            .context("init env logger")
    }

    /// Create a new network and initialize it from the internal configuration.
    async fn initialize_network(&self) -> Result<Network<CNI>> {
        let mut cni_network = CNIBuilder::default()
            .default_network_name(self.config.cni_default_network().clone())
            .config_paths(self.config.cni_config_paths().clone())
            .plugin_paths(self.config.cni_plugin_paths())
            .storage_path(self.config.storage_path().join("cni"))
            .build()
            .context("build CNI network data")?;

        cni_network
            .initialize()
            .await
            .context("initialize CNI network")?;

        let network = NetworkBuilder::<CNI>::default()
            .implementation(cni_network)
            .build()
            .context("build CNI network")?;
        Ok(network)
    }

    /// Cleanup the server and persist any data if necessary.
    ///
    /// Persists the key/value storage, removes the unix socket file and
    /// tears down the CNI network.
    async fn cleanup(
        self,
        mut storage: DefaultKeyValueStorage,
        mut network: Network<CNI>,
    ) -> Result<()> {
        debug!("Cleaning up server");
        trace!("Persisting storage");
        storage.persist().context("persist storage")?;

        trace!("Removing socket path");
        // Synchronous removal is acceptable here: the server has already
        // stopped serving and is about to exit.
        std::fs::remove_file(self.config.sock_path()).with_context(|| {
            format!(
                "unable to remove socket path {}",
                self.config.sock_path().display()
            )
        })?;

        trace!("Stopping network");
        network.cleanup().await.context("clean up network")?;
        trace!("Server shut down");
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::server::config::ConfigBuilder;
    use tempfile::{tempdir, NamedTempFile};
    // Binding to a fresh absolute path should create the socket file.
    #[tokio::test]
    async fn unix_domain_listener_success() -> Result<()> {
        let sock_path = &tempdir()?.path().join("test.sock");
        let config = ConfigBuilder::default().sock_path(sock_path).build()?;
        let sut = Server::new(config);
        assert!(!sock_path.exists());
        sut.unix_domain_listener().await?;
        assert!(sock_path.exists());
        Ok(())
    }
    // A pre-existing file at the socket path is removed and re-bound.
    #[tokio::test]
    async fn unix_domain_listener_success_exists() -> Result<()> {
        let sock_path = NamedTempFile::new()?;
        let config = ConfigBuilder::default()
            .sock_path(sock_path.path())
            .build()?;
        let sut = Server::new(config);
        assert!(sock_path.path().exists());
        sut.unix_domain_listener().await?;
        assert!(sock_path.path().exists());
        Ok(())
    }
    // Relative socket paths are rejected before any filesystem access.
    #[tokio::test]
    async fn unix_domain_listener_fail_not_absolute() -> Result<()> {
        let config = ConfigBuilder::default()
            .sock_path("not/absolute/path")
            .build()?;
        let sut = Server::new(config);
        assert!(sut.unix_domain_listener().await.is_err());
        Ok(())
    }
    // Network initialization succeeds with writable temp directories.
    #[tokio::test]
    async fn initialize_network_success() -> Result<()> {
        let config = ConfigBuilder::default()
            .cni_config_paths(vec![tempdir()?.into_path()])
            .storage_path(tempdir()?.path())
            .build()?;
        let sut = Server::new(config);
        sut.initialize_network().await?;
        Ok(())
    }
    // An unwritable storage path (/proc) must make initialization fail.
    #[tokio::test]
    async fn initialize_network_wrong_storage_path() -> Result<()> {
        let config = ConfigBuilder::default()
            .storage_path("/proc/storage")
            .build()?;
        let sut = Server::new(config);
        assert!(sut.initialize_network().await.is_err());
        Ok(())
    }
}
| 33 | 99 | 0.558698 |
2881772ba07695f22edbd4db89891c6ddab56f4a
| 2,337 |
use specs::prelude::*;
use crate::ecs::components::*;
use crate::effects::*;
/// Marker system that resolves melee attacks for entities holding a
/// `HasMeleeTarget` component (see the `System` impl below).
pub struct MeleeCombat {}
impl<'a> System<'a> for MeleeCombat {
    type SystemData = (
        Entities<'a>,
        WriteStorage<'a, HasMeleeTarget>,
        ReadStorage<'a, HasName>,
        ReadStorage<'a, HasHitPoints>,
    );
    // For every living attacker with a melee target: enqueue a damage effect
    // against the target, or mark the attacker "satisfied" if the target
    // entity no longer exists.
    fn run(&mut self, data: Self::SystemData) {
        let (entities, mut has_melee_target_storage, has_name_storage, has_hit_points_storage) =
            data;
        // Attackers whose target entity is gone; their `HasMeleeTarget`
        // component is removed after the join (can't mutate while joining).
        let mut satisfied = vec![];
        for (entity, has_melee_target, has_name, has_hit_points) in (
            &entities,
            &has_melee_target_storage,
            &has_name_storage,
            &has_hit_points_storage,
        )
            .join()
        {
            // Dead attackers (0 HP or less) take no action this tick.
            if has_hit_points.hit_points.current > 0 {
                let target_entity = has_melee_target.melee_target;
                if !entities.is_alive(target_entity) {
                    satisfied.push(entity);
                    continue;
                }
                let target_hit_points = has_hit_points_storage.get(target_entity).unwrap();
                if target_hit_points.hit_points.current > 0 {
                    let target_name = has_name_storage.get(has_melee_target.melee_target).unwrap();
                    // NOTE(review): damage is a hard-coded placeholder (always
                    // 10), which makes the `damage == 0` branch below
                    // unreachable — presumably a stat-based formula is
                    // planned here; confirm.
                    let damage = i32::max(0, 10);
                    if damage == 0 {
                        debug!("{} is unable to hurt {}", &has_name.name, &target_name.name);
                    } else {
                        debug!(
                            "{} hits {} for {} hp",
                            &has_name.name, &target_name.name, damage
                        );
                        // Damage is applied through the effects queue rather
                        // than by mutating hit points directly.
                        enqueue_effect(
                            Some(entity),
                            Effect::Damage { amount: damage },
                            Target::Entity {
                                entity: has_melee_target.melee_target,
                            },
                        );
                    }
                }
            }
        }
        for entity in satisfied.iter() {
            debug!(
                "{} is satisfied.",
                has_name_storage.get(*entity).unwrap().name
            );
            has_melee_target_storage.remove(*entity);
        }
        // NOTE(review): this wipes *every* entity's melee target each run,
        // which makes the selective `remove` loop above redundant — confirm
        // whether targets are really meant to last only one tick.
        has_melee_target_storage.clear();
    }
}
| 34.367647 | 99 | 0.471973 |
09535ff5693f8b27c584a7fe79bd7cd5c9f8c77e
| 2,324 |
use super::common;
use super::osx_bundle;
use crate::Settings;
use handlebars::Handlebars;
use lazy_static::lazy_static;
use std::collections::BTreeMap;
use std::fs::{write, File};
use std::io::Write;
use std::path::PathBuf;
use std::process::{Command, Stdio};
// Lazily compiled handlebars registry holding the embedded `bundle_dmg`
// shell-script template; compiled once on first access and reused afterwards.
lazy_static! {
    static ref HANDLEBARS: Handlebars<'static> = {
        let mut handlebars = Handlebars::new();
        handlebars
            .register_template_string("bundle_dmg", include_str!("templates/bundle_dmg"))
            .expect("Failed to setup handlebars template");
        handlebars
    };
}
/// Bundle the project as a macOS `.dmg` disk image.
///
/// Builds the `.app` bundle, renders the `bundle_dmg` shell script from its
/// handlebars template, writes the embedded `seticon` helper next to it,
/// makes both executable and finally runs the script.
///
/// Returns the path of the generated bundle script on success.
pub fn bundle_project(settings: &Settings) -> crate::Result<Vec<PathBuf>> {
    // generate the app.app folder
    osx_bundle::bundle_project(settings)?;

    // get uppercase string of app name
    let upcase = settings.binary_name().to_uppercase();

    // generate BTreeMap for templates
    let mut sh_map = BTreeMap::new();
    sh_map.insert("app_name", settings.binary_name());
    sh_map.insert("app_name_upcase", &upcase);

    // `map_err` converts only the error branch; the previous
    // `or_else(|e| Err(e.to_string()))` round-tripped through `Result`.
    let bundle_temp = HANDLEBARS
        .render("bundle_dmg", &sh_map)
        .map_err(|e| e.to_string())?;

    // get the target path
    let output_path = settings.project_out_directory();

    // create paths for script
    let bundle_sh = output_path.join("bundle_dmg.sh");
    common::print_bundling(format!("{:?}", &output_path.join(format!("{}.dmg", &upcase))).as_str())?;

    // write the scripts
    write(&bundle_sh, bundle_temp).map_err(|e| e.to_string())?;

    // copy seticon binary
    let seticon = include_bytes!("templates/seticon");
    let seticon_out = &output_path.join("seticon");
    let mut seticon_buffer = File::create(seticon_out).map_err(|e| e.to_string())?;
    seticon_buffer
        .write_all(seticon)
        .map_err(|e| e.to_string())?;

    // chmod script for execution; `output()` waits for chmod to finish,
    // unlike the previous `spawn()`, which raced with the script execution
    // below (the script could run before it was marked executable).
    Command::new("chmod")
        .arg("777")
        .arg(&bundle_sh)
        .arg(&seticon_out)
        .current_dir(output_path)
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .output()
        .expect("Failed to chmod script");

    // execute the bundle script
    Command::new(&bundle_sh)
        .current_dir(output_path)
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .output()
        .expect("Failed to execute shell script");

    Ok(vec![bundle_sh])
}
| 27.666667 | 99 | 0.687608 |
ac7f23373528f4c4e11d8eb85e1455920b389a19
| 11,492 |
use crate::definitions::Image;
use crate::drawing::draw_if_in_bounds;
use crate::drawing::line::draw_line_segment_mut;
use crate::drawing::Canvas;
use image::{GenericImage, ImageBuffer};
use std::f32;
use std::i32;
/// Returns a copy of `image` with the outline of an axis-aligned ellipse
/// drawn on top of it.
///
/// Only the parts of the ellipse inside the image bounds are drawn. Uses the
/// [Midpoint Ellipse Drawing Algorithm](https://web.archive.org/web/20160128020853/http://tutsheap.com/c/mid-point-ellipse-drawing-algorithm/)
/// (a modification of Bresenham's algorithm).
///
/// The ellipse satisfies `(x^2 / width_radius^2) + (y^2 / height_radius^2) = 1`.
#[must_use = "the function does not modify the original image"]
pub fn draw_hollow_ellipse<I>(
    image: &I,
    center: (i32, i32),
    width_radius: i32,
    height_radius: i32,
    color: I::Pixel,
) -> Image<I::Pixel>
where
    I: GenericImage,
{
    // Copy the input, then delegate to the in-place variant.
    let mut canvas = ImageBuffer::new(image.width(), image.height());
    canvas.copy_from(image, 0, 0).unwrap();
    draw_hollow_ellipse_mut(&mut canvas, center, width_radius, height_radius, color);
    canvas
}
/// Draws the outline of an axis-aligned ellipse directly on `canvas`.
///
/// Only the parts of the ellipse inside the canvas bounds are drawn. Uses the
/// [Midpoint Ellipse Drawing Algorithm](https://web.archive.org/web/20160128020853/http://tutsheap.com/c/mid-point-ellipse-drawing-algorithm/)
/// (a modification of Bresenham's algorithm).
///
/// The ellipse satisfies `(x^2 / width_radius^2) + (y^2 / height_radius^2) = 1`.
pub fn draw_hollow_ellipse_mut<C>(
    canvas: &mut C,
    center: (i32, i32),
    width_radius: i32,
    height_radius: i32,
    color: C::Pixel,
) where
    C: Canvas,
{
    // Equal radii describe a circle, which has a faster dedicated algorithm.
    if width_radius == height_radius {
        draw_hollow_circle_mut(canvas, center, width_radius, color);
        return;
    }
    // Mirror every computed boundary point into all four quadrants.
    let plot_quadrants = |cx: i32, cy: i32, dx: i32, dy: i32| {
        draw_if_in_bounds(canvas, cx + dx, cy + dy, color);
        draw_if_in_bounds(canvas, cx - dx, cy + dy, color);
        draw_if_in_bounds(canvas, cx + dx, cy - dy, color);
        draw_if_in_bounds(canvas, cx - dx, cy - dy, color);
    };
    draw_ellipse(plot_quadrants, center, width_radius, height_radius);
}
/// Returns a copy of `image` with a filled, axis-aligned ellipse drawn on it.
///
/// Only the parts of the ellipse and its interior inside the image bounds are
/// drawn. Uses the [Midpoint Ellipse Drawing Algorithm](https://web.archive.org/web/20160128020853/http://tutsheap.com/c/mid-point-ellipse-drawing-algorithm/)
/// (a modification of Bresenham's algorithm).
///
/// The filled region satisfies `(x^2 / width_radius^2) + (y^2 / height_radius^2) <= 1`.
#[must_use = "the function does not modify the original image"]
pub fn draw_filled_ellipse<I>(
    image: &I,
    center: (i32, i32),
    width_radius: i32,
    height_radius: i32,
    color: I::Pixel,
) -> Image<I::Pixel>
where
    I: GenericImage,
{
    // Copy the input, then delegate to the in-place variant.
    let mut canvas = ImageBuffer::new(image.width(), image.height());
    canvas.copy_from(image, 0, 0).unwrap();
    draw_filled_ellipse_mut(&mut canvas, center, width_radius, height_radius, color);
    canvas
}
/// Draws a filled, axis-aligned ellipse directly on `canvas`.
///
/// Only the parts of the ellipse and its interior inside the canvas bounds
/// are drawn. Uses the [Midpoint Ellipse Drawing Algorithm](https://web.archive.org/web/20160128020853/http://tutsheap.com/c/mid-point-ellipse-drawing-algorithm/)
/// (a modification of Bresenham's algorithm).
///
/// The filled region satisfies `(x^2 / width_radius^2) + (y^2 / height_radius^2) <= 1`.
pub fn draw_filled_ellipse_mut<C>(
    canvas: &mut C,
    center: (i32, i32),
    width_radius: i32,
    height_radius: i32,
    color: C::Pixel,
) where
    C: Canvas,
{
    // Equal radii describe a circle, which has a faster dedicated algorithm.
    if width_radius == height_radius {
        draw_filled_circle_mut(canvas, center, width_radius, color);
        return;
    }
    // For every boundary point, fill the mirrored horizontal spans in the
    // top and bottom halves between the left and right edges.
    let fill_rows = |cx: i32, cy: i32, dx: i32, dy: i32| {
        draw_line_segment_mut(
            canvas,
            ((cx - dx) as f32, (cy + dy) as f32),
            ((cx + dx) as f32, (cy + dy) as f32),
            color,
        );
        draw_line_segment_mut(
            canvas,
            ((cx - dx) as f32, (cy - dy) as f32),
            ((cx + dx) as f32, (cy - dy) as f32),
            color,
        );
    };
    draw_ellipse(fill_rows, center, width_radius, height_radius);
}
// Midpoint ellipse rasterization (https://web.archive.org/web/20160128020853/http://tutsheap.com/c/mid-point-ellipse-drawing-algorithm/),
// a modification of Bresenham's algorithm.
//
// `render_func` receives `(center_x, center_y, offset_x, offset_y)` for every
// computed first-quadrant boundary point; callers decide how to mirror it.
fn draw_ellipse<F>(mut render_func: F, center: (i32, i32), width_radius: i32, height_radius: i32)
where
    F: FnMut(i32, i32, i32, i32),
{
    let (cx, cy) = center;
    let a_sq = width_radius * width_radius;
    let b_sq = height_radius * height_radius;

    let mut x = 0;
    let mut y = height_radius;
    let mut x_term = 0;
    let mut y_term = 2 * a_sq * y;
    render_func(cx, cy, x, y);

    // Region 1 (top/bottom arcs): advance x on every step.
    let mut err = (b_sq - (a_sq * height_radius)) as f32 + (0.25 * a_sq as f32);
    while x_term < y_term {
        x += 1;
        x_term += 2 * b_sq;
        if err < 0.0 {
            err += (b_sq + x_term) as f32;
        } else {
            y -= 1;
            y_term += -2 * a_sq;
            err += (b_sq + x_term - y_term) as f32;
        }
        render_func(cx, cy, x, y);
    }

    // Region 2 (left/right arcs): advance y on every step.
    err = (b_sq as f32) * (x as f32 + 0.5).powi(2) + (a_sq * (y - 1).pow(2)) as f32 - (a_sq * b_sq) as f32;
    while y > 0 {
        y -= 1;
        y_term += -2 * a_sq;
        if err > 0.0 {
            err += (a_sq - y_term) as f32;
        } else {
            x += 1;
            x_term += 2 * b_sq;
            err += (a_sq - y_term + x_term) as f32;
        }
        render_func(cx, cy, x, y);
    }
}
/// Returns a copy of `image` with the outline of a circle drawn on it.
///
/// Only the parts of the circle inside the image bounds are drawn.
#[must_use = "the function does not modify the original image"]
pub fn draw_hollow_circle<I>(
    image: &I,
    center: (i32, i32),
    radius: i32,
    color: I::Pixel,
) -> Image<I::Pixel>
where
    I: GenericImage,
{
    // Copy the input, then delegate to the in-place variant.
    let mut canvas = ImageBuffer::new(image.width(), image.height());
    canvas.copy_from(image, 0, 0).unwrap();
    draw_hollow_circle_mut(&mut canvas, center, radius, color);
    canvas
}
/// Draws the outline of a circle directly on `canvas`.
///
/// Only the parts of the circle inside the canvas bounds are drawn.
/// Uses the midpoint circle algorithm: one octant is computed and the point
/// is mirrored into the remaining seven.
pub fn draw_hollow_circle_mut<C>(canvas: &mut C, center: (i32, i32), radius: i32, color: C::Pixel)
where
    C: Canvas,
{
    let (cx, cy) = center;
    let mut dx = 0i32;
    let mut dy = radius;
    let mut err = 1 - radius;
    while dx <= dy {
        // All eight octant reflections of the computed point.
        let offsets = [
            (dx, dy),
            (dy, dx),
            (-dy, dx),
            (-dx, dy),
            (-dx, -dy),
            (-dy, -dx),
            (dy, -dx),
            (dx, -dy),
        ];
        for &(ox, oy) in &offsets {
            draw_if_in_bounds(canvas, cx + ox, cy + oy, color);
        }
        dx += 1;
        if err < 0 {
            err += 2 * dx + 1;
        } else {
            dy -= 1;
            err += 2 * (dx - dy) + 1;
        }
    }
}
/// Draws a filled circle directly on `canvas`.
///
/// Only the parts of the circle and its interior inside the canvas bounds
/// are drawn. The midpoint circle algorithm computes one octant; each step
/// fills four mirrored horizontal spans.
pub fn draw_filled_circle_mut<C>(canvas: &mut C, center: (i32, i32), radius: i32, color: C::Pixel)
where
    C: Canvas,
{
    let (cx, cy) = center;
    let mut dx = 0i32;
    let mut dy = radius;
    let mut err = 1 - radius;
    while dx <= dy {
        {
            // Horizontal span of `2 * half_width` pixels centered on `cx`.
            let mut hline = |half_width: i32, row: i32| {
                draw_line_segment_mut(
                    canvas,
                    ((cx - half_width) as f32, row as f32),
                    ((cx + half_width) as f32, row as f32),
                    color,
                );
            };
            hline(dx, cy + dy);
            hline(dy, cy + dx);
            hline(dx, cy - dy);
            hline(dy, cy - dx);
        }
        dx += 1;
        if err < 0 {
            err += 2 * dx + 1;
        } else {
            dy -= 1;
            err += 2 * (dx - dy) + 1;
        }
    }
}
/// Returns a copy of `image` with a filled circle drawn on it.
///
/// Only the parts of the circle and its interior inside the image bounds are
/// drawn.
#[must_use = "the function does not modify the original image"]
pub fn draw_filled_circle<I>(
    image: &I,
    center: (i32, i32),
    radius: i32,
    color: I::Pixel,
) -> Image<I::Pixel>
where
    I: GenericImage,
{
    // Copy the input, then delegate to the in-place variant.
    let mut canvas = ImageBuffer::new(image.width(), image.height());
    canvas.copy_from(image, 0, 0).unwrap();
    draw_filled_circle_mut(&mut canvas, center, radius, color);
    canvas
}
#[cfg(test)]
mod tests {
    use image::{GrayImage, Luma};
    // Generates a benchmark for the hollow-ellipse rasterizer with the given
    // center and radii on a 500x500 grayscale image.
    macro_rules! bench_hollow_ellipse {
        ($name:ident, $center:expr, $width_radius:expr, $height_radius:expr) => {
            #[bench]
            fn $name(b: &mut test::Bencher) {
                use super::draw_hollow_ellipse_mut;
                let mut image = GrayImage::new(500, 500);
                let color = Luma([50u8]);
                b.iter(|| {
                    draw_hollow_ellipse_mut(
                        &mut image,
                        $center,
                        $width_radius,
                        $height_radius,
                        color,
                    );
                    // Keep the optimizer from discarding the drawing work.
                    test::black_box(&image);
                });
            }
        };
    }
    bench_hollow_ellipse!(bench_bench_hollow_ellipse_circle, (200, 200), 80, 80);
    bench_hollow_ellipse!(bench_bench_hollow_ellipse_vertical, (200, 200), 40, 100);
    bench_hollow_ellipse!(bench_bench_hollow_ellipse_horizontal, (200, 200), 100, 40);
    // Same as above, but for the filled-ellipse rasterizer.
    macro_rules! bench_filled_ellipse {
        ($name:ident, $center:expr, $width_radius:expr, $height_radius:expr) => {
            #[bench]
            fn $name(b: &mut test::Bencher) {
                use super::draw_filled_ellipse_mut;
                let mut image = GrayImage::new(500, 500);
                let color = Luma([50u8]);
                b.iter(|| {
                    draw_filled_ellipse_mut(
                        &mut image,
                        $center,
                        $width_radius,
                        $height_radius,
                        color,
                    );
                    // Keep the optimizer from discarding the drawing work.
                    test::black_box(&image);
                });
            }
        };
    }
    bench_filled_ellipse!(bench_bench_filled_ellipse_circle, (200, 200), 80, 80);
    bench_filled_ellipse!(bench_bench_filled_ellipse_vertical, (200, 200), 40, 100);
    bench_filled_ellipse!(bench_bench_filled_ellipse_horizontal, (200, 200), 100, 40);
}
| 31.313351 | 194 | 0.569962 |
f7c526d776e877e273b4d15ad94bd6657fc93cd8
| 1,708 |
#![forbid(unsafe_code)]
#![deny(
// missing_docs,
unstable_features,
missing_debug_implementations,
// missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unused_import_braces,
// unused_qualifications,
)]
// #![allow(unused_imports, dead_code)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate rocket;
use crate::{
common::new_decoy,
config::{config, validate_config},
};
use regex::Regex;
use rocket::request::Request;
mod common;
mod config;
mod instances;
mod templates;
mod utils;
mod workstations;
#[cfg(test)]
mod tests;
// Relative sub-directory names — presumably consumed by the `instances` and
// `workstations` modules; confirm exact usage there:
const ENTRIES_DIR: &str = "entries/";
const INSTANCES_DIR: &str = "instances/";
const WORKSTATIONS_DIR: &str = "workstations/";
// Absolute paths of the server-side WireGuard key pair:
const SERVER_PUBLIC_KEY: &str = "/Services/Wireguard-tools/pub.key";
const SERVER_PRIVATE_KEY: &str = "/Services/Wireguard-tools/private.key";
lazy_static! {
    // this will be reused after first regex compilation:
    // NOTE: inside a character class, `-` between two characters denotes a
    // range. The previous pattern `[a-zA-Z0-9 -\.]` therefore accepted every
    // ASCII character from ' ' (0x20) through '.' (0x2E), including
    // `!"#$%&'()*+,`. Placing `-` immediately before the closing bracket
    // makes it literal, so only letters, digits, spaces, dots and hyphens
    // are accepted, which matches the apparent intent for file names.
    static ref FILE_NAME_REGEX: Regex = Regex::new(r"^[a-zA-Z0-9 .-]{3,}$").unwrap();
}
// Catcher for HTTP 500: a terse static body that exposes no internal details.
#[catch(500)]
fn internal_error() -> &'static str {
    "Internal Error."
}
// Catcher for HTTP 404: responds with decoy content (`common::new_decoy`)
// instead of a plain 404 page — presumably to make endpoint probing harder;
// confirm against the decoy implementation.
#[catch(404)]
fn not_found(_req: &Request) -> String {
    new_decoy()
}
// Entry point: validates the configuration, then serves the instance and
// workstation creation routes plus the error catchers.
#[rocket::main]
async fn main() -> Result<(), rocket::Error> {
    // Fail fast on an invalid configuration before binding any routes.
    validate_config(&config());
    rocket::build()
        // Routes are namespaced under the configured UUID, so the paths
        // cannot be guessed without knowing the deployment's configuration.
        .mount(
            &format!("/{}/wireguard/instance/", config().uuid),
            routes![instances::new],
        )
        .mount(
            &format!("/{}/wireguard/workstation/", config().uuid),
            routes![workstations::new],
        )
        .register("/", catchers![internal_error, not_found])
        .launch()
        .await
}
| 21.620253 | 86 | 0.625878 |
bb5878b81822f3d001d088779842852ebb6221aa
| 2,803 |
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use std::sync::Arc;
use std::{any::Any, pin::Pin};
use arrow::datatypes::SchemaRef;
use async_trait::async_trait;
use datafusion::physical_plan::{ExecutionPlan, Partitioning};
use datafusion::{error::Result, physical_plan::RecordBatchStream};
use uuid::Uuid;
/// QueryStageExec represents a section of a query plan that has consistent partitioning and
/// can be executed as one unit with each partition being executed in parallel. The output of
/// a query stage either forms the input of another query stage or can be the final result of
/// a query.
// Clone is cheap: `child` is an `Arc` and the ids are small.
#[derive(Debug, Clone)]
pub struct QueryStageExec {
    /// Unique ID for the job (query) that this stage is a part of
    pub job_id: String,
    /// Unique query stage ID within the job
    pub stage_id: usize,
    /// Physical execution plan for this query stage
    pub child: Arc<dyn ExecutionPlan>,
}
impl QueryStageExec {
    /// Create a new query stage
    ///
    /// Note: there is currently no failing path here; the `Result` return is
    /// presumably kept so validation can be added without breaking callers —
    /// confirm before relying on infallibility.
    pub fn try_new(job_id: String, stage_id: usize, child: Arc<dyn ExecutionPlan>) -> Result<Self> {
        Ok(Self {
            job_id,
            stage_id,
            child,
        })
    }
}
#[async_trait]
impl ExecutionPlan for QueryStageExec {
    /// Returns this node as `Any` so callers can downcast it.
    fn as_any(&self) -> &dyn Any {
        self
    }

    /// The schema is exactly that of the wrapped child plan.
    fn schema(&self) -> SchemaRef {
        self.child.schema()
    }

    /// Partitioning is inherited unchanged from the child plan.
    fn output_partitioning(&self) -> Partitioning {
        self.child.output_partitioning()
    }

    /// A query stage always has exactly one child.
    fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
        vec![self.child.clone()]
    }

    /// Rebuilds the stage around a replacement child plan.
    ///
    /// # Panics
    /// Panics if `children` does not contain exactly one element.
    fn with_new_children(
        &self,
        children: Vec<Arc<dyn ExecutionPlan>>,
    ) -> Result<Arc<dyn ExecutionPlan>> {
        // `assert_eq!` reports both sides on failure, unlike the previous
        // `assert!(children.len() == 1)`.
        assert_eq!(children.len(), 1);
        Ok(Arc::new(QueryStageExec::try_new(
            self.job_id.clone(),
            self.stage_id,
            children[0].clone(),
        )?))
    }

    /// Delegates execution of `partition` to the child plan.
    async fn execute(
        &self,
        partition: usize,
    ) -> Result<Pin<Box<dyn RecordBatchStream + Send + Sync>>> {
        self.child.execute(partition).await
    }
}
| 31.494382 | 100 | 0.665002 |
e289a154ca256d5bfb1cd1c68e8e2bea76d1114a
| 2,475 |
#[doc = r" Value read from the register"]
pub struct R {
    // Raw register snapshot captured at `read`/`modify` time.
    bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
    // Raw value that will be committed to the register on `set`.
    bits: u32,
}
impl super::OtgHsHcdma8 {
    #[doc = r" Modifies the contents of the register"]
    #[inline(always)]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        // Read-modify-write: hand the closure both the current value (`R`)
        // and a writer pre-loaded with it (`W`), then commit the result.
        let current = self.register.get();
        let reader = R { bits: current };
        let mut writer = W { bits: current };
        f(&reader, &mut writer);
        self.register.set(writer.bits);
    }
    #[doc = r" Reads the contents of the register"]
    #[inline(always)]
    pub fn read(&self) -> R {
        let bits = self.register.get();
        R { bits }
    }
    #[doc = r" Writes to the register"]
    #[inline(always)]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Start from the reset value so fields the closure does not touch
        // are written with their defaults.
        let mut writer = W::reset_value();
        f(&mut writer);
        self.register.set(writer.bits);
    }
}
#[doc = r" Value of the field"]
pub struct DmaaddrR {
    // Extracted field value (already shifted and masked).
    bits: u32,
}
impl DmaaddrR {
    #[doc = r" Value of the field as raw bits"]
    // Plain accessor over the already-extracted field value.
    #[inline(always)]
    pub fn bits(&self) -> u32 {
        self.bits
    }
}
#[doc = r" Proxy"]
pub struct _DmaaddrW<'a> {
    // Borrowed writer that the field value is merged into.
    w: &'a mut W,
}
impl<'a> _DmaaddrW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, bits: u32) -> &'a mut W {
        const MASK: u32 = 4294967295;
        const OFFSET: u8 = 0;
        // Clear the field's bit range, then merge in the new masked value.
        let cleared = self.w.bits & !((MASK as u32) << OFFSET);
        self.w.bits = cleared | (((bits & MASK) as u32) << OFFSET);
        self.w
    }
}
impl R {
    #[doc = r" Value of the register as raw bits"]
    #[inline(always)]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    #[doc = "Bits 0:31 - DMA address"]
    #[inline(always)]
    pub fn dmaaddr(&self) -> DmaaddrR {
        const MASK: u32 = 4294967295;
        const OFFSET: u8 = 0;
        // The field spans the full register, so this extracts all 32 bits.
        let bits = ((self.bits >> OFFSET) & MASK as u32) as u32;
        DmaaddrR { bits }
    }
}
impl W {
    #[doc = r" Reset value of the register"]
    // All-zero reset value for this register.
    #[inline(always)]
    pub fn reset_value() -> W {
        W { bits: 0 }
    }
    #[doc = r" Writes raw bits to the register"]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
    #[doc = "Bits 0:31 - DMA address"]
    // Returns a proxy whose `bits` setter writes the DMAADDR field.
    #[inline(always)]
    pub fn dmaaddr(&mut self) -> _DmaaddrW {
        _DmaaddrW { w: self }
    }
}
| 24.50495 | 59 | 0.506263 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.