hexsha
stringlengths 40
40
| size
int64 4
1.05M
| content
stringlengths 4
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
878186059df1f31e9e41e32ca42bbf48db717f49 | 15,278 | /*
Copyright (C) 2018-2019 [email protected]
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
use crate::constant_offsets::ConstantOffsets;
use crate::instruction::Instruction;
use crate::utils::to_value_error;
use core::slice;
use pyo3::class::iter::IterNextOutput;
use pyo3::exceptions::PyTypeError;
use pyo3::gc::{PyGCProtocol, PyVisit};
use pyo3::prelude::*;
use pyo3::types::{PyByteArray, PyBytes};
use pyo3::{PyIterProtocol, PyTraverseError};
// Keeps the backing storage of the decoder's `&'static [u8]` slice alive.
enum DecoderDataRef {
    // No backing data (set by `__clear__` during GC teardown).
    None,
    // Owned copy of the caller's `bytearray` contents.
    Vec(Vec<u8>),
    // Strong reference to the caller's immutable `bytes` object.
    PyObj(PyObject),
}
/// Decodes 16/32/64-bit x86 instructions
///
/// Args:
/// bitness (int): 16, 32 or 64
/// data (bytes, bytearray): Data to decode. For best PERF, use :class:`bytes` since it's immutable and nothing gets copied.
/// options (:class:`DecoderOptions`): (default = :class:`DecoderOptions.NONE`) Decoder options, eg. :class:`DecoderOptions.NO_INVALID_CHECK` | :class:`DecoderOptions.AMD`
///
/// Raises:
/// ValueError: If `bitness` is invalid
/// TypeError: If `data` is not a supported type
///
/// Examples:
///
/// .. testcode::
///
/// from iced_x86 import *
///
/// data = b"\x86\x64\x32\x16\xF0\xF2\x83\x00\x5A\x62\xC1\xFE\xCB\x6F\xD3"
/// decoder = Decoder(64, data)
/// decoder.ip = 0x1234_5678
///
/// # The decoder is iterable
/// for instr in decoder:
/// print(f"Decoded: IP=0x{instr.ip:X}: {instr}")
///
/// Output:
///
/// .. testoutput::
///
/// Decoded: IP=0x12345678: xchg ah,[rdx+rsi+16h]
/// Decoded: IP=0x1234567C: xacquire lock add dword ptr [rax],5Ah
/// Decoded: IP=0x12345681: vmovdqu64 zmm18{k3}{z},zmm11
///
/// .. testcode::
///
/// from iced_x86 import *
///
/// # xchg ah,[rdx+rsi+16h]
/// # xacquire lock add dword ptr [rax],5Ah
/// # vmovdqu64 zmm18{k3}{z},zmm11
/// data = b"\x86\x64\x32\x16\xF0\xF2\x83\x00\x5A\x62\xC1\xFE\xCB\x6F\xD3"
/// decoder = Decoder(64, data)
/// decoder.ip = 0x1234_5678
///
/// instr1 = decoder.decode()
/// assert instr1.code == Code.XCHG_RM8_R8
/// assert instr1.mnemonic == Mnemonic.XCHG
/// assert instr1.len == 4
///
/// instr2 = decoder.decode()
/// assert instr2.code == Code.ADD_RM32_IMM8
/// assert instr2.mnemonic == Mnemonic.ADD
/// assert instr2.len == 5
///
/// instr3 = decoder.decode()
/// assert instr3.code == Code.EVEX_VMOVDQU64_ZMM_K1Z_ZMMM512
/// assert instr3.mnemonic == Mnemonic.VMOVDQU64
/// assert instr3.len == 6
///
/// It's sometimes useful to decode some invalid instructions, eg. ``lock add esi,ecx``.
/// Pass in :class:`DecoderOptions.NO_INVALID_CHECK` to the constructor and the decoder
/// will decode some invalid encodings.
///
/// .. testcode::
///
/// from iced_x86 import *
///
/// # lock add esi,ecx # lock not allowed
/// data = b"\xF0\x01\xCE"
/// decoder = Decoder(64, data)
/// decoder.ip = 0x1234_5678
/// instr = decoder.decode()
/// assert instr.code == Code.INVALID
///
/// # We want to decode some instructions with invalid encodings
/// decoder = Decoder(64, data, DecoderOptions.NO_INVALID_CHECK)
/// decoder.ip = 0x1234_5678
/// instr = decoder.decode()
/// assert instr.code == Code.ADD_RM32_R32
/// assert instr.has_lock_prefix
#[pyclass(module = "_iced_x86_py")]
#[text_signature = "(bitness, data, options, /)"]
pub(crate) struct Decoder {
    // * If the decoder ctor was called with a `bytes` object, data_ref is PyObj(`bytes` object)
    //   and the decoder holds a ref to its data.
    // * If the decoder ctor was called with a `bytearray` object, data_ref is Vec(copy of `bytearray` data)
    //   and the decoder holds a reference to this copied data.
    data_ref: DecoderDataRef,
    // The `'static` lifetime is a convenient lie: the slice actually borrows from
    // `data_ref` above, which must outlive the decoder (it does, since both are
    // fields of the same struct and `data_ref` is only cleared by the GC).
    decoder: iced_x86::Decoder<'static>,
}
// SAFETY: iced_x86::Decoder has read only pointer fields which are !Send.
// The pointers only refer to the immutable decode buffer, so moving the
// Decoder to another thread cannot cause a data race.
unsafe impl Send for Decoder {}
#[pymethods]
impl Decoder {
#[new]
#[args(options = 0)]
fn new(bitness: u32, data: &PyAny, options: u32) -> PyResult<Self> {
    // #[args] line assumption: the `options = 0` default above must equal DecoderOptions::NONE
    const_assert_eq!(0, iced_x86::DecoderOptions::NONE);
    let (data_ref, decoder_data): (DecoderDataRef, &'static [u8]) = if let Ok(bytes) = <PyBytes as PyTryFrom>::try_from(data) {
        let slice_data = bytes.as_bytes();
        // SAFETY: `bytes` is an immutable Python object and we store a strong
        // reference to it in `data_ref` below, so extending the slice lifetime to
        // 'static is sound for as long as the decoder (which owns `data_ref`) lives.
        let decoder_data = unsafe { slice::from_raw_parts(slice_data.as_ptr(), slice_data.len()) };
        (DecoderDataRef::PyObj(bytes.into()), decoder_data)
    } else if let Ok(bytearray) = <PyByteArray as PyTryFrom>::try_from(data) {
        //TODO: support bytearray without copying its data by getting a ref to its data every time the Decoder is used (also update the ctor args docs)
        // SAFETY: the bytes are copied into our own Vec before any Python code can
        // run again and mutate/resize the bytearray.
        let vec_data: Vec<_> = unsafe { bytearray.as_bytes().into() };
        // SAFETY: the raw slice points into `vec_data`, which is moved into
        // `data_ref` below and therefore lives as long as the decoder.
        let decoder_data = unsafe { slice::from_raw_parts(vec_data.as_ptr(), vec_data.len()) };
        (DecoderDataRef::Vec(vec_data), decoder_data)
    } else {
        //TODO: support memoryview (also update docs and get_temporary_byte_array_ref and the message below)
        return Err(PyTypeError::new_err("Expected one of these types: bytes, bytearray"));
    };
    let decoder = iced_x86::Decoder::try_new(bitness, decoder_data, options).map_err(to_value_error)?;
    Ok(Decoder { data_ref, decoder })
}
/// int: (``u64``) The current ``IP``/``EIP``/``RIP`` value, see also :class:`Decoder.position`
///
/// Note:
/// The setter only updates the IP value, it does not change the data position, use the :class:`Decoder.position` setter to change the position.
#[getter]
fn ip(&self) -> u64 {
    self.decoder.ip()
}
#[setter]
fn set_ip(&mut self, new_value: u64) {
    // Only updates the IP; the data position is unchanged (see the doc comment above).
    self.decoder.set_ip(new_value);
}
/// int: Gets the bitness (16, 32 or 64)
#[getter]
fn bitness(&self) -> u32 {
    // Fixed at construction time (16, 32 or 64).
    self.decoder.bitness()
}
/// int: (``usize``) Gets the max value that can be written to :class:`Decoder.position`.
///
/// This is the size of the data that gets decoded to instructions and it's the length of the data that was passed to the constructor.
#[getter]
fn max_position(&self) -> usize {
    // Equals the length of the data passed to the constructor.
    self.decoder.max_position()
}
/// int: (``usize``) The current data position, which is the index into the data passed to the constructor.
///
/// This value is always <= :class:`Decoder.max_position`. When :class:`Decoder.position` == :class:`Decoder.max_position`, it's not possible to decode more
/// instructions and :class:`Decoder.can_decode` returns ``False``.
///
/// Raises:
/// ValueError: If the new position is invalid.
///
/// Examples:
///
/// .. testcode::
///
/// from iced_x86 import *
///
/// # nop and pause
/// data = b"\x90\xF3\x90"
/// decoder = Decoder(64, data)
/// decoder.ip = 0x1234_5678
///
/// assert decoder.position == 0
/// assert decoder.max_position == 3
/// instr = decoder.decode()
/// assert decoder.position == 1
/// assert instr.code == Code.NOPD
///
/// instr = decoder.decode()
/// assert decoder.position == 3
/// assert instr.code == Code.PAUSE
///
/// # Start all over again
/// decoder.position = 0
/// decoder.ip = 0x1234_5678
/// assert decoder.position == 0
/// assert decoder.decode().code == Code.NOPD
/// assert decoder.decode().code == Code.PAUSE
/// assert decoder.position == 3
#[getter]
fn position(&self) -> usize {
    self.decoder.position()
}
#[setter]
fn set_position(&mut self, new_value: usize) -> PyResult<()> {
    // try_set_position() validates the new position; an invalid one becomes ValueError.
    self.decoder.try_set_position(new_value).map_err(to_value_error)
}
/// bool: Returns ``True`` if there's at least one more byte to decode.
///
/// It doesn't verify that the next instruction is valid, it only checks if there's
/// at least one more byte to read. See also :class:`Decoder.position` and :class:`Decoder.max_position`.
///
/// It's not required to call this method. If this method returns ``False``, then :class:`Decoder.decode_out`
/// and :class:`Decoder.decode` will return an instruction whose :class:`Instruction.code` == :class:`Code.INVALID`.
///
/// Examples:
///
/// .. testcode::
///
/// from iced_x86 import *
///
/// # nop and an incomplete instruction
/// data = b"\x90\xF3\x0F"
/// decoder = Decoder(64, data)
/// decoder.ip = 0x1234_5678
///
/// # 3 bytes left to read
/// assert decoder.can_decode
/// instr = decoder.decode()
/// assert instr.code == Code.NOPD
///
/// # 2 bytes left to read
/// assert decoder.can_decode
/// instr = decoder.decode()
/// # Not enough bytes left to decode a full instruction
/// assert decoder.last_error == DecoderError.NO_MORE_BYTES
/// assert instr.code == Code.INVALID
/// assert not instr
/// assert instr.is_invalid
///
/// # 0 bytes left to read
/// assert not decoder.can_decode
#[getter]
fn can_decode(&self) -> bool {
    // True iff position < max_position; does not validate the next instruction.
    self.decoder.can_decode()
}
/// :class:`DecoderError`: Gets the last decoder error (a :class:`DecoderError` enum value).
///
/// Unless you need to know the reason it failed, it's better to check :class:`Instruction.is_invalid` or ``if not instruction:``.
#[getter]
fn last_error(&self) -> u32 {
    // Returned as the raw integer value of the DecoderError enum (see doc comment above).
    self.decoder.last_error() as u32
}
/// Decodes and returns the next instruction.
///
/// See also :class:`Decoder.decode_out` which avoids copying the decoded instruction to the caller's return variable.
/// See also :class:`Decoder.last_error`.
///
/// Returns:
/// Instruction: The next instruction
///
/// Examples:
///
/// .. testcode::
///
/// from iced_x86 import *
///
/// # xrelease lock add [rax],ebx
/// data = b"\xF0\xF3\x01\x18"
/// decoder = Decoder(64, data)
/// decoder.ip = 0x1234_5678
/// instr = decoder.decode()
///
/// assert instr.code == Code.ADD_RM32_R32
/// assert instr.mnemonic == Mnemonic.ADD
/// assert instr.len == 4
/// assert instr.op_count == 2
///
/// assert instr.op0_kind == OpKind.MEMORY
/// assert instr.memory_base == Register.RAX
/// assert instr.memory_index == Register.NONE
/// assert instr.memory_index_scale == 1
/// assert instr.memory_displacement == 0
/// assert instr.memory_segment == Register.DS
/// assert instr.segment_prefix == Register.NONE
/// assert instr.memory_size == MemorySize.UINT32
///
/// assert instr.op1_kind == OpKind.REGISTER
/// assert instr.op1_register == Register.EBX
///
/// assert instr.has_lock_prefix
/// assert instr.has_xrelease_prefix
#[text_signature = "($self, /)"]
fn decode(&mut self) -> Instruction {
    // Allocates a new Python-visible Instruction wrapping the decoded value.
    Instruction { instr: self.decoder.decode() }
}
/// Decodes the next instruction.
///
/// The difference between this method and :class:`Decoder.decode` is that this method doesn't need to
/// allocate a new instruction since it overwrites the input instruction.
///
/// See also :class:`Decoder.last_error`.
///
/// Args:
/// instruction (:class:`Instruction`): Updated with the decoded instruction.
///
/// Examples:
///
/// .. testcode::
///
/// from iced_x86 import *
///
/// # xrelease lock add [rax],ebx
/// data = b"\xF0\xF3\x01\x18"
/// decoder = Decoder(64, data)
/// decoder.ip = 0x1234_5678
/// instr = Instruction()
/// decoder.decode_out(instr)
///
/// assert instr.code == Code.ADD_RM32_R32
/// assert instr.mnemonic == Mnemonic.ADD
/// assert instr.len == 4
/// assert instr.op_count == 2
///
/// assert instr.op0_kind == OpKind.MEMORY
/// assert instr.memory_base == Register.RAX
/// assert instr.memory_index == Register.NONE
/// assert instr.memory_index_scale == 1
/// assert instr.memory_displacement == 0
/// assert instr.memory_segment == Register.DS
/// assert instr.segment_prefix == Register.NONE
/// assert instr.memory_size == MemorySize.UINT32
///
/// assert instr.op1_kind == OpKind.REGISTER
/// assert instr.op1_register == Register.EBX
///
/// assert instr.has_lock_prefix
/// assert instr.has_xrelease_prefix
#[text_signature = "($self, instruction, /)"]
fn decode_out(&mut self, instruction: &mut Instruction) {
    // Overwrites the caller's instruction in place; no new Instruction is allocated.
    self.decoder.decode_out(&mut instruction.instr)
}
/// Gets the offsets of the constants (memory displacement and immediate) in the decoded instruction.
///
/// The caller can check if there are any relocations at those addresses.
///
/// Args:
/// instruction (:class:`Instruction`): The latest instruction that was decoded by this decoder
///
/// Returns:
/// ConstantOffsets: Offsets and sizes of immediates
///
/// Examples:
///
/// .. testcode::
///
/// from iced_x86 import *
///
/// # nop
/// # xor dword ptr [rax-5AA5EDCCh],5Ah
/// # 00 01 02 03 04 05 06
/// # \opc\mrm\displacement___\imm
/// data = b"\x90\x83\xB3\x34\x12\x5A\xA5\x5A"
/// decoder = Decoder(64, data)
/// decoder.ip = 0x1234_5678
/// assert decoder.decode().code == Code.NOPD
/// instr = decoder.decode()
/// co = decoder.get_constant_offsets(instr)
///
/// assert co.has_displacement
/// assert co.displacement_offset == 2
/// assert co.displacement_size == 4
/// assert co.has_immediate
/// assert co.immediate_offset == 6
/// assert co.immediate_size == 1
/// # It's not an instruction with two immediates (e.g. enter)
/// assert not co.has_immediate2
/// assert co.immediate_offset2 == 0
/// assert co.immediate_size2 == 0
#[text_signature = "($self, instruction, /)"]
fn get_constant_offsets(&self, instruction: &Instruction) -> ConstantOffsets {
    // Offsets are relative to the start of the given instruction's encoding.
    ConstantOffsets { offsets: self.decoder.get_constant_offsets(&instruction.instr) }
}
}
#[pyproto]
impl PyGCProtocol for Decoder {
    /// Reports the held `bytes` object (if any) to Python's cyclic GC.
    fn __traverse__(&self, visit: PyVisit) -> Result<(), PyTraverseError> {
        match self.data_ref {
            DecoderDataRef::PyObj(ref data_obj) => visit.call(data_obj),
            _ => Ok(()),
        }
    }

    /// Drops the reference to the `bytes` object so the GC can break reference cycles.
    fn __clear__(&mut self) {
        if matches!(self.data_ref, DecoderDataRef::PyObj(_)) {
            self.data_ref = DecoderDataRef::None;
        }
    }
}
#[pyproto]
impl PyIterProtocol for Decoder {
    /// `iter(decoder)` returns the decoder itself.
    fn __iter__(slf: PyRef<Self>) -> PyRef<Self> {
        slf
    }

    /// Yields the next decoded instruction, or stops iteration when no bytes remain.
    fn __next__(mut slf: PyRefMut<Self>) -> IterNextOutput<Instruction, ()> {
        if !slf.decoder.can_decode() {
            return IterNextOutput::Return(());
        }
        IterNextOutput::Yield(slf.decode())
    }
}
| 34.178971 | 175 | 0.651394 |
0ead64e388a304b49bd997de9c6da159d72fd168 | 1,570 | // Copyright 2020 Ant Financial. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
#[macro_use]
pub mod error;
pub use error::*;
pub mod exec;
pub use exec::*;
#[macro_use]
extern crate log;
#[cfg(feature = "fusedev")]
pub mod fuse;
#[cfg(feature = "fusedev")]
pub use self::fuse::{FuseChannel, FuseSession};
pub mod signal;
/// Converts a `log::LevelFilter` into a `-v`-style verbosity count.
///
/// `LevelFilter::Error` maps to 0, `Warn` to 1, and so on up to `Trace`.
/// `LevelFilter::Off` (discriminant 0) also maps to 0: the previous
/// `level as usize - 1` underflowed and panicked in debug builds for `Off`.
pub fn log_level_to_verbosity(level: log::LevelFilter) -> usize {
    (level as usize).saturating_sub(1)
}
/// Divides `n` by `d`, rounding up.
///
/// Unlike the naive `(n + d - 1) / d`, this cannot overflow when
/// `n + d - 1` would exceed `u64::MAX` (e.g. `n == u64::MAX`).
///
/// # Panics
///
/// Panics if `d == 0` (division by zero), as before.
pub fn div_round_up(n: u64, d: u64) -> u64 {
    if n == 0 {
        // Avoid the `n - 1` underflow below; ceil(0 / d) is 0.
        0
    } else {
        (n - 1) / d + 1
    }
}
/// Rounds `x` up to the next multiple of 4096 (4 KiB).
///
/// Returns `None` when the rounded value would overflow a `u64`.
/// Unlike the previous `((x - 1) | 4095).checked_add(1)`, this also handles
/// `x == 0` (returns `Some(0)`) instead of underflowing `x - 1`.
pub fn round_up_4k(x: u64) -> Option<u64> {
    // Add the maximum possible adjustment, then clear the low 12 bits.
    x.checked_add(4095).map(|v| v & !4095u64)
}
/// Rounds `x` down to the nearest multiple of 4096 (4 KiB).
pub fn round_down_4k(x: u64) -> u64 {
    // Truncating division then re-multiplication clears the low 12 bits,
    // equivalent to `x & !4095`.
    (x / 4096) * 4096
}
#[cfg(test)]
mod tests {
    use super::*;
    // Boundary and overflow cases for the 4 KiB rounding helpers.
    #[test]
    fn test_rounders() {
        assert_eq!(round_down_4k(100), 0);
        assert_eq!(round_down_4k(4300), 4096);
        assert_eq!(round_down_4k(4096), 4096);
        assert_eq!(round_down_4k(4095), 0);
        assert_eq!(round_down_4k(4097), 4096);
        assert_eq!(round_down_4k(u64::MAX - 1), u64::MAX - 4095);
        assert_eq!(round_down_4k(u64::MAX - 4095), u64::MAX - 4095);
        assert_eq!(round_down_4k(0), 0);
        assert_eq!(round_up_4k(100), Some(4096));
        assert_eq!(round_up_4k(4100), Some(8192));
        assert_eq!(round_up_4k(4096), Some(4096));
        assert_eq!(round_up_4k(4095), Some(4096));
        assert_eq!(round_up_4k(4097), Some(8192));
        assert_eq!(round_up_4k(u64::MAX - 1), None);
        assert_eq!(round_up_4k(u64::MAX), None);
        assert_eq!(round_up_4k(u64::MAX - 4096), Some(u64::MAX - 4095));
    }
}
| 26.166667 | 72 | 0.617834 |
0afb83546003a19e29d068f1f391a34a776c7baf | 25,991 | //! The `Window` struct and associated types.
use std::fmt;
use crate::{
dpi::{LogicalPosition, LogicalSize},
error::{ExternalError, NotSupportedError, OsError},
event_loop::EventLoopWindowTarget,
monitor::{AvailableMonitorsIter, MonitorHandle, VideoMode},
platform_impl,
};
pub use crate::icon::*;
/// Represents a window.
///
/// # Example
///
/// ```no_run
/// use winit::{
/// event::{Event, WindowEvent},
/// event_loop::{ControlFlow, EventLoop},
/// window::Window,
/// };
///
/// let mut event_loop = EventLoop::new();
/// let window = Window::new(&event_loop).unwrap();
///
/// event_loop.run(move |event, _, control_flow| {
/// match event {
/// Event::WindowEvent {
/// event: WindowEvent::CloseRequested,
/// ..
/// } => *control_flow = ControlFlow::Exit,
/// _ => *control_flow = ControlFlow::Wait,
/// }
/// });
/// ```
pub struct Window {
    // Backend implementation; every public method on `Window` delegates to it.
    pub(crate) window: platform_impl::Window,
}
impl fmt::Debug for Window {
    // Opaque output: the platform window exposes no user-meaningful Debug state.
    fn fmt(&self, fmtr: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmtr.pad("Window { .. }")
    }
}
impl Drop for Window {
    fn drop(&mut self) {
        // Exclusive fullscreen changed the desktop video mode, so it must be
        // restored when the window goes away. This can't wait for application
        // exit: closing one window doesn't necessarily end the app when other
        // windows are still open.
        if matches!(self.fullscreen(), Some(Fullscreen::Exclusive(_))) {
            self.set_fullscreen(None);
        }
    }
}
/// Identifier of a window. Unique for each window.
///
/// Can be obtained with `window.id()`.
///
/// Whenever you receive an event specific to a window, this event contains a `WindowId` which you
/// can then compare to the ids of your windows.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct WindowId(pub(crate) platform_impl::WindowId); // newtype over the backend id
impl WindowId {
    /// Returns a dummy `WindowId`, useful for unit testing. The only guarantee made about the return
    /// value of this function is that it will always be equal to itself and to future values returned
    /// by this function. No other guarantees are made. This may be equal to a real `WindowId`.
    ///
    /// **Passing this into a winit function will result in undefined behavior.**
    ///
    /// # Safety
    ///
    /// The returned id must never be passed to any winit API that expects a real window id.
    pub unsafe fn dummy() -> Self {
        WindowId(platform_impl::WindowId::dummy())
    }
}
/// Object that allows you to build windows.
#[derive(Clone)]
pub struct WindowBuilder {
    /// The attributes to use to create the window.
    pub window: WindowAttributes,
    // Platform-specific configuration.
    // NOTE(review): private field — presumably populated via the `platform::*`
    // extension traits; confirm against those modules.
    pub(crate) platform_specific: platform_impl::PlatformSpecificWindowBuilderAttributes,
}
impl fmt::Debug for WindowBuilder {
    // Manual impl: only `window` is printed; `platform_specific` is omitted
    // (NOTE(review): presumably because it does not implement Debug — confirm).
    fn fmt(&self, fmtr: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmtr.debug_struct("WindowBuilder")
            .field("window", &self.window)
            .finish()
    }
}
/// Attributes to use when creating a window.
#[derive(Debug, Clone)]
pub struct WindowAttributes {
    /// The dimensions of the window. If this is `None`, some platform-specific dimensions will be
    /// used.
    ///
    /// The default is `None`.
    pub inner_size: Option<LogicalSize>,
    /// The minimum dimensions a window can be. If this is `None`, the window will have no minimum dimensions (aside from reserved).
    ///
    /// The default is `None`.
    pub min_inner_size: Option<LogicalSize>,
    /// The maximum dimensions a window can be. If this is `None`, the window will have no maximum, or the maximum will be set to the primary monitor's dimensions by the platform.
    ///
    /// The default is `None`.
    pub max_inner_size: Option<LogicalSize>,
    /// Whether the window is resizable or not.
    ///
    /// The default is `true`.
    pub resizable: bool,
    /// Whether the window should be set as fullscreen upon creation.
    ///
    /// The default is `None`.
    pub fullscreen: Option<Fullscreen>,
    /// The title of the window in the title bar.
    ///
    /// The default is `"winit window"`.
    pub title: String,
    /// Whether the window should be maximized upon creation.
    ///
    /// The default is `false`.
    pub maximized: bool,
    /// Whether the window should be immediately visible upon creation.
    ///
    /// The default is `true`.
    pub visible: bool,
    /// Whether the window should be transparent. If this is true, writing colors
    /// with alpha values different than `1.0` will produce a transparent window.
    ///
    /// The default is `false`.
    pub transparent: bool,
    /// Whether the window should have borders and bars.
    ///
    /// The default is `true`.
    pub decorations: bool,
    /// Whether the window should always be on top of other windows.
    ///
    /// The default is `false`.
    pub always_on_top: bool,
    /// The window icon.
    ///
    /// The default is `None`.
    pub window_icon: Option<Icon>,
}
impl Default for WindowAttributes {
    /// Returns the documented per-field defaults.
    #[inline]
    fn default() -> WindowAttributes {
        // Initializer order mirrors the struct declaration for easy auditing.
        WindowAttributes {
            inner_size: None,
            min_inner_size: None,
            max_inner_size: None,
            resizable: true,
            fullscreen: None,
            title: "winit window".to_owned(),
            maximized: false,
            visible: true,
            transparent: false,
            decorations: true,
            always_on_top: false,
            window_icon: None,
        }
    }
}
impl WindowBuilder {
/// Initializes a new `WindowBuilder` with default values.
#[inline]
pub fn new() -> WindowBuilder {
    WindowBuilder {
        // Both halves start from their `Default` impls; customize via `with_*`.
        window: Default::default(),
        platform_specific: Default::default(),
    }
}
/// Requests the window to be of specific dimensions. Returns `self` for chaining.
#[inline]
pub fn with_inner_size(mut self, size: LogicalSize) -> WindowBuilder {
    self.window.inner_size = Some(size);
    self
}
/// Sets a minimum dimension size for the window. Returns `self` for chaining.
#[inline]
pub fn with_min_inner_size(mut self, min_size: LogicalSize) -> WindowBuilder {
    self.window.min_inner_size = Some(min_size);
    self
}
/// Sets a maximum dimension size for the window. Returns `self` for chaining.
#[inline]
pub fn with_max_inner_size(mut self, max_size: LogicalSize) -> WindowBuilder {
    self.window.max_inner_size = Some(max_size);
    self
}
/// Sets whether the window is resizable or not.
///
/// Note that making the window unresizable doesn't exempt you from handling `Resized`, as that event can still be
/// triggered by DPI scaling, entering fullscreen mode, etc.
///
/// ## Platform-specific
///
/// This only has an effect on desktop platforms.
///
/// Due to a bug in XFCE, this has no effect on Xfwm.
#[inline]
pub fn with_resizable(mut self, resizable: bool) -> WindowBuilder {
    self.window.resizable = resizable;
    self
}
/// Requests a specific title for the window.
#[inline]
pub fn with_title<T: Into<String>>(mut self, title: T) -> WindowBuilder {
    // Generic over `Into<String>` so both `&str` and `String` are accepted.
    self.window.title = title.into();
    self
}
/// Sets the window fullscreen state. None means a normal window, Some(Fullscreen)
/// means a fullscreen window on that specific monitor
///
/// ## Platform-specific
///
/// - **Windows:** Screen saver is disabled in fullscreen mode.
#[inline]
pub fn with_fullscreen(mut self, monitor: Option<Fullscreen>) -> WindowBuilder {
    self.window.fullscreen = monitor;
    self
}
/// Requests maximized mode.
#[inline]
pub fn with_maximized(mut self, maximized: bool) -> WindowBuilder {
    self.window.maximized = maximized;
    self
}
/// Sets whether the window will be initially hidden or visible.
#[inline]
pub fn with_visible(mut self, visible: bool) -> WindowBuilder {
    self.window.visible = visible;
    self
}
/// Sets whether the background of the window should be transparent.
#[inline]
pub fn with_transparent(mut self, transparent: bool) -> WindowBuilder {
    self.window.transparent = transparent;
    self
}
/// Sets whether the window should have a border, a title bar, etc.
#[inline]
pub fn with_decorations(mut self, decorations: bool) -> WindowBuilder {
    self.window.decorations = decorations;
    self
}
/// Sets whether or not the window will always be on top of other windows.
#[inline]
pub fn with_always_on_top(mut self, always_on_top: bool) -> WindowBuilder {
    self.window.always_on_top = always_on_top;
    self
}
/// Sets the window icon. On Windows and X11, this is typically the small icon in the top-left
/// corner of the titlebar.
///
/// ## Platform-specific
///
/// This only has an effect on Windows and X11.
///
/// On Windows, this sets `ICON_SMALL`. The base size for a window icon is 16x16, but it's
/// recommended to account for screen scaling and pick a multiple of that, i.e. 32x32.
///
/// X11 has no universal guidelines for icon sizes, so you're at the whims of the WM. That
/// said, it's usually in the same ballpark as on Windows.
#[inline]
pub fn with_window_icon(mut self, window_icon: Option<Icon>) -> WindowBuilder {
    // `None` clears any previously requested icon.
    self.window.window_icon = window_icon;
    self
}
/// Builds the window.
///
/// Possible causes of error include denied permission, incompatible system, and lack of memory.
#[inline]
pub fn build<T: 'static>(
    self,
    window_target: &EventLoopWindowTarget<T>,
) -> Result<Window, OsError> {
    // The platform backend consumes the accumulated attributes and wraps the
    // result in the public `Window` type.
    platform_impl::Window::new(&window_target.p, self.window, self.platform_specific)
        .map(|window| Window { window })
}
}
/// Base Window functions.
impl Window {
/// Creates a new Window for platforms where this is appropriate.
///
/// This function is equivalent to `WindowBuilder::new().build(event_loop)`.
///
/// Error should be very rare and only occur in case of permission denied, incompatible system,
/// out of memory, etc.
#[inline]
pub fn new<T: 'static>(event_loop: &EventLoopWindowTarget<T>) -> Result<Window, OsError> {
    // Convenience wrapper: all defaults, no platform-specific attributes.
    let builder = WindowBuilder::new();
    builder.build(event_loop)
}
/// Returns an identifier unique to the window.
#[inline]
pub fn id(&self) -> WindowId {
    // Wraps the backend id in the public newtype.
    WindowId(self.window.id())
}
/// Returns the DPI factor that can be used to map logical pixels to physical pixels, and vice versa.
///
/// See the [`dpi`](../dpi/index.html) module for more information.
///
/// Note that this value can change depending on user action (for example if the window is
/// moved to another screen); as such, tracking `WindowEvent::HiDpiFactorChanged` events is
/// the most robust way to track the DPI you need to use to draw.
///
/// ## Platform-specific
///
/// - **X11:** This respects Xft.dpi, and can be overridden using the `WINIT_HIDPI_FACTOR` environment variable.
/// - **Android:** Always returns 1.0.
/// - **iOS:** Can only be called on the main thread. Returns the underlying `UIView`'s
/// [`contentScaleFactor`].
///
/// [`contentScaleFactor`]: https://developer.apple.com/documentation/uikit/uiview/1622657-contentscalefactor?language=objc
#[inline]
pub fn hidpi_factor(&self) -> f64 {
    // Delegates to the platform backend; see the doc comment above for caveats.
    self.window.hidpi_factor()
}
/// Emits a `WindowEvent::RedrawRequested` event in the associated event loop after all OS
/// events have been processed by the event loop.
///
/// This is the **strongly encouraged** method of redrawing windows, as it can integrate with
/// OS-requested redraws (e.g. when a window gets resized).
///
/// This function can cause `RedrawRequested` events to be emitted after `Event::EventsCleared`
/// but before `Event::NewEvents` if called in the following circumstances:
/// * While processing `EventsCleared`.
/// * While processing a `RedrawRequested` event that was sent during `EventsCleared` or any
/// directly subsequent `RedrawRequested` event.
///
/// ## Platform-specific
///
/// - **iOS:** Can only be called on the main thread.
#[inline]
pub fn request_redraw(&self) {
    // Delegates to the platform backend; semantics documented above.
    self.window.request_redraw()
}
}
/// Position and size functions.
impl Window {
/// Returns the position of the top-left hand corner of the window's client area relative to the
/// top-left hand corner of the desktop.
///
/// The same conditions that apply to `outer_position` apply to this method.
///
/// ## Platform-specific
///
/// - **iOS:** Can only be called on the main thread. Returns the top left coordinates of the
/// window's [safe area] in the screen space coordinate system.
///
/// [safe area]: https://developer.apple.com/documentation/uikit/uiview/2891103-safeareainsets?language=objc
#[inline]
pub fn inner_position(&self) -> Result<LogicalPosition, NotSupportedError> {
    // Delegates to the platform backend.
    self.window.inner_position()
}
/// Returns the position of the top-left hand corner of the window relative to the
/// top-left hand corner of the desktop.
///
/// Note that the top-left hand corner of the desktop is not necessarily the same as
/// the screen. If the user uses a desktop with multiple monitors, the top-left hand corner
/// of the desktop is the top-left hand corner of the monitor at the top-left of the desktop.
///
/// The coordinates can be negative if the top-left hand corner of the window is outside
/// of the visible screen region.
///
/// ## Platform-specific
///
/// - **iOS:** Can only be called on the main thread. Returns the top left coordinates of the
/// window in the screen space coordinate system.
#[inline]
pub fn outer_position(&self) -> Result<LogicalPosition, NotSupportedError> {
    // Delegates to the platform backend.
    self.window.outer_position()
}
/// Modifies the position of the window.
///
/// See `outer_position` for more information about the coordinates.
///
/// This is a no-op if the window has already been closed.
///
/// ## Platform-specific
///
/// - **iOS:** Can only be called on the main thread. Sets the top left coordinates of the
/// window in the screen space coordinate system.
#[inline]
pub fn set_outer_position(&self, position: LogicalPosition) {
    // Delegates to the platform backend; no-op if the window was closed (see docs above).
    self.window.set_outer_position(position)
}
/// Returns the logical size of the window's client area.
///
/// The client area is the content of the window, excluding the title bar and borders.
///
/// Converting the returned `LogicalSize` to `PhysicalSize` produces the size your framebuffer should be.
///
/// ## Platform-specific
///
/// - **iOS:** Can only be called on the main thread. Returns the `LogicalSize` of the window's
/// [safe area] in screen space coordinates.
///
/// [safe area]: https://developer.apple.com/documentation/uikit/uiview/2891103-safeareainsets?language=objc
#[inline]
pub fn inner_size(&self) -> LogicalSize {
    // Delegates to the platform backend.
    self.window.inner_size()
}
/// Modifies the inner size of the window.
///
/// See `inner_size` for more information about the values.
///
/// ## Platform-specific
///
/// - **iOS:** Unimplemented. Currently this panics, as it's not clear what `set_inner_size`
/// would mean for iOS.
#[inline]
pub fn set_inner_size(&self, size: LogicalSize) {
    // Delegates to the platform backend (panics on iOS per the docs above).
    self.window.set_inner_size(size)
}
/// Returns the logical size of the entire window.
///
/// These dimensions include the title bar and borders. If you don't want that (and you usually don't),
/// use `inner_size` instead.
///
/// ## Platform-specific
///
/// - **iOS:** Can only be called on the main thread. Returns the `LogicalSize` of the window in
/// screen space coordinates.
#[inline]
pub fn outer_size(&self) -> LogicalSize {
    // Delegates to the platform backend.
    self.window.outer_size()
}
/// Sets a minimum dimension size for the window.
///
/// ## Platform-specific
///
/// - **iOS:** Has no effect.
#[inline]
pub fn set_min_inner_size(&self, dimensions: Option<LogicalSize>) {
    // `None` removes the minimum size constraint.
    self.window.set_min_inner_size(dimensions)
}
/// Sets a maximum dimension size for the window.
///
/// ## Platform-specific
///
/// - **iOS:** Has no effect.
#[inline]
pub fn set_max_inner_size(&self, dimensions: Option<LogicalSize>) {
    // `None` removes the maximum size constraint.
    self.window.set_max_inner_size(dimensions)
}
}
/// Misc. attribute functions.
impl Window {
/// Modifies the title of the window.
///
/// ## Platform-specific
///
/// - Has no effect on iOS.
#[inline]
pub fn set_title(&self, title: &str) {
    // Delegates to the platform backend.
    self.window.set_title(title)
}
/// Modifies the window's visibility.
///
/// If `false`, this will hide the window. If `true`, this will show the window.
/// ## Platform-specific
///
/// - **Android:** Has no effect.
/// - **iOS:** Can only be called on the main thread.
#[inline]
pub fn set_visible(&self, visible: bool) {
    // Delegates to the platform backend.
    self.window.set_visible(visible)
}
/// Sets whether the window is resizable or not.
///
/// Note that making the window unresizable doesn't exempt you from handling `Resized`, as that event can still be
/// triggered by DPI scaling, entering fullscreen mode, etc.
///
/// ## Platform-specific
///
/// This only has an effect on desktop platforms.
///
/// Due to a bug in XFCE, this has no effect on Xfwm.
///
/// ## Platform-specific
///
/// - **iOS:** Has no effect.
#[inline]
pub fn set_resizable(&self, resizable: bool) {
    // Delegates to the platform backend.
    self.window.set_resizable(resizable)
}
/// Sets the window to maximized or back.
///
/// ## Platform-specific
///
/// - **iOS:** Has no effect.
#[inline]
pub fn set_maximized(&self, maximized: bool) {
self.window.set_maximized(maximized)
}
/// Sets the window to fullscreen or back.
///
/// ## Platform-specific
///
/// - **macOS:** `Fullscreen::Exclusive` provides true exclusive mode with a
/// video mode change. *Caveat!* macOS doesn't provide task switching (or
/// spaces!) while in exclusive fullscreen mode. This mode should be used
/// when a video mode change is desired, but for a better user experience,
/// borderless fullscreen might be preferred.
///
/// `Fullscreen::Borderless` provides a borderless fullscreen window on a
/// separate space. This is the idiomatic way for fullscreen games to work
/// on macOS. See [`WindowExtMacOs::set_simple_fullscreen`][simple] if
/// separate spaces are not preferred.
///
/// The dock and the menu bar are always disabled in fullscreen mode.
/// - **iOS:** Can only be called on the main thread.
/// - **Wayland:** Does not support exclusive fullscreen mode.
/// - **Windows:** Screen saver is disabled in fullscreen mode.
///
/// [simple]:
/// ../platform/macos/trait.WindowExtMacOS.html#tymethod.set_simple_fullscreen
#[inline]
pub fn set_fullscreen(&self, fullscreen: Option<Fullscreen>) {
self.window.set_fullscreen(fullscreen)
}
/// Gets the window's current fullscreen state.
///
/// ## Platform-specific
///
/// - **iOS:** Can only be called on the main thread.
#[inline]
pub fn fullscreen(&self) -> Option<Fullscreen> {
self.window.fullscreen()
}
/// Turn window decorations on or off.
///
/// ## Platform-specific
///
/// - **iOS:** Has no effect.
#[inline]
pub fn set_decorations(&self, decorations: bool) {
self.window.set_decorations(decorations)
}
/// Change whether or not the window will always be on top of other windows.
///
/// ## Platform-specific
///
/// - **iOS:** Has no effect.
#[inline]
pub fn set_always_on_top(&self, always_on_top: bool) {
self.window.set_always_on_top(always_on_top)
}
/// Sets the window icon. On Windows and X11, this is typically the small icon in the top-left
/// corner of the titlebar.
///
/// For more usage notes, see `WindowBuilder::with_window_icon`.
///
/// ## Platform-specific
///
/// This only has an effect on Windows and X11.
#[inline]
pub fn set_window_icon(&self, window_icon: Option<Icon>) {
self.window.set_window_icon(window_icon)
}
/// Sets location of IME candidate box in client area coordinates relative to the top left.
///
/// ## Platform-specific
///
/// **iOS:** Has no effect.
#[inline]
pub fn set_ime_position(&self, position: LogicalPosition) {
self.window.set_ime_position(position)
}
}
/// Cursor functions.
impl Window {
    /// Modifies the cursor icon of the window.
    ///
    /// ## Platform-specific
    ///
    /// - **iOS:** Has no effect.
    /// - **Android:** Has no effect.
    #[inline]
    pub fn set_cursor_icon(&self, cursor: CursorIcon) {
        self.window.set_cursor_icon(cursor);
    }
    /// Changes the position of the cursor in window coordinates.
    ///
    /// ## Platform-specific
    ///
    /// - **iOS:** Always returns an `Err`.
    #[inline]
    pub fn set_cursor_position(&self, position: LogicalPosition) -> Result<(), ExternalError> {
        self.window.set_cursor_position(position)
    }
    /// Grabs the cursor, preventing it from leaving the window.
    ///
    /// ## Platform-specific
    ///
    /// - **macOS:** This presently merely locks the cursor in a fixed location, which looks visually
    ///   awkward.
    /// - **Android:** Has no effect.
    /// - **iOS:** Always returns an `Err`.
    #[inline]
    pub fn set_cursor_grab(&self, grab: bool) -> Result<(), ExternalError> {
        self.window.set_cursor_grab(grab)
    }
    /// Modifies the cursor's visibility.
    ///
    /// If `false`, this will hide the cursor. If `true`, this will show the cursor.
    ///
    /// ## Platform-specific
    ///
    /// - **Windows:** The cursor is only hidden within the confines of the window.
    /// - **X11:** The cursor is only hidden within the confines of the window.
    /// - **macOS:** The cursor is hidden as long as the window has input focus, even if the cursor is
    ///   outside of the window.
    /// - **iOS:** Has no effect.
    /// - **Android:** Has no effect.
    #[inline]
    pub fn set_cursor_visible(&self, visible: bool) {
        self.window.set_cursor_visible(visible)
    }
}
/// Monitor info functions.
impl Window {
    /// Returns the monitor on which the window currently resides.
    ///
    /// ## Platform-specific
    ///
    /// **iOS:** Can only be called on the main thread.
    #[inline]
    pub fn current_monitor(&self) -> MonitorHandle {
        self.window.current_monitor()
    }
    /// Returns the list of all the monitors available on the system.
    ///
    /// This is the same as `EventLoop::available_monitors`, and is provided for convenience.
    ///
    /// ## Platform-specific
    ///
    /// **iOS:** Can only be called on the main thread.
    #[inline]
    pub fn available_monitors(&self) -> AvailableMonitorsIter {
        // Wrap the platform iterator in the public-facing iterator type.
        let data = self.window.available_monitors();
        AvailableMonitorsIter {
            data: data.into_iter(),
        }
    }
    /// Returns the primary monitor of the system.
    ///
    /// This is the same as `EventLoop::primary_monitor`, and is provided for convenience.
    ///
    /// ## Platform-specific
    ///
    /// **iOS:** Can only be called on the main thread.
    #[inline]
    pub fn primary_monitor(&self) -> MonitorHandle {
        // NOTE(review): unlike `current_monitor`, this explicitly wraps the
        // platform handle — presumably the backend returns the inner type
        // here; confirm against the platform module.
        MonitorHandle {
            inner: self.window.primary_monitor(),
        }
    }
}
// NOTE(review): `HasRawWindowHandle` is an unsafe trait; soundness rests on
// the platform window returning a handle that is valid for the window's
// lifetime — upheld by the backend, not checkable here.
unsafe impl raw_window_handle::HasRawWindowHandle for Window {
    fn raw_window_handle(&self) -> raw_window_handle::RawWindowHandle {
        self.window.raw_window_handle()
    }
}
/// Describes the appearance of the mouse cursor.
///
/// Most variants appear to mirror the similarly named CSS `cursor` keywords.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum CursorIcon {
    /// The platform-dependent default cursor.
    Default,
    /// A simple crosshair.
    Crosshair,
    /// A hand (often used to indicate links in web browsers).
    Hand,
    /// Self explanatory.
    Arrow,
    /// Indicates something is to be moved.
    Move,
    /// Indicates text that may be selected or edited.
    Text,
    /// Program busy indicator.
    Wait,
    /// Help indicator (often rendered as a "?")
    Help,
    /// Progress indicator. Shows that processing is being done. But in contrast
    /// with "Wait" the user may still interact with the program. Often rendered
    /// as a spinning beach ball, or an arrow with a watch or hourglass.
    Progress,
    /// Cursor showing that something cannot be done.
    NotAllowed,
    /// Indicates a context menu is available.
    ContextMenu,
    /// Indicates a table cell or set of cells may be selected.
    Cell,
    /// Indicates vertical text that may be selected or edited.
    VerticalText,
    /// Indicates an alias or shortcut is to be created.
    Alias,
    /// Indicates something is to be copied.
    Copy,
    /// Indicates that the dragged item cannot be dropped at the current location.
    NoDrop,
    /// Indicates something can be grabbed (dragged to be moved).
    Grab,
    /// Indicates something is currently being grabbed (dragged to be moved).
    Grabbing,
    /// Indicates that scrolling in any direction is possible.
    AllScroll,
    /// Indicates that something can be zoomed in.
    ZoomIn,
    /// Indicates that something can be zoomed out.
    ZoomOut,
    /// Indicate that some edge is to be moved. For example, the 'SeResize' cursor
    /// is used when the movement starts from the south-east corner of the box.
    EResize,
    NResize,
    NeResize,
    NwResize,
    SResize,
    SeResize,
    SwResize,
    WResize,
    /// Bidirectional resize cursors (east-west, north-south, diagonal), plus
    /// column and row resize.
    EwResize,
    NsResize,
    NeswResize,
    NwseResize,
    ColResize,
    RowResize,
}
impl Default for CursorIcon {
fn default() -> Self {
CursorIcon::Default
}
}
/// The fullscreen mode requested via `Window::set_fullscreen`.
#[derive(Clone, Debug, PartialEq)]
pub enum Fullscreen {
    /// Exclusive fullscreen using the given video mode (may involve a video
    /// mode change; see `Window::set_fullscreen` for platform caveats).
    Exclusive(VideoMode),
    /// Borderless fullscreen window on the given monitor.
    Borderless(MonitorHandle),
}
| 32.570175 | 167 | 0.626255 |
bbd230fd4a56a60655c448e3f38ab81ddbc5ac75 | 113,744 | // Copyright Materialize, Inc. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
use std::cmp::max;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::fmt;
use std::io::Read;
use std::{cell::RefCell, iter, rc::Rc};
use anyhow::{anyhow, bail};
use byteorder::{BigEndian, ByteOrder, NetworkEndian, WriteBytesExt};
use chrono::format::{DelayedFormat, StrftimeItems};
use chrono::{NaiveDateTime, Timelike};
use itertools::Itertools;
use lazy_static::lazy_static;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use serde_json::json;
use sha2::Sha256;
use uuid::Uuid;
use mz_avro::schema::{
resolve_schemas, RecordField, Schema, SchemaFingerprint, SchemaNode, SchemaPiece,
SchemaPieceOrNamed,
};
use mz_avro::{
define_unexpected,
error::{DecodeError, Error as AvroError},
give_value,
types::{DecimalValue, Scalar, Value},
AvroArrayAccess, AvroDecode, AvroDeserializer, AvroRead, AvroRecordAccess, GeneralDeserializer,
StatefulAvroDecodeable, TrivialDecoder, ValueDecoder, ValueOrReader,
};
use repr::adt::decimal::{Significand, MAX_DECIMAL_PRECISION};
use repr::adt::jsonb::{JsonbPacker, JsonbRef};
use repr::{ColumnName, ColumnType, Datum, RelationDesc, Row, RowPacker, ScalarType};
use ordered_float::OrderedFloat;
use smallvec::SmallVec;
lazy_static! {
    // TODO(rkhaitan): this schema intentionally omits the data_collections field
    // that is typically present in Debezium transaction metadata topics. See
    // https://debezium.io/documentation/reference/connectors/postgresql.html#postgresql-transaction-metadata
    // for more information. We chose to omit this field because it is redundant
    // for sinks where each consistency topic corresponds to exactly one sink.
    // We will need to add it in order to be able to reingest sinked topics.
    //
    // Avro schema of the records written to a sink's consistency topic.
    static ref DEBEZIUM_TRANSACTION_SCHEMA: Schema =
        Schema::parse(&json!({
            "type": "record",
            "name": "envelope",
            "fields": [
                {
                    "name": "id",
                    "type": "string"
                },
                {
                    "name": "status",
                    "type": "string"
                },
                {
                    "name": "event_count",
                    "type": [
                        "null",
                        "long"
                    ]
                }
            ]
        })).expect("valid schema constructed");
}
/// Validates an Avro key schema for use as a source.
///
/// An Avro key schema is valid for our purposes iff every field mentioned in
/// the key schema exists in the specified relation type with the same type.
/// If the schema is valid, returns a vector describing the order and position
/// of the primary key columns.
pub fn validate_key_schema(
    key_schema: &str,
    value_desc: &RelationDesc,
) -> anyhow::Result<Vec<usize>> {
    let key_schema = parse_schema(key_schema)?;
    let key_desc = validate_schema_1(key_schema.top_node())?;
    // Map every key column onto the value column of the same name, insisting
    // that the types line up exactly.
    let mut indices = Vec::with_capacity(key_desc.len());
    for (name, key_type) in key_desc.iter() {
        let (index, value_type) = match value_desc.get_by_name(name) {
            Some(hit) => hit,
            None => bail!("Value schema missing primary key column: {}", name),
        };
        if key_type != value_type {
            bail!(
                "key and value column types do not match: key {:?} vs. value {:?}",
                key_type,
                value_type,
            );
        }
        indices.push(index);
    }
    Ok(indices)
}
/// How raw records from a source encode inserts, updates, and deletes.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum EnvelopeType {
    /// No envelope: the value schema is used as the row schema directly.
    None,
    /// Debezium change-event envelope (`before`/`after`/`source` fields).
    Debezium,
    /// Upsert envelope; the value schema must be a record.
    Upsert,
    /// Materialize CDC v2 envelope (see the `cdc_v2` module).
    CdcV2,
}
// See https://rusanu.com/2012/01/17/what-is-an-lsn-log-sequence-number/
//
// A SQL Server log sequence number, parsed from Debezium's
// `xxxxxxxx:xxxxxxxx:xxxx` hex encoding (see `decode_change_lsn`).
#[derive(Debug, Copy, Clone)]
struct MSSqlLsn {
    // Virtual log file sequence number (first 8 hex digits).
    file_seq_num: u32,
    // Log block offset within the file (middle 8 hex digits).
    log_block_offset: u32,
    // Slot number within the log block (last 4 hex digits).
    slot_num: u16,
}
/// Database-specific coordinates identifying a row's position in the
/// upstream transaction log, as reported by Debezium's `source` field.
#[derive(Debug, Copy, Clone)]
enum RowCoordinates {
    /// MySQL binlog position: byte offset (`pos`) plus the row's index
    /// within the binlog event (`row`).
    MySql {
        pos: usize,
        row: usize,
    },
    /// Postgres write-ahead log sequence number.
    Postgres {
        lsn: usize,
    },
    /// SQL Server change LSN plus the event's serial number within that LSN.
    MSSql {
        change_lsn: MSSqlLsn,
        event_serial_no: usize,
    },
    /// No recognized coordinate information was present.
    Unknown,
}
/// Metadata decoded from a Debezium `source` record.
#[derive(Debug)]
pub struct DebeziumSourceCoordinates {
    // Whether this event was emitted during an initial snapshot.
    snapshot: bool,
    // The row's position in the upstream database's log.
    row: RowCoordinates,
}
/// Decoder for Debezium's `source` metadata record.
struct DebeziumSourceDecoder<'a> {
    // Scratch buffer that receives the binlog file name (MySQL sources only).
    file_buf: &'a mut Vec<u8>,
}
/// Decoder that copies an Avro string's raw bytes into a borrowed buffer.
struct AvroStringDecoder<'a> {
    pub buf: &'a mut Vec<u8>,
}
impl<'a> AvroStringDecoder<'a> {
    /// Wraps the given buffer; decoding overwrites its contents.
    pub fn with_buf(buf: &'a mut Vec<u8>) -> Self {
        Self { buf }
    }
}
impl<'a> AvroDecode for AvroStringDecoder<'a> {
    type Out = ();
    /// Copies the decoded string's bytes into the borrowed buffer,
    /// replacing its previous contents.
    fn string<'b, R: AvroRead>(
        self,
        r: ValueOrReader<'b, &'b str, R>,
    ) -> Result<Self::Out, AvroError> {
        match r {
            ValueOrReader::Value(val) => {
                // The string is already in memory; copy it over directly.
                self.buf.clear();
                self.buf.extend_from_slice(val.as_bytes());
            }
            ValueOrReader::Reader { len, r } => {
                // Size the buffer first, then fill it from the reader.
                self.buf.resize_with(len, Default::default);
                r.read_exact(self.buf)?;
            }
        }
        Ok(())
    }
    define_unexpected! {
        record, union_branch, array, map, enum_variant, scalar, decimal, bytes, json, uuid, fixed
    }
}
/// The value of Debezium's `source.snapshot` field. Older connectors encode
/// it as a boolean; newer ones as the string `"true"`, `"last"`, or `"false"`
/// (see `AvroDbzSnapshotDecoder` below).
#[derive(Debug, PartialEq, Eq)]
enum DbzSnapshot {
    True,
    /// The final record of the snapshot.
    Last,
    False,
}
/// Decoder for the `snapshot` field that tolerates both encodings.
struct AvroDbzSnapshotDecoder;
impl AvroDecode for AvroDbzSnapshotDecoder {
    type Out = Option<DbzSnapshot>;
    // The field is typically wrapped in a union (e.g. `["null", ...]`);
    // recurse into whichever branch was selected with the same decoder.
    fn union_branch<'a, R: AvroRead, D: AvroDeserializer>(
        self,
        _idx: usize,
        _n_variants: usize,
        _null_variant: Option<usize>,
        deserializer: D,
        reader: &'a mut R,
    ) -> Result<Self::Out, AvroError> {
        deserializer.deserialize(reader, self)
    }
    // Boolean encoding: only `true`/`false` are expressible (never `Last`).
    fn scalar(self, scalar: Scalar) -> Result<Self::Out, AvroError> {
        match scalar {
            Scalar::Null => Ok(None),
            Scalar::Boolean(val) => Ok(Some(if val {
                DbzSnapshot::True
            } else {
                DbzSnapshot::False
            })),
            _ => {
                Err(DecodeError::Custom("`snapshot` value had unexpected type".to_string()).into())
            }
        }
    }
    // String encoding: "true", "last", or "false".
    fn string<'a, R: AvroRead>(
        self,
        r: ValueOrReader<'a, &'a str, R>,
    ) -> Result<Self::Out, AvroError> {
        // Small inline buffer: the valid values are at most 5 bytes long.
        let mut s = SmallVec::<[u8; 8]>::new();
        let s = match r {
            ValueOrReader::Value(val) => val.as_bytes(),
            ValueOrReader::Reader { len, r } => {
                s.resize_with(len, Default::default);
                r.read_exact(&mut s)?;
                &s
            }
        };
        Ok(Some(match s {
            b"true" => DbzSnapshot::True,
            b"last" => DbzSnapshot::Last,
            b"false" => DbzSnapshot::False,
            _ => {
                return Err(DecodeError::Custom(format!(
                    "`snapshot` had unexpected value {}",
                    String::from_utf8_lossy(s)
                ))
                .into())
            }
        }))
    }
    define_unexpected! {
        record, array, map, enum_variant, decimal, bytes, json, uuid, fixed
    }
}
/// Parses a Debezium-encoded SQL Server change LSN.
///
/// SQL Server change LSNs are 10-byte integers. Debezium encodes them as hex
/// in the format `xxxxxxxx:xxxxxxxx:xxxx`. Returns `None` for any input that
/// does not match that shape exactly.
fn decode_change_lsn(input: &str) -> Option<MSSqlLsn> {
    // Reject anything that is not exactly 22 bytes with colons at the
    // expected positions. (Checking the colon bytes also guarantees the
    // slice boundaries below fall on char boundaries.)
    let bytes = input.as_bytes();
    if bytes.len() != 22 || bytes[8] != b':' || bytes[17] != b':' {
        return None;
    }
    let hex_u32 = |range: std::ops::Range<usize>| u32::from_str_radix(&input[range], 16).ok();
    Some(MSSqlLsn {
        file_seq_num: hex_u32(0..8)?,
        log_block_offset: hex_u32(9..17)?,
        slot_num: u16::from_str_radix(&input[18..22], 16).ok()?,
    })
}
impl<'a> AvroDecode for DebeziumSourceDecoder<'a> {
    type Out = DebeziumSourceCoordinates;
    /// Decodes a Debezium `source` record, extracting the snapshot flag and
    /// whichever database-specific row coordinates are present.
    fn record<R: AvroRead, A: AvroRecordAccess<R>>(
        self,
        a: &mut A,
    ) -> Result<Self::Out, AvroError> {
        let mut snapshot = false;
        // Binlog file "pos" and "row" - present in MySQL sources.
        let mut pos = None;
        let mut row = None;
        // "log sequence number" - monotonically increasing log offset in Postgres
        let mut lsn = None;
        // SQL Server lsn - 10-byte, hex-encoded value.
        // and "event_serial_no" - serial number of the event, when there is more than one per LSN.
        let mut change_lsn = None;
        let mut event_serial_no = None;
        let mut has_file = false;
        while let Some((name, _, field)) = a.next_field()? {
            match name {
                "snapshot" => {
                    let d = AvroDbzSnapshotDecoder;
                    let maybe_snapshot = field.decode_field(d)?;
                    // `Last` still counts as being inside the snapshot.
                    snapshot = match maybe_snapshot {
                        None | Some(DbzSnapshot::False) => false,
                        Some(DbzSnapshot::True) | Some(DbzSnapshot::Last) => true,
                    };
                }
                // MySQL
                "pos" => {
                    let next = ValueDecoder;
                    let val = field.decode_field(next)?;
                    pos = Some(val.into_integral().ok_or_else(|| {
                        DecodeError::Custom("\"pos\" is not an integer".to_string())
                    })?);
                }
                "row" => {
                    let next = ValueDecoder;
                    let val = field.decode_field(next)?;
                    row = Some(val.into_integral().ok_or_else(|| {
                        DecodeError::Custom("\"row\" is not an integer".to_string())
                    })?);
                }
                "file" => {
                    // The binlog file name lands in the caller-provided buffer.
                    let d = AvroStringDecoder::with_buf(self.file_buf);
                    field.decode_field(d)?;
                    has_file = true;
                }
                // Postgres
                "lsn" => {
                    let next = ValueDecoder;
                    let val = field.decode_field(next)?;
                    // Unwrap a possible nullable-union wrapper.
                    let val = match val {
                        Value::Union { inner, .. } => *inner,
                        val => val,
                    };
                    lsn = Some(val.into_integral().ok_or_else(|| {
                        DecodeError::Custom("\"lsn\" is not an integer".to_string())
                    })?);
                }
                // SQL Server
                "change_lsn" => {
                    let next = ValueDecoder;
                    let val = field.decode_field(next)?;
                    let val = match val {
                        Value::Union { inner, .. } => *inner,
                        val => val,
                    };
                    match val {
                        Value::Null => {}
                        Value::String(s) => {
                            if let Some(i) = decode_change_lsn(&s) {
                                change_lsn = Some(i);
                            } else {
                                return Err(AvroError::Decode(DecodeError::Custom(format!(
                                    "Couldn't decode MS SQL LSN: {}",
                                    s
                                ))));
                            }
                        }
                        _ => {
                            return Err(AvroError::Decode(DecodeError::Custom(
                                "\"change_lsn\" is not a string".to_string(),
                            )))
                        }
                    }
                }
                "event_serial_no" => {
                    let next = ValueDecoder;
                    let val = field.decode_field(next)?;
                    let val = match val {
                        Value::Union { inner, .. } => *inner,
                        val => val,
                    };
                    event_serial_no = match val {
                        Value::Null => None,
                        Value::Int(i) => Some(i.into()),
                        Value::Long(i) => Some(i),
                        val => {
                            return Err(AvroError::Decode(DecodeError::Custom(format!(
                                "\"event_serial_no\" is not an integer: {:?}",
                                val
                            ))))
                        }
                    };
                }
                _ => {
                    // Skip fields we don't care about.
                    field.decode_field(TrivialDecoder)?;
                }
            }
        }
        // A record must carry coordinates from at most one upstream database.
        let mysql_any = pos.is_some() || row.is_some() || has_file;
        let pg_any = lsn.is_some();
        let mssql_any = change_lsn.is_some() || event_serial_no.is_some();
        if (mysql_any as usize) + (pg_any as usize) + (mssql_any as usize) > 1 {
            return Err(DecodeError::Custom(
                "Found source coordinate information for multiple databases - we don't know how to interpret this.".to_string()).into());
        }
        // Within a database's coordinate set, all fields must be present.
        let row = if mysql_any {
            let pos = pos.ok_or_else(|| DecodeError::Custom("no pos".to_string()))? as usize;
            let row = row.ok_or_else(|| DecodeError::Custom("no row".to_string()))? as usize;
            if !has_file {
                return Err(DecodeError::Custom("no file".to_string()).into());
            }
            RowCoordinates::MySql { pos, row }
        } else if pg_any {
            let lsn = lsn.ok_or_else(|| DecodeError::Custom("no lsn".to_string()))? as usize;
            RowCoordinates::Postgres { lsn }
        } else if mssql_any {
            let change_lsn =
                change_lsn.ok_or_else(|| DecodeError::Custom("no change_lsn".to_string()))?;
            let event_serial_no = event_serial_no
                .ok_or_else(|| DecodeError::Custom("no event_serial_no".to_string()))?
                as usize;
            RowCoordinates::MSSql {
                change_lsn,
                event_serial_no,
            }
        } else {
            RowCoordinates::Unknown
        };
        Ok(DebeziumSourceCoordinates { snapshot, row })
    }
    define_unexpected! {
        union_branch, array, map, enum_variant, scalar, decimal, bytes, string, json, uuid, fixed
    }
}
/// Decodes a union of `null` and a record, packing the record (if present)
/// into `packer` and reporting whether a row was decoded.
struct OptionalRowDecoder<'a> {
    pub packer: &'a mut RowPacker,
    pub buf: &'a mut Vec<u8>,
}
impl<'a> AvroDecode for OptionalRowDecoder<'a> {
    // `true` iff the non-null branch was taken and a row was packed.
    type Out = bool;
    fn union_branch<'b, R: AvroRead, D: AvroDeserializer>(
        self,
        idx: usize,
        _n_variants: usize,
        null_variant: Option<usize>,
        deserializer: D,
        reader: &'b mut R,
    ) -> Result<Self::Out, AvroError> {
        if Some(idx) == null_variant {
            // we are done, the row is null!
            Ok(false)
        } else {
            // Delegate decoding of the record itself to the flat decoder.
            let d = AvroFlatDecoder {
                packer: self.packer,
                buf: self.buf,
                is_top: true,
            };
            deserializer.deserialize(reader, d)?;
            Ok(true)
        }
    }
    define_unexpected! {
        record, array, map, enum_variant, scalar, decimal, bytes, string, json, uuid, fixed
    }
}
/// Decodes a full Debezium envelope (`before`/`after`/`source`) into a
/// `DiffPair` of rows plus optional source coordinates.
#[derive(Debug)]
pub struct AvroDebeziumDecoder<'a> {
    pub packer: &'a mut RowPacker,
    // Scratch buffer for string/bytes payloads.
    pub buf: &'a mut Vec<u8>,
    // Scratch buffer for the MySQL binlog file name from `source`.
    pub file_buf: &'a mut Vec<u8>,
}
impl<'a> AvroDecode for AvroDebeziumDecoder<'a> {
    type Out = (DiffPair<Row>, Option<DebeziumSourceCoordinates>);
    fn record<R: AvroRead, A: AvroRecordAccess<R>>(
        self,
        a: &mut A,
    ) -> Result<Self::Out, AvroError> {
        let mut before = None;
        let mut after = None;
        let mut coords = None;
        while let Some((name, _, field)) = a.next_field()? {
            match name {
                "before" => {
                    let d = OptionalRowDecoder {
                        packer: self.packer,
                        buf: self.buf,
                    };
                    let decoded_row = field.decode_field(d)?;
                    before = if decoded_row {
                        // Append -1 to the packed row — presumably the diff
                        // weight for a retraction; confirm against callers.
                        self.packer.push(Datum::Int64(-1));
                        Some(self.packer.finish_and_reuse())
                    } else {
                        None
                    }
                }
                "after" => {
                    let d = OptionalRowDecoder {
                        packer: self.packer,
                        buf: self.buf,
                    };
                    let decoded_row = field.decode_field(d)?;
                    after = if decoded_row {
                        // Append +1 — the diff weight for an insertion.
                        self.packer.push(Datum::Int64(1));
                        Some(self.packer.finish_and_reuse())
                    } else {
                        None
                    }
                }
                "source" => {
                    let d = DebeziumSourceDecoder {
                        file_buf: self.file_buf,
                    };
                    coords = Some(field.decode_field(d)?);
                }
                _ => {
                    // Skip fields we don't care about (e.g. `op`, `ts_ms`).
                    field.decode_field(TrivialDecoder)?;
                }
            }
        }
        Ok((DiffPair { before, after }, coords))
    }
    define_unexpected! {
        union_branch, array, map, enum_variant, scalar, decimal, bytes, string, json, uuid, fixed
    }
}
/// Decodes an entire record into a fresh `Row` using shared scratch state.
struct RowDecoder {
    // Shared packer and scratch buffer, reused across decodes.
    state: (Rc<RefCell<RowPacker>>, Rc<RefCell<Vec<u8>>>),
}
impl AvroDecode for RowDecoder {
    type Out = RowWrapper;
    fn record<R: AvroRead, A: AvroRecordAccess<R>>(
        self,
        a: &mut A,
    ) -> Result<Self::Out, AvroError> {
        let mut packer_borrow = self.state.0.borrow_mut();
        let mut buf_borrow = self.state.1.borrow_mut();
        // Delegate field decoding to the flat decoder, then take the
        // finished row out of the packer.
        let inner = AvroFlatDecoder {
            packer: &mut packer_borrow,
            buf: &mut buf_borrow,
            is_top: true,
        };
        inner.record(a)?;
        let row = packer_borrow.finish_and_reuse();
        Ok(RowWrapper(row))
    }
    define_unexpected! {
        union_branch, array, map, enum_variant, scalar, decimal, bytes, string, json, uuid, fixed
    }
}
// Get around orphan rule: newtype over `Row` so we can implement
// `StatefulAvroDecodeable` for it in this crate.
#[derive(Debug)]
struct RowWrapper(Row);
impl StatefulAvroDecodeable for RowWrapper {
    type Decoder = RowDecoder;
    // TODO - can we make this some sort of &'a mut (RowPacker, Vec<u8>) without
    // running into lifetime crap?
    type State = (Rc<RefCell<RowPacker>>, Rc<RefCell<Vec<u8>>>);
    fn new_decoder(state: Self::State) -> Self::Decoder {
        Self::Decoder { state }
    }
}
/// Decodes an Avro value directly into `packer`, flattening nested structure:
/// the top-level record's fields become consecutive datums, while nested
/// records and arrays become packed lists.
#[derive(Debug)]
pub struct AvroFlatDecoder<'a> {
    pub packer: &'a mut RowPacker,
    // Scratch buffer for string/bytes payloads read from a reader.
    pub buf: &'a mut Vec<u8>,
    // True only for the outermost record, which is packed flat rather than
    // as a nested list.
    pub is_top: bool,
}
impl<'a> AvroDecode for AvroFlatDecoder<'a> {
    type Out = ();
    // Decode a record. The top-level record's fields are packed flat into
    // the row; nested records are packed as lists.
    #[inline]
    fn record<R: AvroRead, A: AvroRecordAccess<R>>(
        self,
        a: &mut A,
    ) -> Result<Self::Out, AvroError> {
        let mut str_buf = std::mem::take(self.buf);
        let mut pack_record = |rp: &mut RowPacker| -> Result<(), AvroError> {
            let mut expected = 0;
            let mut stash = vec![];
            // The idea here is that if the deserializer gives us fields in the order we're expecting,
            // we can decode them directly into the row.
            // If not, we need to decode them into a Value (the old, slow decoding path) and stash them,
            // so that we can put everything in the right order at the end.
            //
            // TODO(btv) - this is pretty bad, as a misordering at the top of the schema graph will
            // cause the _entire_ chunk under it to be decoded in the slow way!
            // Maybe instead, we should decode to separate sub-RowPackers and then add an API
            // to RowPacker that just copies in the bytes from another one.
            while let Some((_name, idx, f)) = a.next_field()? {
                let dec = AvroFlatDecoder {
                    packer: rp,
                    buf: &mut str_buf,
                    is_top: false,
                };
                if idx == expected {
                    // Fast path: field arrived in schema order.
                    expected += 1;
                    f.decode_field(dec)?;
                } else {
                    // Slow path: decode to a Value and stash for reordering.
                    let next = ValueDecoder;
                    let val = f.decode_field(next)?;
                    stash.push((idx, val));
                }
            }
            stash.sort_by_key(|(idx, _val)| *idx);
            for (idx, val) in stash {
                assert!(idx == expected);
                expected += 1;
                let dec = AvroFlatDecoder {
                    packer: rp,
                    buf: &mut str_buf,
                    is_top: false,
                };
                give_value(dec, &val)?;
            }
            Ok(())
        };
        if self.is_top {
            pack_record(self.packer)?;
        } else {
            self.packer.push_list_with(pack_record)?;
        }
        *self.buf = str_buf;
        Ok(())
    }
    // Decode a union. Each non-null variant corresponds to one output
    // column; the uninhabited variants' columns are filled with nulls.
    #[inline]
    fn union_branch<'b, R: AvroRead, D: AvroDeserializer>(
        self,
        idx: usize,
        n_variants: usize,
        null_variant: Option<usize>,
        deserializer: D,
        reader: &'b mut R,
    ) -> Result<Self::Out, AvroError> {
        if null_variant == Some(idx) {
            // The null branch: every non-null variant's column is null.
            for _ in 0..n_variants - 1 {
                self.packer.push(Datum::Null)
            }
        } else {
            let mut deserializer = Some(deserializer);
            for i in 0..n_variants {
                let dec = AvroFlatDecoder {
                    packer: self.packer,
                    buf: self.buf,
                    is_top: false,
                };
                if null_variant != Some(i) {
                    if i == idx {
                        deserializer.take().unwrap().deserialize(reader, dec)?;
                    } else {
                        self.packer.push(Datum::Null)
                    }
                }
            }
        }
        Ok(())
    }
    // Enum variants are packed as their symbol name.
    #[inline]
    fn enum_variant(self, symbol: &str, _idx: usize) -> Result<Self::Out, AvroError> {
        self.packer.push(Datum::String(symbol));
        Ok(())
    }
    #[inline]
    fn scalar(self, scalar: mz_avro::types::Scalar) -> Result<Self::Out, AvroError> {
        match scalar {
            mz_avro::types::Scalar::Null => self.packer.push(Datum::Null),
            mz_avro::types::Scalar::Boolean(val) => {
                if val {
                    self.packer.push(Datum::True)
                } else {
                    self.packer.push(Datum::False)
                }
            }
            mz_avro::types::Scalar::Int(val) => self.packer.push(Datum::Int32(val)),
            mz_avro::types::Scalar::Long(val) => self.packer.push(Datum::Int64(val)),
            mz_avro::types::Scalar::Float(val) => {
                self.packer.push(Datum::Float32(OrderedFloat(val)))
            }
            mz_avro::types::Scalar::Double(val) => {
                self.packer.push(Datum::Float64(OrderedFloat(val)))
            }
            mz_avro::types::Scalar::Date(val) => self.packer.push(Datum::Date(val)),
            mz_avro::types::Scalar::Timestamp(val) => self.packer.push(Datum::Timestamp(val)),
        }
        Ok(())
    }
    // Decimals are packed from their two's-complement big-endian bytes.
    #[inline]
    fn decimal<'b, R: AvroRead>(
        self,
        _precision: usize,
        _scale: usize,
        r: ValueOrReader<'b, &'b [u8], R>,
    ) -> Result<Self::Out, AvroError> {
        let buf = match r {
            ValueOrReader::Value(val) => val,
            ValueOrReader::Reader { len, r } => {
                self.buf.resize_with(len, Default::default);
                r.read_exact(self.buf)?;
                &self.buf
            }
        };
        self.packer.push(Datum::Decimal(
            Significand::from_twos_complement_be(buf)
                .map_err(|e| DecodeError::Custom(e.to_string()))?,
        ));
        Ok(())
    }
    #[inline]
    fn bytes<'b, R: AvroRead>(
        self,
        r: ValueOrReader<'b, &'b [u8], R>,
    ) -> Result<Self::Out, AvroError> {
        let buf = match r {
            ValueOrReader::Value(val) => val,
            ValueOrReader::Reader { len, r } => {
                self.buf.resize_with(len, Default::default);
                r.read_exact(self.buf)?;
                &self.buf
            }
        };
        self.packer.push(Datum::Bytes(buf));
        Ok(())
    }
    #[inline]
    fn string<'b, R: AvroRead>(
        self,
        r: ValueOrReader<'b, &'b str, R>,
    ) -> Result<Self::Out, AvroError> {
        let s = match r {
            ValueOrReader::Value(val) => val,
            ValueOrReader::Reader { len, r } => {
                // TODO - this copy is unnecessary,
                // we should special case to just look at the bytes
                // directly when r is &[u8].
                // It probably doesn't make a huge difference though.
                self.buf.resize_with(len, Default::default);
                r.read_exact(self.buf)?;
                std::str::from_utf8(&self.buf).map_err(|_| DecodeError::StringUtf8Error)?
            }
        };
        self.packer.push(Datum::String(s));
        Ok(())
    }
    // JSON payloads are packed via JsonbPacker, which temporarily takes
    // ownership of the packer.
    #[inline]
    fn json<'b, R: AvroRead>(
        self,
        r: ValueOrReader<'b, &'b serde_json::Value, R>,
    ) -> Result<Self::Out, AvroError> {
        match r {
            ValueOrReader::Value(val) => {
                *self.packer = JsonbPacker::new(std::mem::take(self.packer))
                    .pack_serde_json(val.clone())
                    .map_err(|e| DecodeError::Custom(e.to_string()))?;
            }
            ValueOrReader::Reader { len, r } => {
                self.buf.resize_with(len, Default::default);
                r.read_exact(self.buf)?;
                *self.packer = JsonbPacker::new(std::mem::take(self.packer))
                    .pack_slice(&self.buf)
                    .map_err(|e| DecodeError::Custom(e.to_string()))?;
            }
        }
        Ok(())
    }
    // UUIDs arrive as their string representation and are parsed.
    #[inline]
    fn uuid<'b, R: AvroRead>(
        self,
        r: ValueOrReader<'b, &'b [u8], R>,
    ) -> Result<Self::Out, AvroError> {
        let buf = match r {
            ValueOrReader::Value(val) => val,
            ValueOrReader::Reader { len, r } => {
                self.buf.resize_with(len, Default::default);
                r.read_exact(self.buf)?;
                &self.buf
            }
        };
        let s = std::str::from_utf8(&buf).map_err(|_e| DecodeError::UuidUtf8Error)?;
        self.packer.push(Datum::Uuid(
            Uuid::parse_str(s).map_err(DecodeError::BadUuid)?,
        ));
        Ok(())
    }
    // Fixed-size byte payloads are treated the same as variable bytes.
    #[inline]
    fn fixed<'b, R: AvroRead>(
        self,
        r: ValueOrReader<'b, &'b [u8], R>,
    ) -> Result<Self::Out, AvroError> {
        self.bytes(r)
    }
    // Arrays are packed as lists; each element is decoded recursively.
    #[inline]
    fn array<A: AvroArrayAccess>(mut self, a: &mut A) -> Result<Self::Out, AvroError> {
        self.is_top = false;
        let mut str_buf = std::mem::take(self.buf);
        self.packer.push_list_with(|rp| -> Result<(), AvroError> {
            loop {
                let next = AvroFlatDecoder {
                    packer: rp,
                    buf: &mut str_buf,
                    is_top: false,
                };
                if a.decode_next(next)?.is_none() {
                    break;
                }
            }
            Ok(())
        })?;
        *self.buf = str_buf;
        Ok(())
    }
    define_unexpected! {map}
}
/// Converts an Apache Avro schema into a list of column names and types.
pub fn validate_value_schema(
schema: &str,
envelope: EnvelopeType,
) -> anyhow::Result<Vec<(ColumnName, ColumnType)>> {
let schema = parse_schema(schema)?;
let node = schema.top_node();
let row_schema = match envelope {
EnvelopeType::Debezium => {
// The top-level record needs to be a diff "envelope" that contains
// `before` and `after` fields, where the `before` and `after` fields
// have the same schema.
match node.inner {
SchemaPiece::Record { fields, .. } => {
let before = fields.iter().find(|f| f.name == "before");
let after = fields.iter().find(|f| f.name == "after");
match (before, after) {
(Some(before), Some(after)) => {
let left = node.step(&before.schema);
let right = node.step(&after.schema);
match (left.inner, right.inner) {
(SchemaPiece::Union(before), SchemaPiece::Union(after)) => {
if before.variants().len() != 2 {
bail!("Source schema 'before' field has the wrong number of variants");
}
if after.variants().len() != 2 {
bail!("Source schema 'after' field has the wrong number of variants");
}
let before_null =
before.variants().iter().position(|s| is_null(s));
let after_null =
before.variants().iter().position(|s| is_null(s));
if before_null != after_null {
bail!("Source schema 'before' and 'after' fields do not match.");
}
let null_idx = match before_null {
Some(null_idx) => null_idx,
None => bail!("Source schema 'before'/'after' fields are not of expected type")
};
let record_idx = 1 - null_idx;
let (before_piece, after_piece) = (
&before.variants()[record_idx],
&after.variants()[record_idx],
);
let before_name = match before_piece {
SchemaPieceOrNamed::Piece(_) => bail!(
"Source schema 'before' field should be a record."
),
SchemaPieceOrNamed::Named(name) => name,
};
let after_name = match after_piece {
SchemaPieceOrNamed::Piece(_) => {
bail!("Source schema 'after' field should be a record.")
}
SchemaPieceOrNamed::Named(name) => name,
};
if before_name != after_name {
bail!("Source schema 'before' and 'after' fields should be the same named record.");
}
match schema.lookup(*before_name).piece {
SchemaPiece::Record { .. } => node.step(before_piece),
_ => bail!("Source schema 'before' and 'after' fields should contain a record."),
}
}
(_, SchemaPiece::Union(_)) => {
bail!("Source schema 'before' field should be a union.")
}
(SchemaPiece::Union(_), _) => {
bail!("Source schema 'after' field should be a union.")
}
(_, _) => bail!(
"Source schema 'before' and 'after' fields should be unions."
),
}
}
(None, _) => bail!("source schema is missing 'before' field"),
(_, None) => bail!("source schema is missing 'after' field"),
}
}
_ => bail!("Top-level envelope must be a record."),
}
}
EnvelopeType::Upsert => match node.inner {
SchemaPiece::Record { .. } => schema.top_node(),
_ => bail!("upsert schema can only be record, got: {:?}", schema.top),
},
EnvelopeType::CdcV2 => cdc_v2::extract_data_columns(&schema)?,
EnvelopeType::None => schema.top_node(),
};
// The diff envelope is sane. Convert the actual record schema for the row.
validate_schema_1(row_schema)
}
/// Converts a record schema into the flat list of named, typed columns.
fn validate_schema_1(schema: SchemaNode) -> anyhow::Result<Vec<(ColumnName, ColumnType)>> {
    // Only record schemas can describe a row; reject everything else.
    let fields = match schema.inner {
        SchemaPiece::Record { fields, .. } => fields,
        _ => bail!("row schemas must be records, got: {:?}", schema.inner),
    };
    // Track named types along the current path so recursive schemas are
    // rejected inside `get_named_columns`.
    let mut seen_avro_nodes = Default::default();
    let mut columns = Vec::new();
    for field in fields {
        let named = get_named_columns(
            &mut seen_avro_nodes,
            schema.step(&field.schema),
            &field.name,
        )?;
        columns.extend(named);
    }
    Ok(columns)
}
/// Flattens one record field (possibly a union) into one or more named columns.
///
/// `seen_avro_nodes` tracks named types on the current traversal path so
/// that recursively defined schemas are rejected instead of looping forever.
fn get_named_columns<'a>(
    seen_avro_nodes: &mut HashSet<usize>,
    schema: SchemaNode<'a>,
    base_name: &str,
) -> anyhow::Result<Vec<(ColumnName, ColumnType)>> {
    if let SchemaPiece::Union(us) = schema.inner {
        let mut columns = vec![];
        let vs = us.variants();
        if vs.is_empty() || (vs.len() == 1 && is_null(&vs[0])) {
            bail!(anyhow!("Empty or null-only unions are not supported"));
        } else {
            for (i, v) in vs.iter().filter(|v| !is_null(v)).enumerate() {
                let named_idx = match v {
                    SchemaPieceOrNamed::Named(idx) => Some(*idx),
                    _ => None,
                };
                if let Some(named_idx) = named_idx {
                    if !seen_avro_nodes.insert(named_idx) {
                        bail!(
                            "Recursively defined type in schema: {}",
                            v.get_human_name(schema.root)
                        );
                    }
                }
                let node = schema.step(v);
                if let SchemaPiece::Union(_) = node.inner {
                    unreachable!("Internal error: directly nested avro union!");
                }
                let name = if vs.len() == 1 || (vs.len() == 2 && vs.iter().any(is_null)) {
                    // There is only one non-null variant in the
                    // union, so we can use the field name directly.
                    base_name.to_string()
                } else {
                    // There are multiple non-null variants in the
                    // union, so we need to invent field names for
                    // each variant.
                    format!("{}{}", &base_name, i + 1)
                };
                // If there is more than one variant in the union,
                // the column's output type is nullable, as this
                // column will be null whenever it is uninhabited.
                let ty = validate_schema_2(seen_avro_nodes, node)?;
                columns.push((name.into(), ty.nullable(vs.len() > 1)));
                // Pop the named type off the path now that we've descended.
                if let Some(named_idx) = named_idx {
                    seen_avro_nodes.remove(&named_idx);
                }
            }
        }
        Ok(columns)
    } else {
        // Non-union fields map to exactly one non-nullable column.
        let scalar_type = validate_schema_2(seen_avro_nodes, schema)?;
        Ok(vec![(base_name.into(), scalar_type.nullable(false))])
    }
}
/// Converts a non-union Avro schema node into the corresponding Materialize
/// scalar type, rejecting unsupported and recursively defined types.
///
/// `seen_avro_nodes` carries the set of named nodes currently being
/// traversed; records and arrays push/pop entries around their descent so
/// self-referential schemas produce an error rather than infinite recursion.
fn validate_schema_2(
    seen_avro_nodes: &mut HashSet<usize>,
    schema: SchemaNode,
) -> anyhow::Result<ScalarType> {
    Ok(match schema.inner {
        SchemaPiece::Null => bail!("null outside of union types is not supported"),
        SchemaPiece::Boolean => ScalarType::Bool,
        SchemaPiece::Int => ScalarType::Int32,
        SchemaPiece::Long => ScalarType::Int64,
        SchemaPiece::Float => ScalarType::Float32,
        SchemaPiece::Double => ScalarType::Float64,
        SchemaPiece::Date => ScalarType::Date,
        // Both milli- and micro-second precision timestamps map to the same
        // output type.
        SchemaPiece::TimestampMilli => ScalarType::Timestamp,
        SchemaPiece::TimestampMicro => ScalarType::Timestamp,
        SchemaPiece::Decimal {
            precision, scale, ..
        } => {
            if *precision > MAX_DECIMAL_PRECISION as usize {
                bail!(
                    "decimals with precision greater than {} are not supported",
                    MAX_DECIMAL_PRECISION
                )
            }
            ScalarType::Decimal(*precision as u8, *scale as u8)
        }
        SchemaPiece::Bytes | SchemaPiece::Fixed { .. } => ScalarType::Bytes,
        SchemaPiece::String | SchemaPiece::Enum { .. } => ScalarType::String,
        SchemaPiece::Json => ScalarType::Jsonb,
        SchemaPiece::Uuid => ScalarType::Uuid,
        SchemaPiece::Record { fields, .. } => {
            let mut columns = vec![];
            for f in fields {
                // Push the named node (if any) before descending so a schema
                // that references itself is caught as recursion.
                let named_idx = match &f.schema {
                    SchemaPieceOrNamed::Named(idx) => Some(*idx),
                    _ => None,
                };
                if let Some(named_idx) = named_idx {
                    if !seen_avro_nodes.insert(named_idx) {
                        bail!(
                            "Recursively defined type in schema: {}",
                            f.schema.get_human_name(schema.root)
                        );
                    }
                }
                let next_node = schema.step(&f.schema);
                columns.extend(
                    get_named_columns(seen_avro_nodes, next_node, &f.name)?
                        .into_iter()
                        // We strip out the nullability flag, because ScalarType::Record
                        // fields are always nullable.
                        .map(|(name, coltype)| (name, coltype.scalar_type)),
                );
                if let Some(named_idx) = named_idx {
                    seen_avro_nodes.remove(&named_idx);
                }
            }
            ScalarType::Record { fields: columns }
        }
        SchemaPiece::Array(inner) => {
            let named_idx = match inner.as_ref() {
                SchemaPieceOrNamed::Named(idx) => Some(*idx),
                _ => None,
            };
            if let Some(named_idx) = named_idx {
                if !seen_avro_nodes.insert(named_idx) {
                    bail!(
                        "Recursively defined type in schema: {}",
                        inner.get_human_name(schema.root)
                    );
                }
            }
            let next_node = schema.step(inner);
            let ret = ScalarType::List(Box::new(validate_schema_2(seen_avro_nodes, next_node)?));
            if let Some(named_idx) = named_idx {
                seen_avro_nodes.remove(&named_idx);
            }
            ret
        }
        _ => bail!("Unsupported type in schema: {:?}", schema.inner),
    })
}
/// Parses a JSON-encoded Avro schema from a string.
pub fn parse_schema(schema: &str) -> anyhow::Result<Schema> {
    let json = serde_json::from_str(schema)?;
    let parsed = Schema::parse(&json)?;
    Ok(parsed)
}
/// Reports whether `schema` is exactly the `null` primitive type.
fn is_null(schema: &SchemaPieceOrNamed) -> bool {
    matches!(schema, SchemaPieceOrNamed::Piece(SchemaPiece::Null))
}
/// Appends the Avro value `v` to `row`, using schema node `n` to interpret
/// union values.
///
/// Unions are flattened: one datum slot exists per non-null variant; the
/// inhabited variant's value goes in its slot and every other slot receives
/// `Datum::Null`. Fixed, array, map, and record values are rejected here.
fn pack_value(v: Value, mut row: RowPacker, n: SchemaNode) -> anyhow::Result<RowPacker> {
    match v {
        Value::Null => row.push(Datum::Null),
        Value::Boolean(true) => row.push(Datum::True),
        Value::Boolean(false) => row.push(Datum::False),
        Value::Int(i) => row.push(Datum::Int32(i)),
        Value::Long(i) => row.push(Datum::Int64(i)),
        Value::Float(f) => row.push(Datum::Float32((f).into())),
        Value::Double(f) => row.push(Datum::Float64((f).into())),
        Value::Date(d) => row.push(Datum::Date(d)),
        Value::Timestamp(d) => row.push(Datum::Timestamp(d)),
        Value::Decimal(DecimalValue { unscaled, .. }) => row.push(Datum::Decimal(
            Significand::from_twos_complement_be(&unscaled)?,
        )),
        Value::Bytes(b) => row.push(Datum::Bytes(&b)),
        Value::String(s) | Value::Enum(_ /* idx */, s) => row.push(Datum::String(&s)),
        Value::Union { index, inner, .. } => {
            // The union payload is consumed exactly once, in the iteration
            // matching the inhabited (non-null) variant.
            let mut v = Some(*inner);
            if let SchemaPiece::Union(us) = n.inner {
                for (var_idx, var_s) in us
                    .variants()
                    .iter()
                    .enumerate()
                    .filter(|(_, s)| !is_null(s))
                {
                    if var_idx == index {
                        let next = n.step(var_s);
                        row = pack_value(v.take().unwrap(), row, next)?;
                    } else {
                        row.push(Datum::Null);
                    }
                }
            } else {
                unreachable!("Avro value out of sync with schema");
            }
        }
        Value::Json(j) => row = JsonbPacker::new(row).pack_serde_json(j)?,
        Value::Uuid(u) => row.push(Datum::Uuid(u)),
        other @ Value::Fixed(..)
        | other @ Value::Array(_)
        | other @ Value::Map(_)
        | other @ Value::Record(_) => bail!("unsupported avro value: {:?}", other),
    };
    Ok(row)
}
/// Unwraps the outer union of a nullable Debezium row value and extracts the
/// inner record via [`extract_row`]; the null variant yields `Ok(None)`.
pub fn extract_nullable_row<'a, I>(v: Value, extra: I, n: SchemaNode) -> anyhow::Result<Option<Row>>
where
    I: IntoIterator<Item = Datum<'a>>,
{
    match v {
        Value::Union { index, inner, .. } => {
            // Step the schema into the variant actually inhabited.
            let variant_schema = match n.inner {
                SchemaPiece::Union(us) => &us.variants()[index],
                _ => unreachable!("Avro value out of sync with schema"),
            };
            extract_row(*inner, extra, n.step(variant_schema))
        }
        other => bail!("unsupported avro value: {:?}", other),
    }
}
/// Extracts a `Row` from an Avro record value, appending the datums in
/// `extra` after the record's own columns; `Value::Null` yields `Ok(None)`.
pub fn extract_row<'a, I>(v: Value, extra: I, n: SchemaNode) -> anyhow::Result<Option<Row>>
where
    I: IntoIterator<Item = Datum<'a>>,
{
    match v {
        Value::Record(fields) => match n.inner {
            SchemaPiece::Record {
                fields: schema_fields,
                ..
            } => {
                let mut row = RowPacker::new();
                // Record values and schema fields are positionally aligned.
                for (i, (_, col)) in fields.into_iter().enumerate() {
                    let f_schema = &schema_fields[i].schema;
                    let f_node = n.step(f_schema);
                    row = pack_value(col, row, f_node)?;
                }
                for d in extra {
                    row.push(d);
                }
                Ok(Some(row.finish()))
            }
            _ => unreachable!("Avro value out of sync with schema"),
        },
        Value::Null => Ok(None),
        _ => bail!("unsupported avro value: {:?}", v),
    }
}
/// Returns the fields of the record schema at node `n`.
///
/// NOTE(review): the previous doc comment here described Debezium envelope
/// extraction and appears to have been copied from elsewhere; it did not
/// describe this function.
///
/// # Panics
///
/// Panics if `n` is not a record node.
fn unwrap_record_fields(n: SchemaNode) -> &[RecordField] {
    if let SchemaPiece::Record { fields, .. } = n.inner {
        fields
    } else {
        panic!("node is not a record!");
    }
}
/// The `before`/`after` halves of a Debezium change event.
///
/// `None` means the corresponding side is absent (e.g. no `before` for an
/// insert, no `after` for a delete, or both `None` for a skipped duplicate).
#[derive(Debug)]
pub struct DiffPair<T> {
    pub before: Option<T>,
    pub after: Option<T>,
}
/// Positions of the Debezium binlog-coordinate fields within the payload
/// schema and its nested `source` record, so values can be fetched by index
/// without name lookups per message.
#[derive(Debug, Clone, Copy)]
pub struct BinlogSchemaIndices {
    /// Index of the "source" field in the payload schema
    source_idx: usize,
    /// Index of the "file" field in the source schema
    source_file_idx: usize,
    /// Index of the "pos" field in the source schema
    source_pos_idx: usize,
    /// Index of the "row" field in the source schema
    source_row_idx: usize,
    /// Index of the "snapshot" field in the source schema
    source_snapshot_idx: usize,
}
impl BinlogSchemaIndices {
    /// Looks up the field indices required for binlog deduplication,
    /// returning `None` if the schema does not have the expected Debezium
    /// shape (a record payload with a record-valued "source" field carrying
    /// "file", "pos", "row", and "snapshot").
    pub fn new_from_schema(top_node: SchemaNode) -> Option<Self> {
        let top_indices = field_indices(top_node)?;
        let source_idx = *top_indices.get("source")?;
        let source_node = top_node.step(&unwrap_record_fields(top_node)[source_idx].schema);
        let source_indices = field_indices(source_node)?;
        Some(Self {
            source_idx,
            source_file_idx: *source_indices.get("file")?,
            source_pos_idx: *source_indices.get("pos")?,
            source_row_idx: *source_indices.get("row")?,
            source_snapshot_idx: *source_indices.get("snapshot")?,
        })
    }
}
/// Ordered means we can trust Debezium high water marks
///
/// In standard operation, Debezium should always emit messages in position order, but
/// messages may be duplicated.
///
/// For example, this is a legal stream of Debezium event positions:
///
/// ```text
/// 1 2 3 2
/// ```
///
/// Note that `2` appears twice, but the *first* time it appeared it appeared in order.
/// Any position below the highest-ever seen position is guaranteed to be a duplicate,
/// and can be ignored.
///
/// Now consider this stream:
///
/// ```text
/// 1 3 2
/// ```
///
/// In this case, `2` is sent *out* of order, and if it is ignored we will miss important
/// state.
///
/// It is possible for users to do things with multiple databases and multiple Debezium
/// instances pointing at the same Kafka topic that mean that the Debezium guarantees do
/// not hold, in which case we are required to track individual messages, instead of just
/// the highest-ever-seen message.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum DebeziumDeduplicationStrategy {
    /// We can trust high water mark
    Ordered,
    /// We need to store some piece of state for every message
    Full,
    /// Store state for every message, but only trust it inside a time window
    FullInRange {
        /// When to begin pre-filling tracking data; defaults to one hour
        /// before `start` when built via `full_in_range`.
        pad_start: Option<NaiveDateTime>,
        /// When to start trusting the tracked state over the high water mark
        start: NaiveDateTime,
        /// When to discard all tracking data
        end: NaiveDateTime,
    },
}
impl DebeziumDeduplicationStrategy {
    /// Create a deduplication strategy with start and end times
    ///
    /// Accepts `YYYY-MM-DD`, `YYYY-MM-DD HH:MM:SS`, or
    /// `YYYY-MM-DD HH:MM:SS.FF` datetime specifiers.
    ///
    /// Returns an error if either datetime does not parse, or if there is no time in between them
    pub fn full_in_range(
        start: &str,
        end: &str,
        pad_start: Option<&str>,
    ) -> anyhow::Result<DebeziumDeduplicationStrategy> {
        // Try the most specific formats first, falling back to date-only.
        let fallback_parse = |s: &str| {
            for format in &["%Y-%m-%d %H:%M:%S%.f", "%Y-%m-%d %H:%M:%S"] {
                if let Ok(dt) = NaiveDateTime::parse_from_str(s, format) {
                    return Ok(dt);
                }
            }
            if let Ok(d) = chrono::NaiveDate::parse_from_str(s, "%Y-%m-%d") {
                return Ok(d.and_hms(0, 0, 0));
            }
            // Fix: the message previously lacked the closing quote after
            // the final format example.
            bail!(
                "UTC DateTime specifier '{}' should match 'YYYY-MM-DD', 'YYYY-MM-DD HH:MM:SS' or \
                 'YYYY-MM-DD HH:MM:SS.FF'",
                s
            )
        };
        let start = fallback_parse(start)?;
        let end = fallback_parse(end)?;
        let pad_start = pad_start.map(fallback_parse).transpose()?;
        if start >= end {
            bail!(
                "Debezium deduplication start {} is not before end {}",
                start,
                end
            );
        }
        Ok(DebeziumDeduplicationStrategy::FullInRange {
            start,
            end,
            pad_start,
        })
    }
}
/// Track whether or not we should skip a specific debezium message
#[derive(Debug)]
struct DebeziumDeduplicationState {
    /// Last recorded (pos, row, offset) for each MySQL binlog file.
    /// (Or "", in the Postgres case)
    ///
    /// [`DebeziumDeduplicationStrategy`] determines whether messages that are not ahead
    /// of the last recorded pos/row will be skipped.
    binlog_offsets: HashMap<Vec<u8>, (usize, usize, Option<i64>)>,
    /// Whether or not to track every message we've ever seen
    full: Option<TrackFull>,
    /// Whether we have printed a warning due to seeing unknown source coordinates
    warned_on_unknown: bool,
}
/// If we need to deal with debezium possibly going back after it hasn't seen things.
/// During normal (non-snapshot) operation, we deduplicate based on binlog position: (pos, row), for MySQL.
/// During the initial snapshot, (pos, row) values are all the same, but primary keys
/// are unique and thus we can deduplicate based on those.
#[derive(Debug)]
struct TrackFull {
    /// binlog filename to (offset to (timestamp that this binlog entry was first seen))
    seen_offsets: HashMap<Box<[u8]>, HashMap<(usize, usize), i64>>,
    /// binlog filename to the set of primary-key rows seen during the snapshot
    seen_snapshot_keys: HashMap<Box<[u8]>, HashSet<Row>>,
    /// The highest-ever seen timestamp, used in logging to let us know how far backwards time might go
    max_seen_time: i64,
    /// Sorted positions of the primary-key columns; `None` disables
    /// snapshot deduplication.
    key_indices: Option<Vec<usize>>,
    /// Optimization to avoid re-allocating the row packer over and over when extracting the key.
    key_buf: RowPacker,
    /// Time window outside of which tracked state is not trusted; `None`
    /// means track-full applies unconditionally.
    range: Option<TrackRange>,
}
/// When to start and end full-range tracking
///
/// All values are milliseconds since the unix epoch and are meant to be compared to the
/// `upstream_time_millis` argument to [`DebeziumDeduplicationState::should_use_record`].
///
/// We throw away all tracking data after we see the first record past `end`.
#[derive(Debug)]
struct TrackRange {
    /// Start pre-filling the seen data before we start trusting it
    ///
    /// At some point we need to start trusting the [`TrackFull::seen_offsets`] map more
    /// than we trust the Debezium high water mark. In order to do that, the
    /// `seen_offsets` map must have some data, otherwise all records would show up as
    /// new immediately at the phase transition.
    ///
    /// For example, consider the following series of records, presented vertically in
    /// the order that they were received:
    ///
    /// ```text
    /// ts  val
    /// -------
    /// 1   a
    /// 2   b
    /// 1   a
    /// ```
    ///
    /// If we start tracking at ts 2 and immediately start trusting the hashmap more than
    /// the Debezium high water mark then ts 1 will be falsely double-inserted. So we
    /// need to start building a buffer before we can start trusting it.
    ///
    /// `pad_start` is the upstream_time_millis at which we start building the buffer, and
    /// [`TrackRange::start`] is the point at which we start trusting the buffer.
    /// Currently `pad_start` defaults to 1 hour (wall clock time) before `start`,
    /// as a value that seems overwhelmingly likely to cause the buffer to always have
    /// enough data that it doesn't give incorrect answers.
    pad_start: i64,
    start: i64,
    end: i64,
}
impl TrackFull {
fn from_keys(mut key_indices: Option<Vec<usize>>) -> Self {
if let Some(key_indices) = key_indices.as_mut() {
key_indices.sort_unstable();
}
Self {
seen_offsets: Default::default(),
seen_snapshot_keys: Default::default(),
max_seen_time: 0,
key_indices,
key_buf: Default::default(),
range: None,
}
}
fn from_keys_in_range(
key_indices: Option<Vec<usize>>,
start: NaiveDateTime,
end: NaiveDateTime,
pad_start: Option<NaiveDateTime>,
) -> Self {
let mut tracker = Self::from_keys(key_indices);
let pad_start = pad_start
.unwrap_or_else(|| (start - chrono::Duration::hours(1)))
.timestamp_millis();
tracker.range = Some(TrackRange {
pad_start,
start: start.timestamp_millis(),
end: end.timestamp_millis(),
});
tracker
}
}
impl DebeziumDeduplicationState {
    /// Creates deduplication state for the given strategy; `key_indices`
    /// identifies the primary-key columns used to deduplicate snapshot rows.
    fn new(strat: DebeziumDeduplicationStrategy, key_indices: Option<Vec<usize>>) -> Self {
        let full = match strat {
            DebeziumDeduplicationStrategy::Ordered => None,
            DebeziumDeduplicationStrategy::Full => Some(TrackFull::from_keys(key_indices)),
            DebeziumDeduplicationStrategy::FullInRange {
                start,
                end,
                pad_start,
            } => Some(TrackFull::from_keys_in_range(
                key_indices,
                start,
                end,
                pad_start,
            )),
        };
        DebeziumDeduplicationState {
            binlog_offsets: Default::default(),
            full,
            warned_on_unknown: false,
        }
    }
    /// Decides whether the record at the given source coordinates should be
    /// ingested (`true`) or dropped as a duplicate (`false`), updating the
    /// high-water-mark and (if configured) full tracking state as a side
    /// effect.
    #[must_use]
    #[allow(clippy::too_many_arguments)]
    fn should_use_record(
        &mut self,
        file: &[u8],
        row: RowCoordinates,
        coord: Option<i64>,
        upstream_time_millis: Option<i64>,
        debug_name: &str,
        worker_idx: usize,
        is_snapshot: bool,
        update: &DiffPair<Row>,
    ) -> bool {
        // Normalize all source kinds to a (pos, row) pair within `file`.
        let (pos, row) = match row {
            RowCoordinates::MySql { pos, row } => (pos, row),
            RowCoordinates::Postgres { lsn } => (lsn, 0),
            RowCoordinates::MSSql {
                change_lsn,
                event_serial_no,
            } => {
                // Consider everything but the file ID to be the offset within the file.
                let offset_in_file =
                    ((change_lsn.log_block_offset as usize) << 16) | (change_lsn.slot_num as usize);
                (offset_in_file, event_serial_no)
            }
            RowCoordinates::Unknown => {
                if !self.warned_on_unknown {
                    self.warned_on_unknown = true;
                    log::warn!("Record with unrecognized source coordinates in {}. You might be using an unsupported upstream database.", debug_name);
                }
                return true;
            }
        };
        // If in the initial snapshot, binlog (pos, row) is meaningless for detecting
        // duplicates, since it is always the same.
        let should_skip = if is_snapshot {
            None
        } else {
            match self.binlog_offsets.get_mut(file) {
                Some((old_max_pos, old_max_row, old_offset)) => {
                    if (*old_max_pos, *old_max_row) >= (pos, row) {
                        Some(SkipInfo {
                            old_max_pos,
                            old_max_row,
                            old_offset,
                        })
                    } else {
                        // update the debezium high water mark
                        *old_max_pos = pos;
                        *old_max_row = row;
                        *old_offset = coord;
                        None
                    }
                }
                None => {
                    // The extra lookup is fine - this is the cold path.
                    self.binlog_offsets
                        .insert(file.to_owned(), (pos, row, coord));
                    None
                }
            }
        };
        let mut delete_full = false;
        let should_use = match &mut self.full {
            // Always none if in snapshot, see comment above where `should_skip` is bound.
            None => should_skip.is_none(),
            Some(TrackFull {
                seen_offsets,
                seen_snapshot_keys,
                max_seen_time,
                key_indices,
                key_buf,
                range,
            }) => {
                if is_snapshot {
                    let key_indices = match key_indices.as_ref() {
                        None => {
                            // No keys, so we can't do anything sensible for snapshots.
                            // Return "all OK" and hope their data isn't corrupted.
                            return true;
                        }
                        Some(ki) => ki,
                    };
                    let mut row_iter = match update.after.as_ref() {
                        None => {
                            error!(
                                "Snapshot row at pos {:?}, message_time={} source={} was not an insert.",
                                coord, fmt_timestamp(upstream_time_millis), debug_name);
                            return false;
                        }
                        Some(r) => r.iter(),
                    };
                    // Extract the key columns. `key_indices` is sorted, so
                    // each `nth` skips forward relative to the last one.
                    let key = {
                        let mut cumsum = 0;
                        for k in key_indices.iter() {
                            let adjusted_idx = *k - cumsum;
                            cumsum += adjusted_idx + 1;
                            key_buf.push(row_iter.nth(adjusted_idx).unwrap());
                        }
                        key_buf.finish_and_reuse()
                    };
                    if let Some(seen_keys) = seen_snapshot_keys.get_mut(file) {
                        // Your reaction on reading this code might be:
                        // "Ugh, we are cloning the key row just to support logging a warning!"
                        // But don't worry -- since `Row`s use a 16-byte smallvec, the clone
                        // won't involve an extra allocation unless the key overflows that.
                        //
                        // Anyway, TODO: avoid this via `get_or_insert` once rust-lang/rust#60896 is resolved.
                        let is_new = seen_keys.insert(key.clone());
                        if !is_new {
                            warn!(
                                "Snapshot row with key={:?} source={} seen multiple times (most recent message_time={})",
                                key, debug_name, fmt_timestamp(upstream_time_millis)
                            );
                        }
                        is_new
                    } else {
                        let mut hs = HashSet::new();
                        hs.insert(key);
                        seen_snapshot_keys.insert(file.into(), hs);
                        true
                    }
                } else {
                    *max_seen_time = max(upstream_time_millis.unwrap_or(0), *max_seen_time);
                    if let Some(seen_offsets) = seen_offsets.get_mut(file) {
                        // first check if we are in a special case of range-bounded track full
                        if let Some(range) = range {
                            if let Some(upstream_time_millis) = upstream_time_millis {
                                if upstream_time_millis < range.pad_start {
                                    return should_skip.is_none();
                                }
                                if upstream_time_millis < range.start {
                                    seen_offsets.insert((pos, row), upstream_time_millis);
                                    return should_skip.is_none();
                                }
                                if upstream_time_millis > range.end {
                                    // don't abort early, but we will clean up after this validation
                                    delete_full = true;
                                }
                            }
                        }
                        // Now we know that we are in either trackfull or a range-bounded trackfull
                        let seen = seen_offsets.entry((pos, row));
                        let is_new = matches!(seen, std::collections::hash_map::Entry::Vacant(_));
                        let original_time =
                            seen.or_insert_with(|| upstream_time_millis.unwrap_or(0));
                        log_duplication_info(
                            file,
                            pos,
                            row,
                            coord,
                            upstream_time_millis,
                            debug_name,
                            worker_idx,
                            is_new,
                            &should_skip,
                            original_time,
                            max_seen_time,
                        );
                        is_new
                    } else {
                        let mut hs = HashMap::new();
                        hs.insert((pos, row), upstream_time_millis.unwrap_or(0));
                        seen_offsets.insert(file.into(), hs);
                        true
                    }
                }
            }
        };
        if delete_full {
            info!(
                "Deleting debezium deduplication tracking data source={} message_time={}",
                debug_name,
                fmt_timestamp(upstream_time_millis)
            );
            self.full = None;
        }
        should_use
    }
}
/// Helper to track information for logging on deduplication
///
/// Borrows the high-water-mark entry for the binlog file a skipped record
/// belongs to, so log messages can show what it was compared against.
struct SkipInfo<'a> {
    old_max_pos: &'a usize,
    old_max_row: &'a usize,
    old_offset: &'a Option<i64>,
}
/// Logs the outcome of a deduplication decision at a severity matching how
/// surprising it is: the expected new-record case is silent, duplicates are
/// `debug!`, Debezium-guarantee violations are `warn!`, and "impossible"
/// states are `error!`.
#[allow(clippy::too_many_arguments)]
fn log_duplication_info(
    file: &[u8],
    pos: usize,
    row: usize,
    coord: Option<i64>,
    upstream_time_millis: Option<i64>,
    debug_name: &str,
    worker_idx: usize,
    is_new: bool,
    should_skip: &Option<SkipInfo>,
    original_time: &mut i64,
    max_seen_time: &mut i64,
) {
    // Binlog filenames are usually UTF-8; fall back to hex for logging.
    // The holder keeps the hex string alive for the borrow below.
    let file_name_holder;
    let file_name = match std::str::from_utf8(file) {
        Ok(s) => s,
        Err(_) => {
            file_name_holder = hex::encode(file);
            &file_name_holder
        }
    };
    match (is_new, should_skip) {
        // new item that correctly is past the highest item we've ever seen
        (true, None) => {}
        // new item that violates Debezium "guarantee" that the no new
        // records will ever be sent with a position below the highest
        // position ever seen
        (true, Some(skipinfo)) => {
            warn!(
                "Created a new record behind the highest point in source={}:{} binlog_file={} \
                 new_record_position={}:{} new_record_kafka_offset={} old_max_position={}:{} \
                 message_time={} message_first_seen={} max_seen_time={}",
                debug_name,
                worker_idx,
                file_name,
                pos,
                row,
                coord.unwrap_or(-1),
                skipinfo.old_max_pos,
                skipinfo.old_max_row,
                fmt_timestamp(upstream_time_millis),
                fmt_timestamp(*original_time),
                fmt_timestamp(*max_seen_time),
            );
        }
        // Duplicate item below the highest seen item
        (false, Some(skipinfo)) => {
            debug!(
                "already ingested source={}:{} binlog_coordinates={}:{}:{} old_binlog={}:{} \
                 kafka_offset={} message_time={} message_first_seen={} max_seen_time={}",
                debug_name,
                worker_idx,
                file_name,
                pos,
                row,
                skipinfo.old_max_pos,
                skipinfo.old_max_row,
                skipinfo.old_offset.unwrap_or(-1),
                fmt_timestamp(upstream_time_millis),
                fmt_timestamp(*original_time),
                fmt_timestamp(*max_seen_time),
            );
        }
        // already exists, but is past the debezium high water mark.
        //
        // This should be impossible because we set the high-water mark
        // every time we insert something
        (false, None) => {
            error!(
                "We surprisingly are seeing a duplicate record that \
                 is beyond the highest record we've ever seen. {}:{}:{} kafka_offset={} \
                 message_time={} message_first_seen={} max_seen_time={}",
                file_name,
                pos,
                row,
                coord.unwrap_or(-1),
                fmt_timestamp(upstream_time_millis),
                fmt_timestamp(*original_time),
                fmt_timestamp(*max_seen_time),
            );
        }
    }
}
/// Formats an optional milliseconds-since-epoch timestamp for logging,
/// rendering `None` as the unix epoch.
fn fmt_timestamp(ts: impl Into<Option<i64>>) -> DelayedFormat<StrftimeItems<'static>> {
    let (seconds, nanos) = ts
        .into()
        .map(|ts| (ts / 1000, (ts % 1000) * 1_000_000))
        .unwrap_or((0, 0));
    // Fix: the pattern previously read "%H:%S:%S", which printed the seconds
    // value in the minutes position of every logged timestamp.
    NaiveDateTime::from_timestamp(seconds, nanos as u32).format("%Y-%m-%dT%H:%M:%S%.f")
}
/// Additional context needed for decoding
/// Debezium-formatted data.
#[derive(Debug)]
pub struct DebeziumDecodeState {
    /// Index of the "before" field in the payload schema
    before_idx: usize,
    /// Index of the "after" field in the payload schema
    after_idx: usize,
    /// Deduplication state shared across all messages from this source
    dedup: DebeziumDeduplicationState,
    /// Binlog coordinate field indices; `None` if the schema has no
    /// recognizable "source" record, in which case dedup is skipped.
    binlog_schema_indices: Option<BinlogSchemaIndices>,
    /// Human-readable name used for printing debug information
    debug_name: String,
    /// Worker we are running on (used for printing debug information)
    worker_idx: usize,
}
/// Maps each field name of a record schema node to its positional index,
/// returning `None` if `node` is not a record.
fn field_indices(node: SchemaNode) -> Option<HashMap<String, usize>> {
    match node.inner {
        SchemaPiece::Record { fields, .. } => {
            let mut indices = HashMap::new();
            for (i, field) in fields.iter().enumerate() {
                indices.insert(field.name.clone(), i);
            }
            Some(indices)
        }
        _ => None,
    }
}
/// Moves the value of the field at `idx` out of `fields`, leaving
/// `Value::Null` in its place, after verifying the field name matches
/// `expected_name`.
fn take_field_by_index(
    idx: usize,
    expected_name: &str,
    fields: &mut [(String, Value)],
) -> anyhow::Result<Value> {
    match fields.get_mut(idx) {
        None => bail!(
            "Value does not match schema: \"{}\" field not at index {}",
            expected_name,
            idx
        ),
        Some((name, _)) if name != expected_name => bail!(
            "Value does not match schema: expected \"{}\", found \"{}\"",
            expected_name,
            name
        ),
        Some((_, value)) => Ok(std::mem::replace(value, Value::Null)),
    }
}
impl DebeziumDecodeState {
    /// Builds decode state from the payload schema, returning `None` if the
    /// schema lacks the expected `before`/`after` envelope fields.
    pub fn new(
        schema: &Schema,
        debug_name: String,
        worker_idx: usize,
        dedup_strat: DebeziumDeduplicationStrategy,
    ) -> Option<Self> {
        let top_node = schema.top_node();
        let top_indices = field_indices(top_node)?;
        let before_idx = *top_indices.get("before")?;
        let after_idx = *top_indices.get("after")?;
        let binlog_schema_indices = BinlogSchemaIndices::new_from_schema(top_node);
        Some(Self {
            before_idx,
            after_idx,
            dedup: DebeziumDeduplicationState::new(dedup_strat, None),
            binlog_schema_indices,
            debug_name,
            worker_idx,
        })
    }
    /// Extracts the `before`/`after` rows from a fully parsed Debezium
    /// envelope value, consulting deduplication state (when binlog source
    /// coordinates are available) to drop duplicate messages.
    pub fn extract(
        &mut self,
        v: Value,
        n: SchemaNode,
        coord: Option<i64>,
        upstream_time_millis: Option<i64>,
    ) -> anyhow::Result<DiffPair<Row>> {
        // "snapshot" may be a boolean, a string, or null, possibly wrapped
        // in a union; normalize to Option<bool>.
        fn is_snapshot(v: Value) -> anyhow::Result<Option<bool>> {
            let answer = match v {
                Value::Union { inner, .. } => is_snapshot(*inner)?,
                Value::Boolean(b) => Some(b),
                // Since https://issues.redhat.com/browse/DBZ-1295 ,
                // "snapshot" is three-valued. "last" is the last row
                // in the snapshot, but still part of it.
                Value::String(s) => Some(&s == "true" || &s == "last"),
                Value::Null => None,
                _ => bail!("\"snapshot\" is neither a boolean nor a string"),
            };
            Ok(answer)
        }
        match v {
            Value::Record(mut fields) => {
                if let Some(schema_indices) = self.binlog_schema_indices {
                    let source_val =
                        take_field_by_index(schema_indices.source_idx, "source", &mut fields)?;
                    let mut source_fields = match source_val {
                        Value::Record(fields) => fields,
                        _ => bail!("\"source\" is not a record: {:?}", source_val),
                    };
                    let snapshot_val = take_field_by_index(
                        schema_indices.source_snapshot_idx,
                        "snapshot",
                        &mut source_fields,
                    )?;
                    // Snapshot rows all share the same binlog position, so
                    // position-based dedup only applies outside the snapshot.
                    if is_snapshot(snapshot_val)? != Some(true) {
                        let file_val = take_field_by_index(
                            schema_indices.source_file_idx,
                            "file",
                            &mut source_fields,
                        )?
                        .into_string()
                        .ok_or_else(|| anyhow!("\"file\" is not a string"))?;
                        let pos_val = take_field_by_index(
                            schema_indices.source_pos_idx,
                            "pos",
                            &mut source_fields,
                        )?
                        .into_integral()
                        .ok_or_else(|| anyhow!("\"pos\" is not an integer"))?;
                        let row_val = take_field_by_index(
                            schema_indices.source_row_idx,
                            "row",
                            &mut source_fields,
                        )?
                        .into_integral()
                        .ok_or_else(|| anyhow!("\"row\" is not an integer"))?;
                        let pos = usize::try_from(pos_val)?;
                        let row = usize::try_from(row_val)?;
                        // TODO(btv) Add LSN handling here too (OCF code path).
                        // Better yet, just delete this and make it go through the same code path as Kafka
                        if !self.dedup.should_use_record(
                            file_val.as_bytes(),
                            RowCoordinates::MySql { pos, row },
                            coord,
                            upstream_time_millis,
                            &self.debug_name,
                            self.worker_idx,
                            false,
                            &DiffPair {
                                before: None,
                                after: None,
                            },
                        ) {
                            return Ok(DiffPair {
                                before: None,
                                after: None,
                            });
                        }
                    }
                }
                let before_val = take_field_by_index(self.before_idx, "before", &mut fields)?;
                let after_val = take_field_by_index(self.after_idx, "after", &mut fields)?;
                // we will not have gotten this far if before/after aren't records, so the unwrap is okay.
                let before_node = n.step(&unwrap_record_fields(n)[self.before_idx].schema);
                let after_node = n.step(&unwrap_record_fields(n)[self.after_idx].schema);
                let before =
                    extract_nullable_row(before_val, iter::once(Datum::Int64(-1)), before_node)?;
                let after =
                    extract_nullable_row(after_val, iter::once(Datum::Int64(1)), after_node)?;
                Ok(DiffPair { before, after })
            }
            _ => bail!("avro envelope had unexpected type: {:?}", v),
        }
    }
}
/// Resolves the writer schema for Confluent-framed Avro data, optionally
/// consulting a schema registry cache.
pub struct ConfluentAvroResolver {
    // Schema expected by the consumer of decoded records.
    reader_schema: Schema,
    // `None` when no schema registry is configured; then the reader schema
    // doubles as the writer schema.
    writer_schemas: Option<SchemaCache>,
}
impl ConfluentAvroResolver {
    /// Creates a resolver; `config` enables fetching writer schemas from a
    /// Confluent schema registry (otherwise the reader schema is always used).
    pub fn new(reader_schema: &str, config: Option<ccsr::ClientConfig>) -> anyhow::Result<Self> {
        let reader_schema = parse_schema(reader_schema)?;
        let writer_schemas =
            config.map(|sr| SchemaCache::new(sr, reader_schema.fingerprint::<Sha256>()));
        Ok(Self {
            reader_schema,
            writer_schemas,
        })
    }
    /// Strips the Confluent wire-format header from `bytes`, returning the
    /// remaining Avro datum together with the schema to decode it with.
    pub async fn resolve<'a, 'b>(
        &'a mut self,
        mut bytes: &'b [u8],
    ) -> anyhow::Result<(&'b [u8], &'a Schema)> {
        // The first byte is a magic byte (0) that indicates the Confluent
        // serialization format version, and the next four bytes are a big
        // endian 32-bit schema ID.
        //
        // https://docs.confluent.io/current/schema-registry/docs/serializer-formatter.html#wire-format
        if bytes.len() < 5 {
            bail!(
                "Confluent-style avro datum is too few bytes: expected at least 5 bytes, got {}",
                bytes.len()
            );
        }
        let magic = bytes[0];
        let schema_id = BigEndian::read_i32(&bytes[1..5]);
        bytes = &bytes[5..];
        if magic != 0 {
            bail!(
                "wrong Confluent-style avro serialization magic: expected 0, got {}",
                magic
            );
        }
        let resolved_schema = match &mut self.writer_schemas {
            Some(cache) => cache.get(schema_id, &self.reader_schema).await?,
            // If we haven't been asked to use a schema registry, we have no way
            // to discover the writer's schema. That's ok; we'll just use the
            // reader's schema and hope it lines up.
            None => &self.reader_schema,
        };
        Ok((bytes, resolved_schema))
    }
}
impl fmt::Debug for ConfluentAvroResolver {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("ConfluentAvroResolver")
            .field("reader_schema", &self.reader_schema)
            // The schema cache is not `Debug`, so only report its presence.
            // Fix: this label previously read "write_schema", matching
            // neither the field name (`writer_schemas`) nor English.
            .field(
                "writer_schemas",
                if self.writer_schemas.is_some() {
                    &"some"
                } else {
                    &"none"
                },
            )
            .finish()
    }
}
/// Manages decoding of Avro-encoded bytes.
pub struct Decoder {
    // Resolves the writer schema for each incoming datum.
    csr_avro: ConfluentAvroResolver,
    // Selects between Debezium envelope decoding and flat decoding.
    envelope: EnvelopeType,
    // Present iff `envelope` is Debezium (enforced in `Decoder::new`).
    debezium_dedup: Option<DebeziumDeduplicationState>,
    // Human-readable source name, used in log messages.
    debug_name: String,
    // Worker index, used in log messages.
    worker_index: usize,
    // Scratch buffers reused across `decode` calls to avoid reallocation.
    buf1: Vec<u8>,
    buf2: Vec<u8>,
    packer: RowPacker,
}
impl fmt::Debug for Decoder {
    // TODO - rethink the usefulness of this debug impl. The Decoder
    // has become much more complicated since it was written
    // (though, maybe _that_ is the root problem we should solve...)
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("Decoder");
        builder.field("csr_avro", &self.csr_avro);
        builder.finish()
    }
}
impl Decoder {
    /// Creates a new `Decoder`
    ///
    /// The provided schema is called the "reader schema", which is the schema
    /// that we are expecting to use to decode records. The records may
    /// indicate that they are encoded with a different schema; as long as
    /// that writer schema can be resolved (via the schema registry) against
    /// the reader schema, decoding will still succeed.
    ///
    /// A Debezium envelope requires — and any other envelope forbids — a
    /// deduplication strategy; this invariant is asserted below.
    pub fn new(
        reader_schema: &str,
        schema_registry: Option<ccsr::ClientConfig>,
        envelope: EnvelopeType,
        debug_name: String,
        worker_index: usize,
        debezium_dedup: Option<DebeziumDeduplicationStrategy>,
        key_indices: Option<Vec<usize>>,
    ) -> anyhow::Result<Decoder> {
        assert!(
            (envelope == EnvelopeType::Debezium && debezium_dedup.is_some())
                || (envelope != EnvelopeType::Debezium && debezium_dedup.is_none())
        );
        let debezium_dedup =
            debezium_dedup.map(|strat| DebeziumDeduplicationState::new(strat, key_indices));
        let csr_avro = ConfluentAvroResolver::new(reader_schema, schema_registry)?;
        Ok(Decoder {
            csr_avro,
            envelope,
            debezium_dedup,
            debug_name,
            worker_index,
            buf1: vec![],
            buf2: vec![],
            packer: Default::default(),
        })
    }
    /// Decodes Avro-encoded `bytes` into a `DiffPair`.
    ///
    /// For the Debezium envelope, records judged to be duplicates come back
    /// as an empty `DiffPair`; for other envelopes the decoded row is
    /// returned in `after`.
    pub async fn decode(
        &mut self,
        bytes: &[u8],
        coord: Option<i64>,
        upstream_time_millis: Option<i64>,
    ) -> anyhow::Result<DiffPair<Row>> {
        let (mut bytes, resolved_schema) = self.csr_avro.resolve(bytes).await?;
        let result = if self.envelope == EnvelopeType::Debezium {
            let dec = AvroDebeziumDecoder {
                packer: &mut self.packer,
                buf: &mut self.buf1,
                file_buf: &mut self.buf2,
            };
            let dsr = GeneralDeserializer {
                schema: resolved_schema.top_node(),
            };
            // Unwrap is OK: we assert in Decoder::new that this is non-none when envelope == dbz.
            let dedup = self.debezium_dedup.as_mut().unwrap();
            let (diff, coords) = dsr.deserialize(&mut bytes, dec)?;
            let should_use = if let Some(source) = coords {
                let mssql_fsn_buf;
                // This would have ideally been `Option<&[u8]>`,
                // but that can't be used to lookup in a `HashMap` of `Option<Vec<u8>>` without cloning.
                // So, just use `""` to represent lack of a filename.
                let file = match source.row {
                    RowCoordinates::MySql { .. } => &self.buf2,
                    RowCoordinates::MSSql { change_lsn, .. } => {
                        mssql_fsn_buf = change_lsn.file_seq_num.to_ne_bytes();
                        &mssql_fsn_buf[..]
                    }
                    RowCoordinates::Postgres { .. } => &b""[..],
                    RowCoordinates::Unknown { .. } => &b""[..],
                };
                dedup.should_use_record(
                    file,
                    source.row,
                    coord,
                    upstream_time_millis,
                    &self.debug_name,
                    self.worker_index,
                    source.snapshot,
                    &diff,
                )
            } else {
                true
            };
            if should_use {
                diff
            } else {
                DiffPair {
                    before: None,
                    after: None,
                }
            }
        } else {
            let dec = AvroFlatDecoder {
                packer: &mut self.packer,
                buf: &mut self.buf1,
                is_top: true,
            };
            let dsr = GeneralDeserializer {
                schema: resolved_schema.top_node(),
            };
            dsr.deserialize(&mut bytes, dec)?;
            DiffPair {
                before: None,
                after: Some(self.packer.finish_and_reuse()),
            }
        };
        log::trace!(
            "[customer-data] Decoded diff pair {:?}{} in {}",
            result,
            if let Some(coord) = coord {
                format!(" at offset {}", coord)
            } else {
                // `String::new()` replaces the previous `format!("")`, which
                // invoked the formatting machinery just to build "".
                String::new()
            },
            self.debug_name
        );
        Ok(result)
    }
}
/// Builds a Debezium-encoded Avro schema that corresponds to `desc`.
///
/// Requires that all column names in `desc` are present. The returned schema
/// has some special properties to ease encoding:
///
///   * Union schemas are only used to represent nullability. The first
///     variant is always the null variant, and the second and last variant
///     is the non-null variant.
fn build_schema(columns: &[(ColumnName, ColumnType)], include_transaction: bool) -> Schema {
    let row_schema = build_row_schema_json(columns, "row");
    let mut schema_fields = Vec::new();
    schema_fields.push(json!({
        "name": "before",
        "type": [
            "null",
            row_schema
        ]
    }));
    // "after" refers to the named record type "row" declared inline in
    // "before" above, per Avro named-type reuse rules.
    schema_fields.push(json!({
        "name": "after",
        "type": ["null", "row"],
    }));
    // TODO(rkhaitan): this schema omits the total_order and data collection_order
    // fields found in Debezium's transaction metadata struct. We chose to omit
    // those because the order is not stable across reruns and has no semantic
    // meaning for records within a timestamp in Materialize. These fields may
    // be useful in the future for deduplication.
    if include_transaction {
        schema_fields.push(json!({
            "name": "transaction",
            "type":
            {
                "name": "transaction_metadata",
                "type": "record",
                "fields": [
                    {
                        "name": "id",
                        "type": "string",
                    }
                ]
            }
        }));
    }
    let schema = json!({
        "type": "record",
        "name": "envelope",
        "fields": schema_fields,
    });
    Schema::parse(&schema).expect("valid schema constructed")
}
/// Returns the static Avro schema for Debezium transaction-metadata records.
pub fn get_debezium_transaction_schema() -> &'static Schema {
    &DEBEZIUM_TRANSACTION_SCHEMA
}
/// Encodes a Debezium transaction-metadata record (id, status, optional
/// event count) in Confluent wire format, without registry validation.
///
/// `message_count` of `None` is encoded as the null union variant.
pub fn encode_debezium_transaction_unchecked(
    schema_id: i32,
    id: &str,
    status: &str,
    message_count: Option<i64>,
) -> Vec<u8> {
    let mut buf = Vec::new();
    encode_avro_header(&mut buf, schema_id);
    let transaction_id = Value::String(id.to_owned());
    let status = Value::String(status.to_owned());
    let event_count = match message_count {
        None => Value::Union {
            index: 0,
            inner: Box::new(Value::Null),
            n_variants: 2,
            null_variant: Some(0),
        },
        Some(count) => Value::Union {
            index: 1,
            inner: Box::new(Value::Long(count)),
            n_variants: 2,
            null_variant: Some(0),
        },
    };
    let avro = Value::Record(vec![
        ("id".into(), transaction_id),
        ("status".into(), status),
        ("event_count".into(), event_count),
    ]);
    // Validation only in debug builds; this function is "unchecked" by name.
    debug_assert!(avro.validate(DEBEZIUM_TRANSACTION_SCHEMA.top_node()));
    mz_avro::encode_unchecked(&avro, &DEBEZIUM_TRANSACTION_SCHEMA, &mut buf);
    buf
}
/// Writes the Confluent wire-format header into `buf`: a zero magic byte
/// followed by the 32-bit schema ID in network (big-endian) byte order.
///
/// https://docs.confluent.io/current/schema-registry/docs/serializer-formatter.html#wire-format
fn encode_avro_header(buf: &mut Vec<u8>, schema_id: i32) {
    // Magic byte 0 indicates the Confluent serialization format version.
    buf.push(0);
    // Big-endian == NetworkEndian; identical bytes to the byteorder-based
    // original.
    buf.extend_from_slice(&schema_id.to_be_bytes());
}
/// Manages encoding of Avro-encoded bytes.
pub struct Encoder {
    // Deduplicated column names and types of the relation being encoded.
    columns: Vec<(ColumnName, ColumnType)>,
    // Avro schema of the Debezium-style envelope (before/after[/transaction]).
    writer_schema: Schema,
    // Whether the envelope schema includes a `transaction` field.
    include_transaction: bool,
    // Present only when a key was requested: the key's Avro schema plus the
    // indices of the key columns within `columns`.
    key_schema_and_indices: Option<(Schema, Vec<usize>)>,
}
impl fmt::Debug for Encoder {
    // Only `writer_schema` is shown; the remaining fields are omitted to
    // keep debug output manageable.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Encoder")
            .field("writer_schema", &self.writer_schema)
            .finish()
    }
}
impl Encoder {
    /// Returns the indices of the key columns, if a key schema was configured.
    fn key_indices(&self) -> Option<&[usize]> {
        self.key_schema_and_indices
            .as_ref()
            .map(|(_, indices)| indices.as_slice())
    }
    /// Creates an encoder for the given relation.
    ///
    /// `include_transaction` adds a `transaction` field to the envelope
    /// schema. `key_indices`, when present, selects the columns that form
    /// the key and causes a separate key schema to be built.
    pub fn new(
        desc: RelationDesc,
        include_transaction: bool,
        key_indices: Option<Vec<usize>>,
    ) -> Self {
        let columns = column_names_and_types(desc);
        let writer_schema = build_schema(&columns, include_transaction);
        let key_schema_and_indices = key_indices.map(|key_indices| {
            // The key schema is a plain row record over just the key columns.
            let key_columns = key_indices
                .iter()
                .map(|&key_idx| columns[key_idx].clone())
                .collect::<Vec<_>>();
            let row_schema = build_row_schema_json(&key_columns, "row");
            (
                Schema::parse(&row_schema).expect("valid schema constructed"),
                key_indices,
            )
        });
        Encoder {
            columns,
            writer_schema,
            include_transaction,
            key_schema_and_indices,
        }
    }
    /// Returns the Avro schema used to encode values.
    pub fn writer_schema(&self) -> &Schema {
        &self.writer_schema
    }
    /// Returns the Avro schema used to encode keys, if one was configured.
    pub fn key_writer_schema(&self) -> Option<&Schema> {
        self.key_schema_and_indices
            .as_ref()
            .map(|(schema, _)| schema)
    }
    /// Asserts that a transaction ID is supplied exactly when this encoder
    /// was configured to emit transaction information.
    fn validate_transaction_id(&self, transaction_id: &Option<String>) {
        // We need to preserve the invariant that transaction id is always Some(..)
        // when users requested that we emit transaction information, and never
        // otherwise.
        assert_eq!(
            self.include_transaction,
            transaction_id.is_some(),
            "Testing to make sure transaction IDs are always present only when required"
        );
    }
    /// Encodes a diff pair as Confluent-framed Avro, returning the encoded
    /// key (if a key schema is configured) and the encoded value.
    ///
    /// Panics if a key is produced but `key_schema_id` is `None`. Values are
    /// only validated against the writer schema in debug builds.
    pub fn encode_unchecked(
        &self,
        key_schema_id: Option<i32>,
        value_schema_id: i32,
        diff_pair: DiffPair<&Row>,
        transaction_id: Option<String>,
    ) -> (Option<Vec<u8>>, Vec<u8>) {
        self.validate_transaction_id(&transaction_id);
        let mut key_buf = vec![];
        let mut buf = vec![];
        encode_avro_header(&mut buf, value_schema_id);
        let (avro_key, avro_value) = self.diff_pair_to_avro(diff_pair, transaction_id);
        debug_assert!(avro_value.validate(self.writer_schema.top_node()));
        mz_avro::encode_unchecked(&avro_value, &self.writer_schema, &mut buf);
        // Shadowing: `key_buf` becomes `Option<Vec<u8>>`, populated only when
        // an Avro key was produced.
        let key_buf = avro_key.map(|avro_key| {
            encode_avro_header(&mut key_buf, key_schema_id.unwrap());
            let key_schema = self.key_writer_schema().unwrap();
            debug_assert!(
                avro_key.validate(key_schema.top_node()),
                "{:#?}\n{}",
                avro_key,
                key_schema.canonical_form()
            );
            mz_avro::encode_unchecked(&avro_key, key_schema, &mut key_buf);
            key_buf
        });
        (key_buf, buf)
    }
    /// Converts a diff pair into the envelope's Avro value, plus the Avro
    /// key taken from whichever of `before`/`after` is present.
    ///
    /// Both sides are wrapped in the envelope's nullability union (variant 0
    /// is null, variant 1 the row record).
    pub fn diff_pair_to_avro(
        &self,
        diff_pair: DiffPair<&Row>,
        transaction_id: Option<String>,
    ) -> (Option<Value>, Value) {
        let (before_key, before) = match diff_pair.before {
            None => (
                None,
                Value::Union {
                    index: 0,
                    inner: Box::new(Value::Null),
                    n_variants: 2,
                    null_variant: Some(0),
                },
            ),
            Some(row) => {
                let (key, row) = self.row_to_avro(row.iter());
                (
                    key,
                    Value::Union {
                        index: 1,
                        inner: Box::new(row),
                        n_variants: 2,
                        null_variant: Some(0),
                    },
                )
            }
        };
        let (after_key, after) = match diff_pair.after {
            None => (
                None,
                Value::Union {
                    index: 0,
                    inner: Box::new(Value::Null),
                    n_variants: 2,
                    null_variant: Some(0),
                },
            ),
            Some(row) => {
                let (key, row) = self.row_to_avro(row.iter());
                (
                    key,
                    Value::Union {
                        index: 1,
                        inner: Box::new(row),
                        n_variants: 2,
                        null_variant: Some(0),
                    },
                )
            }
        };
        // TODO [btv]: Decoding the key twice and then validating that they match is probably wasteful.
        // But it doesn't matter for now since (1) in sinks we always have before or after populated, but not both,
        // and (2) avro encoding for sinks is un-optimized anyway.
        //
        // Look into it if/when sink encoding becomes a bottleneck.
        if let (Some(before_key), Some(after_key)) = (before_key.as_ref(), after_key.as_ref()) {
            assert_eq!(before_key, after_key, "Mismatched keys in sink!");
        }
        let key = before_key.or(after_key);
        let transaction = if let Some(transaction_id) = transaction_id {
            let id = Value::String(transaction_id);
            Some(Value::Record(vec![("id".into(), id)]))
        } else {
            None
        };
        let mut fields = Vec::new();
        fields.push(("before".into(), before));
        fields.push(("after".into(), after));
        if let Some(transaction) = transaction {
            fields.push(("transaction".into(), transaction));
        }
        (key, Value::Record(fields))
    }
    /// Encodes a single row of datums as an Avro value (and optional key).
    pub fn row_to_avro<'a, I>(&self, row: I) -> (Option<Value>, Value)
    where
        I: IntoIterator<Item = Datum<'a>>,
    {
        encode_datums_as_avro(row, &self.columns, self.key_indices())
    }
}
/// Extracts deduplicated column names and types from a relation description.
pub fn column_names_and_types(desc: RelationDesc) -> Vec<(ColumnName, ColumnType)> {
    // Invent names for columns that don't have a name.
    let mut columns: Vec<_> = desc
        .into_iter()
        .enumerate()
        .map(|(i, (name, ty))| match name {
            None => (ColumnName::from(format!("column{}", i + 1)), ty),
            Some(name) => (name, ty),
        })
        .collect();
    // Deduplicate names.
    let mut seen = HashSet::new();
    for (name, _ty) in &mut columns {
        let stem_len = name.as_str().len();
        let mut i = 1;
        // Repeatedly re-suffix the original stem with an increasing counter
        // until the name is unique. If the stem itself ends in a digit, an
        // underscore separates it from the counter (e.g. "a1" -> "a1_1") so
        // the result stays unambiguous.
        while seen.contains(name) {
            name.as_mut_str().truncate(stem_len);
            if name.as_str().ends_with(|c: char| c.is_ascii_digit()) {
                name.as_mut_str().push('_');
            }
            name.as_mut_str().push_str(&i.to_string());
            i += 1;
        }
        // The set stores `&mut ColumnName` borrows of the (now final) names;
        // each element is inserted exactly once after being uniquified.
        seen.insert(name);
    }
    columns
}
/// Encodes a sequence of `Datum` as Avro (key and value), using supplied column names and types.
pub fn encode_datums_as_avro<'a, I>(
    datums: I,
    names_types: &[(ColumnName, ColumnType)],
    key_indices: Option<&[usize]>,
) -> (Option<Value>, Value)
where
    I: IntoIterator<Item = Datum<'a>>,
{
    // `zip_eq` panics if the number of datums differs from the number of
    // columns, which would indicate a malformed row.
    let value_fields: Vec<(String, Value)> = names_types
        .iter()
        .zip_eq(datums)
        .map(|((name, typ), datum)| {
            let name = name.as_str().to_owned();
            use mz_avro::types::ToAvro;
            (name, TypedDatum::new(datum, typ.clone()).avro())
        })
        .collect();
    // The key record, if requested, reuses the already-encoded value fields.
    let k = key_indices.map(|key_indices| {
        let key_fields = key_indices
            .iter()
            .map(|&idx| value_fields[idx].clone())
            .collect();
        Value::Record(key_fields)
    });
    let v = Value::Record(value_fields);
    (k, v)
}
/// Bundled information sufficient to encode as Avro.
#[derive(Debug)]
pub struct TypedDatum<'a> {
    // The raw datum to encode.
    datum: Datum<'a>,
    // The column type governing the Avro representation (incl. nullability).
    typ: ColumnType,
}
impl<'a> TypedDatum<'a> {
    /// Pairs a datum and its type, for Avro encoding.
    ///
    /// The caller is responsible for supplying the type that actually
    /// corresponds to the datum's column.
    pub fn new(datum: Datum<'a>, typ: ColumnType) -> Self {
        Self { datum, typ }
    }
}
impl<'a> mz_avro::types::ToAvro for TypedDatum<'a> {
    /// Converts the datum into an Avro `Value` according to its column type.
    ///
    /// Nullable columns are wrapped in a two-variant union whose first
    /// variant is null, matching the row schemas built in this file.
    fn avro(self) -> Value {
        let TypedDatum { datum, typ } = self;
        if typ.nullable && datum.is_null() {
            Value::Union {
                index: 0,
                inner: Box::new(Value::Null),
                n_variants: 2,
                null_variant: Some(0),
            }
        } else {
            let mut val = match &typ.scalar_type {
                ScalarType::Bool => Value::Boolean(datum.unwrap_bool()),
                ScalarType::Int32 | ScalarType::Oid => Value::Int(datum.unwrap_int32()),
                ScalarType::Int64 => Value::Long(datum.unwrap_int64()),
                ScalarType::Float32 => Value::Float(datum.unwrap_float32()),
                ScalarType::Float64 => Value::Double(datum.unwrap_float64()),
                ScalarType::Decimal(p, s) => Value::Decimal(DecimalValue {
                    unscaled: datum.unwrap_decimal().as_i128().to_be_bytes().to_vec(),
                    precision: (*p).into(),
                    scale: (*s).into(),
                }),
                ScalarType::Date => Value::Date(datum.unwrap_date()),
                // Times are encoded as microseconds since midnight.
                ScalarType::Time => Value::Long({
                    let time = datum.unwrap_time();
                    (time.num_seconds_from_midnight() * 1_000_000) as i64
                        + (time.nanosecond() as i64) / 1_000
                }),
                ScalarType::Timestamp => Value::Timestamp(datum.unwrap_timestamp()),
                ScalarType::TimestampTz => Value::Timestamp(datum.unwrap_timestamptz().naive_utc()),
                // This feature isn't actually supported by the Avro Java
                // client (https://issues.apache.org/jira/browse/AVRO-2123),
                // so no one is likely to be using it, so we're just using
                // our own very convenient format.
                ScalarType::Interval => Value::Fixed(20, {
                    let iv = datum.unwrap_interval();
                    // 20 bytes: 4-byte little-endian months followed by the
                    // 16-byte little-endian duration. (The capacity hint was
                    // previously 24, inconsistent with the asserted length.)
                    // NOTE(review): `build_row_schema_json` declares this
                    // fixed with "size": 12 — confirm schema and encoder
                    // actually agree for interval columns.
                    let mut buf = Vec::with_capacity(20);
                    buf.extend(&iv.months.to_le_bytes());
                    buf.extend(&iv.duration.to_le_bytes());
                    debug_assert_eq!(buf.len(), 20);
                    buf
                }),
                ScalarType::Bytes => Value::Bytes(Vec::from(datum.unwrap_bytes())),
                ScalarType::String => Value::String(datum.unwrap_str().to_owned()),
                ScalarType::Jsonb => Value::Json(JsonbRef::from_datum(datum).to_serde_json()),
                ScalarType::Uuid => Value::Uuid(datum.unwrap_uuid()),
                ScalarType::Array(_t) => unimplemented!("array types"),
                ScalarType::List(_t) => unimplemented!("list types"),
                ScalarType::Record { .. } => unimplemented!("record types"),
                ScalarType::Map { .. } => unimplemented!("map types"),
            };
            if typ.nullable {
                val = Value::Union {
                    index: 1,
                    inner: Box::new(val),
                    n_variants: 2,
                    null_variant: Some(0),
                };
            }
            val
        }
    }
}
// Caches parsed (and, when necessary, resolved) writer schemas fetched from
// the schema registry.
struct SchemaCache {
    // Per schema-ID result: either a usable schema or the parse/resolution
    // error. Errors are cached too, since they will not change for a given
    // registry payload.
    cache: HashMap<i32, Result<Schema, AvroError>>,
    ccsr_client: ccsr::Client,
    // Fingerprint of the reader schema; lets `get` skip resolution when the
    // writer schema is byte-identical to the reader schema.
    reader_fingerprint: SchemaFingerprint,
}
impl SchemaCache {
    /// Creates an empty cache backed by a client for the given schema
    /// registry, comparing fetched schemas against `reader_fingerprint`.
    fn new(
        schema_registry: ccsr::ClientConfig,
        reader_fingerprint: SchemaFingerprint,
    ) -> SchemaCache {
        SchemaCache {
            cache: HashMap::new(),
            ccsr_client: schema_registry.build(),
            reader_fingerprint,
        }
    }
    /// Looks up the writer schema for ID. If the schema is literally identical
    /// to the reader schema, as determined by the reader schema fingerprint
    /// that this schema cache was initialized with, returns the schema directly.
    /// If not, performs schema resolution on the reader and writer and
    /// returns the result.
    async fn get(&mut self, id: i32, reader_schema: &Schema) -> anyhow::Result<&Schema> {
        let entry = match self.cache.entry(id) {
            Entry::Occupied(o) => o.into_mut(),
            Entry::Vacant(v) => {
                // An issue with _fetching_ the schema should be returned
                // immediately, and not cached, since it might get better on the
                // next retry.
                // TODO - some sort of exponential backoff or similar logic
                let response = self.ccsr_client.get_schema_by_id(id).await?;
                // Now, we've gotten some json back, so we want to cache it (regardless of whether it's a valid
                // avro schema, it won't change).
                //
                // However, we can't just cache it directly, since resolving schemas takes significant CPU work,
                // which we don't want to repeat for every record. So, parse and resolve it, and cache the
                // result (whether schema or error).
                let rf = &self.reader_fingerprint.bytes;
                let result = Schema::parse_str(&response.raw).and_then(|schema| {
                    if &schema.fingerprint::<Sha256>().bytes == rf {
                        Ok(schema)
                    } else {
                        // the writer schema differs from the reader schema,
                        // so we need to perform schema resolution.
                        let resolved = resolve_schemas(&schema, reader_schema)?;
                        Ok(resolved)
                    }
                });
                v.insert(result)
            }
        };
        // Cached errors are cloned into fresh `anyhow` errors for each caller.
        entry.as_ref().map_err(|e| anyhow::Error::new(e.clone()))
    }
}
/// Builds the JSON for the row schema, which can be independently useful.
///
/// Each column becomes one Avro record field; nullable columns are wrapped
/// in a `["null", ...]` union with the null variant first.
fn build_row_schema_json(
    columns: &[(ColumnName, ColumnType)],
    name: &str,
) -> serde_json::value::Value {
    let mut fields = Vec::new();
    for (name, typ) in columns.iter() {
        let mut field_type = match &typ.scalar_type {
            ScalarType::Bool => json!("boolean"),
            ScalarType::Int32 | ScalarType::Oid => json!("int"),
            ScalarType::Int64 => json!("long"),
            ScalarType::Float32 => json!("float"),
            ScalarType::Float64 => json!("double"),
            ScalarType::Decimal(p, s) => json!({
                "type": "bytes",
                "logicalType": "decimal",
                "precision": p,
                "scale": s,
            }),
            ScalarType::Date => json!({
                "type": "int",
                "logicalType": "date",
            }),
            ScalarType::Time => json!({
                "type": "long",
                "logicalType": "time-micros",
            }),
            ScalarType::Timestamp | ScalarType::TimestampTz => json!({
                "type": "long",
                "logicalType": "timestamp-micros"
            }),
            // NOTE(review): the encoder (`TypedDatum::avro`) emits a 20-byte
            // fixed for intervals (4-byte months + 16-byte duration), but
            // this declares "size": 12 — confirm whether these are meant to
            // agree, and note that Avro "fixed" schemas normally require a
            // "name" attribute, which is absent here. TODO confirm against
            // mz_avro's parser behavior.
            ScalarType::Interval => json!({
                "type": "fixed",
                "size": 12,
                "logicalType": "duration"
            }),
            ScalarType::Bytes => json!("bytes"),
            ScalarType::String => json!("string"),
            ScalarType::Jsonb => json!({
                "type": "string",
                "connect.name": "io.debezium.data.Json",
            }),
            ScalarType::Uuid => json!({
                "type": "string",
                "logicalType": "uuid",
            }),
            ScalarType::Array(_t) => unimplemented!("array types"),
            ScalarType::List(_t) => unimplemented!("list types"),
            ScalarType::Record { .. } => unimplemented!("record types"),
            ScalarType::Map { .. } => unimplemented!("map types"),
        };
        if typ.nullable {
            field_type = json!(["null", field_type]);
        }
        fields.push(json!({
            "name": name,
            "type": field_type,
        }));
    }
    json!({
        "type": "record",
        "fields": fields,
        "name": name
    })
}
/// Logic for the Avro representation of the CDCv2 protocol.
pub mod cdc_v2 {
use mz_avro::schema::{FullName, SchemaNode};
use repr::{ColumnName, ColumnType, Diff, RelationDesc, Row, RowPacker, Timestamp};
use serde_json::json;
use super::build_row_schema_json;
use super::RowWrapper;
use anyhow::anyhow;
use avro_derive::AvroDecodeable;
use differential_dataflow::capture::{Message, Progress};
use mz_avro::schema::Schema;
use mz_avro::types::Value;
use mz_avro::{
define_unexpected,
error::{DecodeError, Error as AvroError},
ArrayAsVecDecoder, AvroDecode, AvroDecodeable, AvroDeserializer, AvroRead,
StatefulAvroDecodeable,
};
use std::{cell::RefCell, rc::Rc};
pub fn extract_data_columns<'a>(schema: &'a Schema) -> anyhow::Result<SchemaNode<'a>> {
let data_name = FullName::from_parts("data", Some("com.materialize.cdc"), "");
let data_schema = &schema
.try_lookup_name(&data_name)
.ok_or_else(|| anyhow!("record not found: {}", data_name))?
.piece;
Ok(SchemaNode {
root: &schema,
inner: data_schema,
name: None,
})
}
    /// Collected state to encode update batches and progress statements.
    #[derive(Debug)]
    pub struct Encoder {
        // Deduplicated column names/types of the rows being encoded.
        columns: Vec<(ColumnName, ColumnType)>,
        // Parsed CDCv2 message schema (union of updates and progress).
        schema: Schema,
    }
impl Encoder {
/// Creates a new CDCv2 encoder from a relation description.
pub fn new(desc: RelationDesc) -> Self {
let columns = super::column_names_and_types(desc);
let row_schema = build_row_schema_json(&columns, "data");
let schema = build_schema(row_schema);
Self { columns, schema }
}
/// Encodes a batch of updates as an Avro value.
pub fn encode_updates(&self, updates: &[(Row, i64, i64)]) -> Value {
let mut enc_updates = Vec::new();
for (data, time, diff) in updates {
let enc_data = super::encode_datums_as_avro(data, &self.columns, None).1;
let enc_time = Value::Long(time.clone());
let enc_diff = Value::Long(diff.clone());
let mut enc_update = Vec::new();
enc_update.push(("data".to_string(), enc_data));
enc_update.push(("time".to_string(), enc_time));
enc_update.push(("diff".to_string(), enc_diff));
enc_updates.push(Value::Record(enc_update));
}
Value::Union {
index: 0,
inner: Box::new(Value::Array(enc_updates)),
n_variants: 2,
null_variant: None,
}
}
/// Encodes the contents of a progress statement as an Avro value.
pub fn encode_progress(
&self,
lower: &[i64],
upper: &[i64],
counts: &[(i64, i64)],
) -> Value {
let enc_lower = Value::Array(lower.iter().cloned().map(Value::Long).collect());
let enc_upper = Value::Array(upper.iter().cloned().map(Value::Long).collect());
let enc_counts = Value::Array(
counts
.iter()
.map(|(time, count)| {
Value::Record(vec![
("time".to_string(), Value::Long(time.clone())),
("count".to_string(), Value::Long(count.clone())),
])
})
.collect(),
);
let enc_progress = Value::Record(vec![
("lower".to_string(), enc_lower),
("upper".to_string(), enc_upper),
("counts".to_string(), enc_counts),
]);
Value::Union {
index: 1,
inner: Box::new(enc_progress),
n_variants: 2,
null_variant: None,
}
}
}
    // Mirrors one entry of the "update" array in the CDCv2 schema. Decoding
    // is stateful: a shared row packer and scratch buffer are threaded
    // through to the `RowWrapper` field decoder.
    #[derive(AvroDecodeable)]
    #[state_type(Rc<RefCell<RowPacker>>, Rc<RefCell<Vec<u8>>>)]
    struct MyUpdate {
        #[state_expr(self._STATE.0.clone(), self._STATE.1.clone())]
        data: RowWrapper,
        time: Timestamp,
        diff: Diff,
    }
    // Mirrors one entry of the "counts" array in a progress statement.
    #[derive(AvroDecodeable)]
    struct Count {
        time: Timestamp,
        count: usize,
    }
    // Builds a decoder for the "counts" array, yielding (time, count) pairs.
    fn make_counts_decoder() -> impl AvroDecode<Out = Vec<(Timestamp, usize)>> {
        ArrayAsVecDecoder::new(|| {
            <Count as AvroDecodeable>::new_decoder().map_decoder(|ct| Ok((ct.time, ct.count)))
        })
    }
    // Mirrors the "progress" record of the CDCv2 schema.
    #[derive(AvroDecodeable)]
    struct MyProgress {
        lower: Vec<Timestamp>,
        upper: Vec<Timestamp>,
        #[decoder_factory(make_counts_decoder)]
        counts: Vec<(Timestamp, usize)>,
    }
    impl AvroDecode for Decoder {
        type Out = Message<Row, Timestamp, Diff>;
        // A CDCv2 message is a two-variant union: variant 0 carries a batch
        // of updates, variant 1 a progress statement.
        fn union_branch<'a, R: AvroRead, D: AvroDeserializer>(
            self,
            idx: usize,
            _n_variants: usize,
            _null_variant: Option<usize>,
            deserializer: D,
            r: &'a mut R,
        ) -> Result<Self::Out, AvroError> {
            match idx {
                0 => {
                    // Updates: decode an array of (row, time, diff) records,
                    // sharing a single packer/buffer across all elements.
                    let packer = Rc::new(RefCell::new(RowPacker::new()));
                    let buf = Rc::new(RefCell::new(vec![]));
                    let d = ArrayAsVecDecoder::new(|| {
                        <MyUpdate as StatefulAvroDecodeable>::new_decoder((
                            packer.clone(),
                            buf.clone(),
                        ))
                        .map_decoder(|update| Ok((update.data.0, update.time, update.diff)))
                    });
                    let updates = deserializer.deserialize(r, d)?;
                    Ok(Message::Updates(updates))
                }
                1 => {
                    // Progress: decode lower/upper frontiers and per-time
                    // counts.
                    let progress = deserializer
                        .deserialize(r, <MyProgress as AvroDecodeable>::new_decoder())?;
                    let progress = Progress {
                        lower: progress.lower,
                        upper: progress.upper,
                        counts: progress.counts,
                    };
                    Ok(Message::Progress(progress))
                }
                other => Err(DecodeError::Custom(format!(
                    "Unrecognized union variant in CDCv2 decoder: {}",
                    other
                ))
                .into()),
            }
        }
        // Any other Avro shape at the top level of a CDCv2 message is an
        // error.
        define_unexpected! {
            record, array, map, enum_variant, scalar, decimal, bytes, string, json, uuid, fixed
        }
    }
/// Collected state to decode update batches and progress statements.
#[derive(Debug)]
pub struct Decoder;
/// Construct the schema for the CDC V2 protocol.
pub fn build_schema(row_schema: serde_json::Value) -> Schema {
let updates_schema = json!({
"type": "array",
"items": {
"name" : "update",
"type" : "record",
"fields" : [
{
"name": "data",
"type": row_schema,
},
{
"name" : "time",
"type" : "long",
},
{
"name" : "diff",
"type" : "long",
},
],
},
});
let progress_schema = json!({
"name" : "progress",
"type" : "record",
"fields" : [
{
"name": "lower",
"type": {
"type": "array",
"items": "long"
}
},
{
"name": "upper",
"type": {
"type": "array",
"items": "long"
}
},
{
"name": "counts",
"type": {
"type": "array",
"items": {
"type": "record",
"name": "counts",
"fields": [
{
"name": "time",
"type": "long",
},
{
"name": "count",
"type": "long",
},
],
}
}
},
],
});
let message_schema = json!([updates_schema, progress_schema,]);
Schema::parse(&message_schema).expect("schema constrution failed")
}
#[cfg(test)]
mod tests {
use super::*;
use mz_avro::AvroDeserializer;
use mz_avro::GeneralDeserializer;
use repr::ScalarType;
#[test]
fn test_roundtrip() {
let desc = RelationDesc::empty()
.with_column("id", ScalarType::Int64.nullable(false))
.with_column("price", ScalarType::Float64.nullable(true));
let encoder = Encoder::new(desc.clone());
let row_schema =
build_row_schema_json(&crate::avro::column_names_and_types(desc), "data");
let schema = build_schema(row_schema);
let values = vec![
encoder.encode_updates(&[]),
encoder.encode_progress(&[0], &[3], &[]),
encoder.encode_progress(&[3], &[], &[]),
];
use mz_avro::encode::encode_to_vec;;
let mut values: Vec<_> = values
.into_iter()
.map(|v| encode_to_vec(&v, &schema))
.collect();
let g = GeneralDeserializer {
schema: schema.top_node(),
};
assert!(matches!(
g.deserialize(&mut &values.remove(0)[..], Decoder).unwrap(),
Message::Updates(_)
),);
assert!(matches!(
g.deserialize(&mut &values.remove(0)[..], Decoder).unwrap(),
Message::Progress(_)
),);
assert!(matches!(
g.deserialize(&mut &values.remove(0)[..], Decoder).unwrap(),
Message::Progress(_)
),);
}
}
}
#[cfg(test)]
mod tests {
use anyhow::Context;
use chrono::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, Utc};
use ordered_float::OrderedFloat;
use serde::Deserialize;
use std::fs::File;
use mz_avro::types::{DecimalValue, Value};
use repr::adt::decimal::Significand;
use repr::{Datum, RelationDesc};
use super::*;
    // One entry of the JSON test-data file: a named input schema plus the
    // column names/types expected to be extracted from it.
    #[derive(Deserialize)]
    struct TestCase {
        name: String,
        input: serde_json::Value,
        expected: Vec<(ColumnName, ColumnType)>,
    }
#[test]
#[ignore] // TODO(benesch): update tests for diff envelope.
fn test_schema_parsing() -> anyhow::Result<()> {
let file = File::open("interchange/testdata/avro-schema.json")
.context("opening test data file")?;
let test_cases: Vec<TestCase> =
serde_json::from_reader(file).context("parsing JSON test data")?;
for tc in test_cases {
// Stringifying the JSON we just parsed is rather silly, but it
// avoids embedding JSON strings inside of JSON, which is hard on
// the eyes.
let schema = serde_json::to_string(&tc.input)?;
let output = super::validate_value_schema(&schema, EnvelopeType::Debezium)?;
assert_eq!(output, tc.expected, "failed test case name: {}", tc.name)
}
Ok(())
}
#[test]
/// Test that primitive Avro Schema types are allow Datums to be correctly
/// serialized into Avro Values.
///
/// Complete list of primitive types in test, also found in this
/// documentation:
/// https://avro.apache.org/docs/current/spec.html#schemas
fn test_diff_pair_to_avro_primitive_types() -> anyhow::Result<()> {
// Data to be used later in assertions.
let date = NaiveDate::from_ymd(2020, 1, 8);
let date_time = NaiveDateTime::new(date, NaiveTime::from_hms(1, 1, 1));
let bytes: Vec<u8> = vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1];
let string = String::from("test");
// Simple transformations from primitive Avro Schema types
// to Avro Values.
let valid_pairings = vec![
(ScalarType::Bool, Datum::True, Value::Boolean(true)),
(ScalarType::Bool, Datum::False, Value::Boolean(false)),
(ScalarType::Int32, Datum::Int32(1), Value::Int(1)),
(ScalarType::Int64, Datum::Int64(1), Value::Long(1)),
(
ScalarType::Float32,
Datum::Float32(OrderedFloat::from(1f32)),
Value::Float(1f32),
),
(
ScalarType::Float64,
Datum::Float64(OrderedFloat::from(1f64)),
Value::Double(1f64),
),
(ScalarType::Date, Datum::Date(date), Value::Date(date)),
(
ScalarType::Timestamp,
Datum::Timestamp(date_time),
Value::Timestamp(date_time),
),
(
ScalarType::TimestampTz,
Datum::TimestampTz(DateTime::from_utc(date_time, Utc)),
Value::Timestamp(date_time),
),
(
ScalarType::Decimal(1, 1),
Datum::Decimal(Significand::new(1i128)),
Value::Decimal(DecimalValue {
unscaled: bytes.clone(),
precision: 1,
scale: 1,
}),
),
(
ScalarType::Bytes,
Datum::Bytes(&bytes),
Value::Bytes(bytes.clone()),
),
(
ScalarType::String,
Datum::String(&string),
Value::String(string.clone()),
),
];
for (typ, datum, expected) in valid_pairings {
let desc = RelationDesc::empty().with_column("column1", typ.nullable(false));
let (_, avro_value) =
Encoder::new(desc, false, None).row_to_avro(std::iter::once(datum));
assert_eq!(
Value::Record(vec![("column1".into(), expected)]),
avro_value
);
}
Ok(())
}
}
| 37.026042 | 150 | 0.496334 |
d7c33d08e8336745110088031c07959098ce92c0 | 246 | pub mod renderer;
pub mod sdl2;
#[cfg(feature = "vulkan")]
pub mod vulkan;
/*
#[cfg(not(feature = "vulkan"))]
pub mod vulkan {
impl VulkanRenderer {
pub fn new(app_settings: &ApplicationSettings) -> Result<Self, String> {
}
}
*/
| 17.571429 | 80 | 0.634146 |
ff5806b7c6496cb900916a47901f5a640056b28e | 16,293 | use std::path::{Component, Path, PathBuf};
// This mod's errors need an individual namespace because the sub_module macro imports the
// module::errors into this scope which leads to name / type conflicts.
// Error types for this module, generated by `error_chain!`. Variants carry
// static description/display text only; underlying causes are attached at
// call sites via `chain_err`.
mod errors {
    error_chain! {
        errors {
            FailedToLoadProfile {
                description("Failed to load profile")
                display("Failed to load profile")
            }
            NoLocalBaseDir {
                description("No local base directory configured for this profile")
                display("No local base directory configured for this profile")
            }
            FailedToFindResources {
                description("Failed to find resources")
                display("Failed to find resources")
            }
            // Carries the offending path for diagnostics.
            FailedParseResourcesFromPath(path: String) {
                description("Failed to parse resources from path")
                display("Failed to parse resources from path '{}'", path)
            }
            FailedOutput {
                description("Failed to output")
                display("Failed to output")
            }
            FailedToParseDuration {
                description("Failed to parse duration")
                display("Failed to parse duration")
            }
            FailedToParseOutputType {
                description("Failed to parse output type")
                display("Failed to parse output type")
            }
            FailedToBuildCommand {
                description("Failed to build command")
                display("Failed to build command")
            }
            FailedToRunCommand {
                description("Failed to run command")
                display("Failed to run command")
            }
        }
    }
}
// A single resource, identified by the project it belongs to and its name
// within that project's resources directory.
#[derive(Debug, Serialize)]
pub struct Resource {
    pub project: String,
    pub name: String,
}
impl Resource {
    /// Builds the on-disk path of this resource:
    /// `<base_dir>/<project>/<resources_prefix>/<name>`.
    ///
    /// Generalized: `base_dir` and `resources_prefix` no longer need to be
    /// the same type (the old single `T` forced, e.g., two `&str`s).
    /// Existing callers that pass matching types are unaffected.
    fn to_path<B: AsRef<Path>, R: AsRef<Path>>(&self, base_dir: B, resources_prefix: R) -> PathBuf {
        let mut p: PathBuf = base_dir.as_ref().to_path_buf();
        p.push(&self.project);
        p.push(resources_prefix);
        p.push(&self.name);
        p
    }
    /// This function assumes that it gets a relative path starting with the project directory
    ///
    /// Example: "logimon/packer/resources/elk_elasticsearch/" instead of "/Users/lukas/Documents/src/ceres/tests/base_dir/logimon/packer/resources/elk_elasticsearch"
    pub fn from_path<P: AsRef<Path>>(path: P) -> errors::Result<Self> {
        let path: &Path = path.as_ref();
        let components: Vec<_> = path.components().collect();
        // Exactly four components are expected:
        // <project>/<tool dir>/<resources dir>/<resource>. The two middle
        // components are not inspected.
        match components.as_slice() {
            [Component::Normal(project), _, _, Component::Normal(resource)] =>
                Ok( Resource {
                    project: project.to_string_lossy().to_string(),
                    name: resource.to_string_lossy().to_string(),
                } ),
            _ => Err(errors::Error::from_kind(errors::ErrorKind::FailedParseResourcesFromPath(path.to_string_lossy().to_string()))),
        }
    }
}
/// Generates a `list` subcommand module that discovers resources of a given
/// kind under the profile's local base directory and prints them.
///
/// `$description` is the subcommand help text; `$resources_prefix` is the
/// directory-name fragment that identifies resource directories (matched
/// against the parent of each discovered `Makefile`).
macro_rules! list_resources {
    ($description:tt,$resources_prefix:tt) => {
        use clap::{App, Arg, ArgMatches, SubCommand};
        use ignore::WalkBuilder;
        use std::path::{Path, PathBuf};
        use config::CeresConfig as Config;
        use modules::{Result as ModuleResult, Error as ModuleError, ErrorKind as ModuleErrorKind, Module};
        use modules::infrastructure::Resource;
        use modules::infrastructure::errors::*;
        use output::OutputType;
        use output::infrastructure::{JsonOutputResourceListResult, OutputResourceListResult, PlainOutputResourceListResult, TableOutputResourceListResult};
        use run_config::RunConfig;
        pub const NAME: &str = "list";
        pub struct SubModule;
        impl Module for SubModule {
            fn build_sub_cli() -> App<'static, 'static> {
                SubCommand::with_name(NAME)
                    .about($description)
                    .arg(
                        Arg::with_name("base-dir")
                            .long("base-dir")
                            .takes_value(true)
                            .help("Overwrites base dir from ceres configuration file"),
                    )
                    .arg(
                        Arg::with_name("output")
                            .long("output")
                            .short("o")
                            .takes_value(true)
                            .default_value("human")
                            .possible_values(&["human", "json", "plain"])
                            .help("Selects output format"),
                    )
            }
            fn call(cli_args: Option<&ArgMatches>, run_config: &RunConfig, config: &Config) -> ModuleResult<()> {
                let args = cli_args.unwrap(); // Safe unwrap
                do_call(args, run_config, config)
                    .map_err(|e| ModuleError::with_chain(e, ModuleErrorKind::ModuleFailed(NAME.to_owned())))
            }
        }
        #[allow(unstable_name_collisions)] // flatten from itertools
        fn do_call(args: &ArgMatches, run_config: &RunConfig, config: &Config) -> Result<()> {
            let profile = match run_config.active_profile.as_ref() {
                "default" => config.get_default_profile(),
                s => config.get_profile(s),
            }.chain_err(|| ErrorKind::FailedToLoadProfile)?;
            // CLI --base-dir overrides the profile's configured base dir.
            let local_base_dir = if let Some(base_dir) = args.value_of("base-dir") {
                base_dir
            } else {
                profile.local_base_dir.as_ref()
                    .ok_or(Error::from_kind(ErrorKind::NoLocalBaseDir))?
            };
            // Strip the base dir so Resource::from_path sees project-relative
            // paths.
            let asps: Result<Vec<_>> = find_resources(local_base_dir)?
                .iter()
                .flat_map(|x| x.strip_prefix(local_base_dir))
                .map(|x| Resource::from_path(x))
                .collect();
            let asps = asps?;
            info!("Outputting resource list");
            output_list(args, run_config, config, &asps)?;
            Ok(())
        }
        // A resource directory is one that contains a Makefile, whose path
        // contains $resources_prefix, and whose grandparent has a
        // project.cfg file.
        fn find_resources<P: AsRef<Path>>(base_dir: P) -> Result<Vec<PathBuf>> {
            let walker = WalkBuilder::new(base_dir).build();
            walker
                .filter(|x| // Does the path point to a Makefile?
                    x.is_ok() &&
                    x.as_ref().unwrap().path().ends_with("Makefile")
                )
                .filter(|x| // Does the path to the Makefile contain the _resources prefix_
                    x.as_ref().unwrap().path().parent().is_some() && // Safe see above
                    x.as_ref().unwrap().path().parent().unwrap().to_string_lossy().contains($resources_prefix)
                )
                .map(|x|
                    x
                        .map(|d| d.path().parent().unwrap().to_path_buf()) // Safe
                        .map_err(|e| Error::with_chain(e, ErrorKind::FailedToFindResources)))
                .filter(|x| { // Does the parent directory contain a file "project.cfg"
                    if let Ok(x) = x {
                        let mut p = x.clone();
                        p.pop(); // Will be true since at least two parents are guaranteed; see above.
                        p.push("project.cfg");
                        p.exists() && p.is_file()
                    } else {
                        false
                    }
                })
                .collect()
        }
        // Renders the resource list in the format selected by --output.
        fn output_list(
            args: &ArgMatches,
            _: &RunConfig,
            _: &Config,
            resources: &[Resource],
        ) -> Result<()> {
            let output_type = args.value_of("output").unwrap() // Safe
                .parse::<OutputType>()
                .chain_err(|| ErrorKind::FailedToParseOutputType)?;
            let mut stdout = ::std::io::stdout();
            match output_type {
                OutputType::Human => {
                    let output = TableOutputResourceListResult;
                    output
                        .output(&mut stdout, resources)
                        .chain_err(|| ErrorKind::FailedOutput)
                },
                OutputType::Json => {
                    let output = JsonOutputResourceListResult;
                    output
                        .output(&mut stdout, resources)
                        .chain_err(|| ErrorKind::FailedOutput)
                },
                OutputType::Plain => {
                    let output = PlainOutputResourceListResult;
                    output
                        .output(&mut stdout, resources)
                        .chain_err(|| ErrorKind::FailedOutput)
                },
            }
        }
    }
}
macro_rules! build_resource {
($description:tt,$resources_prefix:tt,$($command:tt),+) => {
use clap::{App, Arg, ArgMatches, SubCommand};
use itertools::Itertools;
use std::path::Path;
use std::time::Duration;
use config::{CeresConfig as Config};
use modules::{Result as ModuleResult, Error as ModuleError, ErrorKind as ModuleErrorKind, Module};
use modules::infrastructure::Resource;
use modules::infrastructure::errors::*;
use output::OutputType;
use run_config::RunConfig;
use tempfile;
use utils::command::{Command, CommandResult};
use utils::run;
pub const NAME: &str = "build";
const COMMANDS: &'static [&'static str] = &[
$($command,)*
];
pub struct SubModule;
impl Module for SubModule {
fn build_sub_cli() -> App<'static, 'static> {
SubCommand::with_name(NAME)
.about($description)
.arg(
Arg::with_name("base-dir")
.long("base-dir")
.takes_value(true)
.help("Overwrites base dir from ceres configuration file"),
)
.arg(
Arg::with_name("project")
.long("project")
.short("p")
.takes_value(true)
.required(true)
.help("Sets project"),
)
.arg(
Arg::with_name("resource")
.long("resource")
.short("r")
.takes_value(true)
.required(true)
.help("Sets resource to build"),
)
.arg(
Arg::with_name("no-progress-bar")
.long("no-progress-bar")
.help("Do not show progressbar during command execution"),
)
.arg(
Arg::with_name("output")
.long("output")
.short("o")
.takes_value(true)
.default_value("human")
.possible_values(&["human", "json"])
.help("Selects output format"),
)
.arg(
Arg::with_name("show-all")
.long("show-all")
.help("Show all command results; by default show only results of failed commands"),
)
.arg(
Arg::with_name("timeout")
.long("timeout")
.takes_value(true)
.default_value("300")
.help("Timeout in sec for command to finish"),
)
}
fn call(cli_args: Option<&ArgMatches>, run_config: &RunConfig, config: &Config) -> ModuleResult<()> {
let args = cli_args.unwrap(); // Safe unwrap
do_call(args, run_config, config)
.map_err(|e| ModuleError::with_chain(e, ModuleErrorKind::ModuleFailed(NAME.to_owned())))
}
}
/// Resolves the active profile, parses CLI arguments, builds the command
/// pipeline for the selected resource, runs it and renders the results.
///
/// NOTE(review): inside a macro body — `$resources_prefix` is a macro
/// metavariable substituted at expansion time.
fn do_call(args: &ArgMatches, run_config: &RunConfig, config: &Config) -> Result<()> {
    // "default" profile has a dedicated accessor; all others are looked up by name.
    let profile = match run_config.active_profile.as_ref() {
        "default" => config.get_default_profile(),
        s => config.get_profile(s),
    }.chain_err(|| ErrorKind::FailedToLoadProfile)?;
    // Parse my args
    // CLI flag takes precedence over the profile's configured base dir.
    let local_base_dir = if let Some(base_dir) = args.value_of("base-dir") {
        base_dir
    } else {
        profile.local_base_dir.as_ref()
            .ok_or(Error::from_kind(ErrorKind::NoLocalBaseDir))?
    };
    // "project" and "resource" are required args, so unwrap cannot fail.
    let resource = Resource {
        project: args.value_of("project").unwrap().to_string(), // Safe
        name: args.value_of("resource").unwrap().to_string(), // Safe
    };
    debug!("Resource path is = '{:#?}'", resource.to_path(local_base_dir, $resources_prefix));
    // "timeout" has a default value, so value_of cannot return None.
    let timeout = Duration::from_secs(
        args.value_of("timeout").unwrap() // safe unwrap
            .parse()
            .chain_err(|| ErrorKind::FailedToParseDuration)?
    );
    let progress_bar = !args.is_present("no-progress-bar");
    let show_all = args.is_present("show-all");
    // "output" has a default value ("human"), so unwrap is safe.
    let output_type = args.value_of("output").unwrap() // Safe
        .parse::<OutputType>()
        .chain_err(|| ErrorKind::FailedToParseOutputType)?;
    debug!("Building commands.");
    // Build one Command per configured pipeline step; abort on first build error.
    let commands: Result<Vec<_>> = COMMANDS.iter()
        .map(|c| {
            let cwd = &resource.to_path(local_base_dir, $resources_prefix);
            build_command(c, cwd, timeout)
        })
        .collect();
    let commands = commands?;
    debug!("Running commands.");
    let results = run_commands(commands, progress_bar)?;
    debug!("Outputting results.");
    run::output_results(output_type, show_all, results.as_slice())
        .chain_err(|| ErrorKind::FailedToRunCommand)?;
    Ok(())
}
/// Splits a shell-like command string into program + arguments and wraps it
/// in a `Command` with the given working directory and timeout.
///
/// NOTE(review): the `NamedTempFile` is dropped immediately after `path()`
/// is taken, which deletes the file; only the unique path survives. Confirm
/// the command runner recreates the log file at that path.
fn build_command<T: AsRef<Path>>(command: &str, cwd: T, timeout: Duration) -> Result<Command> {
    let id = command.to_string();
    let mut parts: Vec<String> = command.split(' ').map(str::to_string).collect();
    if parts.is_empty() {
        return Err(Error::from_kind(ErrorKind::FailedToBuildCommand));
    }
    // First token is the executable, the rest (if any) are its arguments.
    let cmd = parts.remove(0);
    let args = if parts.is_empty() { None } else { Some(parts) };
    let cwd = cwd.as_ref().to_str().map(str::to_string);
    let log_path = tempfile::NamedTempFile::new()
        .chain_err(|| ErrorKind::FailedToBuildCommand)?
        .path()
        .to_path_buf();
    Ok(Command {
        id,
        cmd,
        args,
        cwd,
        log: log_path,
        timeout: Some(timeout),
    })
}
#[allow(unstable_name_collisions)] // flatten from itertools
/// Runs the commands one at a time, stopping after the first command whose
/// result set contains a failure; returns the flattened results gathered so far.
fn run_commands(commands: Vec<Command>, progress_bar: bool) -> Result<Vec<CommandResult>> {
    let mut per_command = Vec::new();
    for command in commands {
        let res = run::run(vec![command], progress_bar)
            .chain_err(|| ErrorKind::FailedToRunCommand)?;
        let any_failed = res.iter().any(|r| !r.exit_status.success());
        per_command.push(res);
        // Abort the pipeline after the first failing command.
        if any_failed {
            break;
        }
    }
    Ok(Itertools::flatten(per_command.into_iter()).collect())
}
}
}
sub_module!("infrastructure", "Do stuff with infrastructure repos", asp, images, resources);
| 39.355072 | 166 | 0.487817 |
f7b81778e2c4ae02a686355aacc99a084b6451eb | 502 | use super::vtable;
use proc_macro2::{Ident, TokenStream as HelperTokenStream};
use quote::{format_ident, quote};
/// Emits the `pub type {Interface}VPtr = *const {Interface}VTable;` alias
/// for the given COM interface identifier.
pub fn generate(interface_ident: &Ident) -> HelperTokenStream {
    let interface_name = interface_ident.to_string();
    let vptr_ident = ident(&interface_name);
    let vtable_ident = vtable::ident(&interface_name);
    quote!(
        #[allow(missing_docs)]
        pub type #vptr_ident = *const #vtable_ident;
    )
}
/// Returns the `{interface_name}VPtr` identifier used as the name of the
/// raw vtable-pointer type alias for a COM interface.
pub fn ident(interface_name: &str) -> Ident {
    format_ident!("{}VPtr", interface_name)
}
| 27.888889 | 67 | 0.693227 |
fc8e1d24c0f23667fdf6f11886d90a66028e238a | 1,813 | use prettytable::{cell, row, Row};
/// Metadata for a single playlist entry, kept as pre-formatted display strings.
#[derive(Debug)]
pub struct Playlist {
    name: String, // playlist title
    url: String, // link to the playlist
    published: String, // publish date, as display text
    video_count: String, // number of videos, as display text
    owner: String, // owning channel/user
}
impl Playlist {
pub fn new(
name: String,
url: String,
published: String,
video_count: String,
owner: String,
) -> Self {
Playlist {
name,
url,
published,
video_count,
owner,
}
}
pub fn url(&self) -> &str {
&self.url
}
pub fn to_string(&self) -> String {
format!(
"{:<100} {:<60} {:<30} {:<10}",
self.name, self.owner, self.published, self.video_count
)
}
pub fn to_row(&self, i: usize) -> Row {
row![
format!("P{}", i),
self.name,
self.owner,
self.published,
self.video_count
]
}
}
/// Metadata for a single video entry, kept as pre-formatted display strings.
#[derive(Debug)]
pub struct Video {
    name: String, // video title
    length: String, // duration, as display text
    url: String, // link to the video
    owner: String, // owning channel/user
    published: String, // publish date, as display text
}
impl Video {
pub fn new(
name: String,
length: String,
url: String,
owner: String,
published: String,
) -> Self {
Video {
name,
length,
url,
owner,
published,
}
}
pub fn url(&self) -> &str {
&self.url
}
pub fn to_string(&self) -> String {
format!(
"{:<100} {:<60} {:<30} {:<10}",
self.name, self.owner, self.published, self.length
)
}
pub fn to_row(&self, i: usize) -> Row {
row![
format!("V{}", i),
self.name,
self.owner,
self.published,
self.length
]
}
}
| 18.885417 | 67 | 0.437948 |
560904fb9ec443ed522ecaf9e8a9c0721e2d47d0 | 793 | use super::{input::Input, ComputerError, ComputerInput};
use std::{cell::RefCell, collections::VecDeque, rc::Rc};
/// FIFO input queue for the computer; shared via `Rc<RefCell<_>>` so that
/// clones of a `ListInput` feed from the same underlying queue.
#[derive(Debug, Clone)]
pub struct ListInput {
    list: Rc<RefCell<VecDeque<i64>>>, // shared queue of pending input values
}
impl ListInput {
    /// Creates an empty, shareable input queue.
    pub fn new() -> Self {
        Self { list: Rc::default() }
    }
    /// Appends a single value to the back of the queue.
    pub fn provide_single(&mut self, input: i64) {
        self.list.borrow_mut().push_back(input)
    }
    /// Appends every value produced by `input`; fails if the source fails.
    pub fn provide<I: Input>(&mut self, input: I) -> Result<(), ComputerError> {
        let data = input.get_data()?;
        self.list.borrow_mut().extend(data);
        Ok(())
    }
    /// Removes all queued values.
    pub fn clear(&mut self) {
        self.list.borrow_mut().clear()
    }
}
impl ComputerInput for ListInput {
    /// Pops the oldest queued value, or `None` when the queue is empty.
    fn get_next_input(&mut self) -> Option<i64> {
        self.list.borrow_mut().pop_front()
    }
}
| 23.323529 | 80 | 0.588903 |
f9f4ccf0f07519e9a0e7a2fe1286de1cb4d184b3 | 870 | #[no_std];
// NOTE(review): compile-fail test fixture written in pre-1.0 Rust
// (`int` type, `'self` lifetime, `#[no_std];` attribute form). The
// `//~ ERROR` annotations pin the expected diagnostics on their own line —
// do not change the code or the error text.
struct S<T> {
    contents: T,
}
impl<T> S<T> {
    fn new<U>(x: T, _: U) -> S<T> {
        S {
            contents: x,
        }
    }
}
trait Trait<T> {
    fn new<U>(x: T, y: U) -> Self;
}
struct S2 {
    contents: int,
}
impl Trait<int> for S2 {
    fn new<U>(x: int, _: U) -> S2 {
        S2 {
            contents: x,
        }
    }
}
fn main() {
    let _ = S::new::<int,f64>(1, 1.0); //~ ERROR the impl referenced by this path has 1 type parameter, but 0 type parameters were supplied
    let _ = S::<'self,int>::new::<f64>(1, 1.0); //~ ERROR this impl has no lifetime parameter
    let _: S2 = Trait::new::<int,f64>(1, 1.0); //~ ERROR the trait referenced by this path has 1 type parameter, but 0 type parameters were supplied
    let _: S2 = Trait::<'self,int>::new::<f64>(1, 1.0); //~ ERROR this trait has no lifetime parameter
}
| 22.894737 | 151 | 0.531034 |
c16e4857409671d923a5bba2902ab83a9a9832fe | 1,334 | use std::io;
/// Counting-out game team picker: reads a rhyme (word count = step size),
/// a number of players and their names from stdin, then eliminates every
/// `rhyme_number`-th player, assigning eliminated players alternately to
/// team 1 and team 2, and prints both teams.
fn main() {
    let mut line = String::new();
    io::stdin().read_line(&mut line)
        .expect("Could not read line");
    // Step size is the number of words in the rhyme.
    // NOTE(review): an empty first line yields rhyme_number == 0, making
    // `rhyme_number - 1` below underflow — confirm input is guaranteed non-empty.
    let splitted_line = line.trim().split_whitespace();
    let rhyme_number = splitted_line.count();
    line = String::new();
    io::stdin().read_line(&mut line)
        .expect("Could not read line");
    let number_of_people = line.trim().parse::<usize>().unwrap();
    let mut names: Vec<String> = Vec::with_capacity(number_of_people);
    for _ in 0..number_of_people {
        let mut line = String::new();
        io::stdin().read_line(&mut line)
            .expect("Could not read line");
        names.push(line.trim().to_string());
    }
    let mut team1 = Vec::with_capacity((number_of_people / 2) + 1);
    let mut team2 = Vec::with_capacity(number_of_people / 2);
    let mut last_index = 0;
    // Remove one player per round, counting rhyme_number positions forward
    // (wrapping) from the last removal; alternate team assignment.
    for i in 0..names.len() {
        let next_index = (last_index + (rhyme_number - 1)) % names.len();
        last_index = next_index;
        let name = names.remove(next_index);
        if i % 2 == 0 {
            team1.push(name);
        } else {
            team2.push(name);
        }
    }
    println!("{}", team1.len());
    for name in team1 {
        println!("{}", name);
    }
    println!("{}", team2.len());
    for name in team2 {
        println!("{}", name);
    }
} | 31.023256 | 73 | 0.558471 |
ccc5128d58eb8b8a7ce3abe6ecf923e79f7d6057 | 12,239 | use crate::model::ParsingContext;
use crate::pb::*;
use tract_hir::internal::*;
use tract_hir::ops;
/// Parses an ONNX `RNN` node, recording which optional input slots
/// (bias, sequence lengths, initial hidden state) and output slots
/// (Y, Y_h) are present, and wraps the op in an expansion.
pub fn rnn(
    _ctx: &ParsingContext,
    pb: &NodeProto,
) -> TractResult<(Box<dyn InferenceOp>, Vec<String>)> {
    let mut op = RNN::default();
    let mut optional_inputs = crate::model::optional_inputs(pb).skip(3);
    op.optional_bias_input = optional_inputs.next().unwrap();
    op.optional_sequence_lens_input = optional_inputs.next().unwrap();
    op.optional_initial_h_input = optional_inputs.next().unwrap();
    let mut optional_outputs = crate::model::optional_outputs(pb);
    op.optional_y_output = optional_outputs.next().unwrap();
    op.optional_y_h_output = optional_outputs.next().unwrap();
    Ok((expand(op), vec![]))
}
/// ONNX `RNN` operator: optional input/output slot indices plus the
/// activation ops for the forward and backward directions.
#[derive(Debug, Clone, new, Hash)]
pub struct RNN {
    pub optional_bias_input: Option<usize>, // input slot of B, if present
    pub optional_sequence_lens_input: Option<usize>, // input slot of sequence_lens, if present
    pub optional_initial_h_input: Option<usize>, // input slot of initial_h, if present
    pub optional_y_output: Option<usize>, // output slot of Y (full sequence), if requested
    pub optional_y_h_output: Option<usize>, // output slot of Y_h (last hidden state), if requested
    pub fore: Box<dyn TypedOp>, // forward-direction activation (tanh by default)
    pub back: Box<dyn TypedOp>, // backward-direction activation (tanh by default)
}
tract_linalg::impl_dyn_hash!(RNN);
impl Default for RNN {
    // No optional slots wired; both directions use tanh activation.
    fn default() -> RNN {
        RNN {
            optional_bias_input: None,
            optional_sequence_lens_input: None,
            optional_initial_h_input: None,
            optional_y_output: None,
            optional_y_h_output: None,
            fore: Box::new(ops::math::tanh()),
            back: Box::new(ops::math::tanh()),
        }
    }
}
impl Expansion for RNN {
    fn name(&self) -> Cow<str> {
        "RNN".into()
    }
    fn validation(&self) -> Validation {
        Validation::Rounding
    }
    op_onnx!();
    /// Shape/type inference rules mirroring the ONNX RNN spec:
    /// X: [seq_length, batch_size, input_size], W/R: [num_directions, hidden_size, …],
    /// Y: [seq_length, num_directions, batch_size, hidden_size],
    /// Y_h: [num_directions, batch_size, hidden_size].
    fn rules<'r, 'p: 'r, 's: 'r>(
        &'s self,
        s: &mut Solver<'r>,
        inputs: &'p [TensorProxy],
        outputs: &'p [TensorProxy],
    ) -> TractResult<()> {
        // Arity depends on which optional slots were present in the proto.
        let input_count = 3
            + self.optional_bias_input.is_some() as usize
            + self.optional_sequence_lens_input.is_some() as usize
            + self.optional_initial_h_input.is_some() as usize;
        check_input_arity(&inputs, input_count)?;
        let output_count =
            self.optional_y_output.is_some() as usize + self.optional_y_h_output.is_some() as usize;
        check_output_arity(&outputs, output_count)?;
        s.equals(&inputs[0].datum_type, &inputs[1].datum_type)?;
        s.equals(&inputs[0].datum_type, &inputs[2].datum_type)?;
        s.equals(&inputs[0].datum_type, &outputs[0].datum_type)?;
        s.equals(&inputs[0].rank, 3)?;
        s.equals(&inputs[1].rank, 3)?;
        s.equals(&inputs[2].rank, 3)?;
        s.equals(&inputs[1].shape[0], &inputs[2].shape[0])?; // num_directions
        s.equals(&inputs[1].shape[1], &inputs[2].shape[1])?; // hidden_size
        s.equals(&inputs[1].shape[1], &inputs[2].shape[2])?; // hidden_size
        if let Some(bias) = self.optional_bias_input {
            s.equals(&inputs[bias].datum_type, &inputs[0].datum_type)?;
            s.equals(&inputs[bias].rank, 2)?;
            s.equals(&inputs[bias].shape[0], &inputs[2].shape[0])?; // num_directions
            s.equals(&inputs[bias].shape[1], 2 * inputs[2].shape[2].bex())?; // 2 * hidden_size
        }
        if let Some(seq_len) = self.optional_sequence_lens_input {
            s.equals(&inputs[seq_len].rank, 1)?;
            s.equals(&inputs[seq_len].shape[0], &inputs[0].shape[1])?; // batch_size
        }
        if let Some(initial_h) = self.optional_initial_h_input {
            s.equals(&inputs[initial_h].datum_type, &inputs[0].datum_type)?;
            s.equals(&inputs[initial_h].rank, 3)?;
            s.equals(&inputs[initial_h].shape[0], &inputs[1].shape[0])?; // num_directions
            s.equals(&inputs[initial_h].shape[1], &inputs[0].shape[1])?; // batch_size
            s.equals(&inputs[initial_h].shape[2], &inputs[2].shape[2])?; // hidden_size
        }
        if let Some(y) = self.optional_y_output {
            s.equals(&outputs[y].datum_type, &inputs[0].datum_type)?;
            s.equals(&outputs[y].rank, 4)?;
            s.equals(&outputs[y].shape[0], &inputs[0].shape[0])?; // seq_lenght
            s.equals(&outputs[y].shape[1], &inputs[1].shape[0])?; // num_directions
            s.equals(&outputs[y].shape[2], &inputs[0].shape[1])?; // batch_size
            s.equals(&outputs[y].shape[3], &inputs[2].shape[2])?; // hidden_size
        }
        if let Some(y_h) = self.optional_y_h_output {
            s.equals(&outputs[y_h].datum_type, &inputs[0].datum_type)?;
            s.equals(&outputs[y_h].rank, 3)?;
            s.equals(&outputs[y_h].shape[0], &inputs[1].shape[0])?; // num_directions
            s.equals(&outputs[y_h].shape[1], &inputs[0].shape[1])?; // batch_size
            s.equals(&outputs[y_h].shape[2], &inputs[2].shape[2])?; // hidden_size
        }
        Ok(())
    }
    fn nboutputs(&self) -> TractResult<usize> {
        Ok(self.optional_y_output.is_some() as usize + self.optional_y_h_output.is_some() as usize)
    }
    /// Wires the forward pass, and — if W's first dim is 2 (bidirectional) —
    /// the backward pass too, concatenating the per-direction outputs along
    /// the num_directions axis.
    fn wire(
        &self,
        prefix: &str,
        target: &mut TypedModel,
        inputs: &[OutletId],
    ) -> TractResult<TVec<OutletId>> {
        use tract_hir::tract_core::ops::array::TypedConcat;
        let fore = self.wire_one_side(prefix, target, inputs, 0)?;
        let w_fact = target.outlet_fact(inputs[1])?;
        if w_fact.shape.dim(0) == 2.into() {
            let back = self.wire_one_side(&format!("{}.back", prefix), target, inputs, 1)?;
            let mut outputs = tvec!(0.into(); self.nboutputs()?);
            if let Some(ix) = self.optional_y_output {
                // Y: concat fore/back on axis 1 (num_directions).
                outputs[ix] = target.wire_node(
                    format!("{}.merge_y_output", prefix),
                    TypedConcat::concat_vars(1, 2),
                    &[fore[ix], back[ix]],
                )?[0];
            }
            if let Some(ix) = self.optional_y_h_output {
                // Y_h: concat fore/back on axis 0 (num_directions).
                outputs[ix] = target.wire_node(
                    format!("{}.merge_y_h_output", prefix),
                    TypedConcat::concat_vars(0, 2),
                    &[fore[ix], back[ix]],
                )?[0];
            }
            Ok(outputs)
        } else {
            Ok(fore)
        }
    }
}
impl RNN {
    /// Builds the scan body for one direction (`dir` 0 = forward, 1 = backward)
    /// implementing Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi), and wires it
    /// into `target` as a Scan node. Returns the outlets for the requested
    /// outputs (Y and/or Y_h).
    #[allow(non_snake_case)]
    fn wire_one_side(
        &self,
        prefix: &str,
        target: &mut TypedModel,
        inputs: &[OutletId],
        dir: usize,
    ) -> TractResult<TVec<OutletId>> {
        use tract_hir::ops::{array, math, matmul, scan};
        let x_fact = target.outlet_fact(inputs[0])?.clone();
        let r_fact = target.outlet_fact(inputs[2])?;
        let b_size = x_fact.shape.dim(1).to_integer().unwrap() as usize;
        let h_size = r_fact.shape.dim(2).to_integer().unwrap() as usize;
        // Backward direction scans the sequence in reverse (negative chunk).
        let chunk = if dir == 0 { 1 } else { -1 };
        let mut body = TypedModel::default();
        let mut outer_inputs = vec![];
        let mut input_mapping = vec![];
        // Helper: wire a node into the outer model with a prefixed name.
        macro_rules! target_wire {
            ($name: ident = $op: expr, $($param: expr),*) => {
                let $name = target.wire_node(
                    format!("{}.{}", prefix, stringify!($name)),
                    $op, [$($param),*].as_ref())?[0];
            }
        };
        // Helper: wire a node into the scan body with a prefixed name.
        macro_rules! wire {
            ($name: ident = $op: expr, $($param: expr),*) => {
                let $name = body.wire_node(
                    format!("{}.{}", prefix, stringify!($name)),
                    $op, [$($param),*].as_ref())?[0];
            }
        };
        // X: onnx interface: [seq_length, batch_size, input_size]
        // scan outer interface: idem
        // scann inner interface: [chunk=1, batch_size, input_size]
        // onnx inner interface: [batch_size, input_size]
        outer_inputs.push(inputs[0]);
        input_mapping.push(scan::InputMapping::Scan { slot: 0, axis: 0, chunk });
        let mut x_source_fact = x_fact.clone();
        x_source_fact.shape.set_dim(0, 1.to_dim())?;
        let x_source = body.add_source("x_source", x_source_fact)?.into();
        wire!(Xt = AxisOp::Rm(0), x_source);
        // W: onnx interface: [num_directions, 3*hidden_size, input_size]
        // scan interfaces: [3*hidden_size, input_size]
        target_wire!(w = AxisOp::Rm(0), inputs[1]);
        outer_inputs.push(w);
        input_mapping.push(scan::InputMapping::Full { slot: 1 });
        let W = body.add_source("w", target.outlet_fact(w)?.clone())?.into();
        // R: onnx interface: [num_directions, 3*hidden_size, hidden_size]
        // scan interfaces: [3*hidden_size, hidden_size]
        target_wire!(r_dir = array::Slice::new(0, dir, dir + 1), inputs[2]);
        target_wire!(r = AxisOp::Rm(0), r_dir);
        outer_inputs.push(r);
        input_mapping.push(scan::InputMapping::Full { slot: 2 });
        let R = body.add_source("r", target.outlet_fact(r)?.clone())?.into();
        // B: onnx interface: [num_directions, 6*hidden_size]
        let b = if let Some(slot) = self.optional_bias_input {
            target_wire!(b_dir = array::Slice::new(0, dir, dir + 1), inputs[slot]);
            outer_inputs.push(b_dir);
            input_mapping.push(scan::InputMapping::Full { slot });
            let b = body.add_source("b", target.outlet_fact(b_dir)?.clone())?.into();
            Some(b)
        } else {
            None
        };
        if let Some(slot) = self.optional_sequence_lens_input {
            outer_inputs.push(inputs[slot]);
        }
        // initial h, optional: onnx: [num_directions, batch_size, hidden_size]
        // scan outer: [chunk=1, batch_size, hidden_size]
        // scan inner: [chunk=1, batch_size, hidden_size]
        // onnx inner: [batch_size, hidden_size]
        let initializer = if let Some(initial_h_input) = self.optional_initial_h_input {
            target_wire!(h = AxisOp::Rm(0), inputs[initial_h_input]);
            target_wire!(h_chunk = AxisOp::Add(0), h);
            outer_inputs.push(h_chunk);
            scan::StateInitializer::FromInput(initial_h_input)
        } else {
            // No initial state given: start from zeros.
            scan::StateInitializer::Value(
                tract_ndarray::Array3::<f32>::zeros((1, b_size, h_size)).into_arc_tensor(),
            )
        };
        input_mapping.push(scan::InputMapping::State { initializer });
        let h_source = body
            .add_source(
                "h_source",
                TypedFact::dt_shape(x_fact.datum_type, [1, b_size, h_size].as_ref())?,
            )?
            .into();
        wire!(Ht_1 = AxisOp::Rm(0), h_source);
        // Combined bias bi = Wbi + Rbi, sliced out of the packed B tensor.
        let bias = if let Some(b) = b {
            wire!(Wbi = array::Slice::new(1, 0 * h_size, 1 * h_size), b);
            wire!(Rbi = array::Slice::new(1, 1 * h_size, 2 * h_size), b);
            wire!(bi = math::add::bin_typed(), Wbi, Rbi);
            Some(bi)
        } else {
            None
        };
        // Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)
        wire!(Xt_WiT = matmul::MatMul::default().with_b_trans(true), Xt, W);
        wire!(Ht_1_RiT = matmul::MatMul::default().with_b_trans(true), Ht_1, R);
        wire!(ht0 = math::add::bin_typed(), Xt_WiT, Ht_1_RiT);
        let mut ht0 = ht0;
        if let Some(bias) = bias {
            wire!(ht_bias = math::add::bin_typed(), ht0, bias);
            ht0 = ht_bias;
        }
        wire!(Ht = self.fore.clone(), ht0);
        wire!(y_h = AxisOp::Add(0), Ht);
        body.set_output_outlets(&[y_h])?;
        let output_mapping = scan::OutputMapping {
            state: true,
            axis: 0,
            chunk,
            full_dim_hint: None,
            last_value_slot: self.optional_y_h_output,
            full_slot: self.optional_y_output,
        };
        let scan_outputs = target.wire_node(
            prefix,
            scan::Scan::new(
                body,
                input_mapping,
                vec![output_mapping],
                self.optional_sequence_lens_input,
            )?,
            &outer_inputs,
        )?;
        let mut result = tvec!();
        if let Some(slot) = self.optional_y_output {
            // Reinsert the num_directions axis dropped inside the scan body.
            target_wire!(y = AxisOp::Add(1), scan_outputs[slot]);
            result.push(y);
        }
        if let Some(slot) = self.optional_y_h_output {
            result.push(scan_outputs[slot]);
        }
        Ok(result)
    }
}
| 38.608833 | 100 | 0.553558 |
48e27149ce07b6f729de513ece7a341b53ebb2c3 | 29,481 | pub mod net;
pub mod proto;
use self::net::*;
use super::ConvergenceLayerAgent;
use async_trait::async_trait;
use bp7::{Bundle, ByteBuffer};
use futures::lock::Mutex;
//use futures_util::stream::StreamExt;
use log::{debug, error, info, warn};
use std::collections::HashMap;
use std::convert::TryFrom;
use std::net::SocketAddr;
use std::ops::DerefMut;
use std::sync::Arc;
use tokio::net::tcp::{OwnedReadHalf, OwnedWriteHalf};
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio::task::JoinHandle;
use tokio::time;
//use std::net::TcpStream;
use super::tcp::proto::*;
use crate::CONFIG;
use anyhow::anyhow;
use anyhow::bail;
use bytes::Bytes;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpListener;
use tokio::net::TcpStream;
use tokio::time::Duration;
// TODO
// Implements draft version 24 of the TCPCL specification.
// Sending/receiving of bundles always uses the maximum allowed packet size; no segmentation.
// TLS (ssl) is not implemented yet.
/*
There is one TcpConvergenceLayer object that spawns one Listener task.
The convergence layer holds all currently active TCPCL sessions.
A new session is established by either receiving a new connection in the Listener or by sending bundles to a new destination.
The session is established by first creating a TcpConnection, exchanging session information and then transitioning to a TcpSession.
Per session a sending and receiving task exist, encapsulating the respective parts of the tcp connection.
A third TcpSession task maintains session state and sends/receives bundles. TcpConvergenceLayer communicates via channels with TcpSession.
*/
/// Keepalive interval (seconds) offered during session negotiation.
const KEEPALIVE: u16 = 30;
/// Maximum segment size (bytes) we advertise as receivable.
const SEGMENT_MRU: u64 = 64000;
/// Maximum total transfer size (bytes) we advertise as receivable.
const TRANSFER_MRU: u64 = 64000;
/// Capacity of the internal mpsc channels between session tasks.
const INTERNAL_CHANNEL_BUFFER: usize = 50;
/// Represents a tcp convergence layer session.
/// Convergence layer connection is established at this point.
struct TcpClSession {
    /// Transmitter to tcp sending task
    tx_session_outgoing: mpsc::Sender<TcpClPacket>,
    /// Receiver from tcp receiving task
    rx_session_incoming: mpsc::Receiver<TcpClPacket>,
    /// Queue of all outgoing packages; each bundle carries a oneshot channel
    /// used to report send success back to the convergence layer.
    rx_session_queue: mpsc::Receiver<(ByteBuffer, oneshot::Sender<bool>)>,
    /// Local session parameters
    data_local: SessInitData,
    /// Remote session parameters
    data_remote: SessInitData,
    /// Last transaction id, incremented by 1
    last_tid: u64,
}
impl TcpClSession {
    /// Run this future until the connection is closed
    async fn run(mut self) {
        loop {
            // session is idle, try to send/receive
            tokio::select! {
                // poll for new incoming packages
                received = self.rx_session_incoming.recv() => {
                    if let Some(packet) = received {
                        match self.receive(packet).await {
                            Ok(stop) => if stop { break; },
                            Err(err) => {
                                error!("error while receiving: {:?}", err);
                                break;
                            },
                        }
                    } else {
                        // channel closed: receiver task has terminated
                        break;
                    }
                },
                // send outgoing packages
                bundle = self.rx_session_queue.recv() => {
                    if let Some(message) = bundle {
                        debug!("Sending bundle");
                        self.send(message).await;
                    } else {
                        break;
                    }
                }
            }
        }
    }
    /// Receive a new packet.
    /// Returns once transfer is finished and session is idle again.
    /// Result indicates whether connection is closed (true).
    async fn receive(&mut self, packet: TcpClPacket) -> anyhow::Result<bool> {
        match packet {
            // session is terminated, send ack and return with true
            TcpClPacket::SessTerm(data) => {
                if !data.flags.contains(SessTermFlags::REPLY) {
                    self.tx_session_outgoing
                        .send(TcpClPacket::SessTerm(SessTermData {
                            flags: SessTermFlags::REPLY,
                            reason: data.reason,
                        }))
                        .await?;
                }
                Ok(true)
            }
            // receive a bundle
            TcpClPacket::XferSeg(data) => {
                // A transfer must start with START; END without START is invalid here.
                if data.flags.contains(XferSegmentFlags::END)
                    && !data.flags.contains(XferSegmentFlags::START)
                {
                    return Err(anyhow!(
                        "Received XferSeg with end flag, unexpected in idle mode"
                    ));
                }
                if data.flags.is_empty() {
                    return Err(anyhow!(
                        "Received XferSeg with no flags, unexpected in idle mode"
                    ));
                }
                let mut vec = data.buf.to_vec();
                // Ack the first segment immediately.
                self.tx_session_outgoing
                    .send(TcpClPacket::XferAck(XferAckData {
                        tid: data.tid,
                        len: data.len,
                        flags: XferSegmentFlags::empty(),
                    }))
                    .await?;
                let mut connection_closing = false;
                // receive further packages until transfer is finished
                if !data.flags.contains(XferSegmentFlags::END) {
                    let mut len = data.len;
                    // Refuse transfers whose segments exceed our advertised MRU.
                    if data.len > self.data_local.segment_mru {
                        self.tx_session_outgoing
                            .send(TcpClPacket::XferRefuse(XferRefuseData {
                                reason: XferRefuseReasonCode::NotAcceptable,
                                tid: data.tid,
                            }))
                            .await?;
                    }
                    loop {
                        if let Some(packet) = self.rx_session_incoming.recv().await {
                            match packet {
                                TcpClPacket::SessTerm(mut data) => {
                                    // Peer wants to close: echo the term as a reply,
                                    // but finish receiving the current transfer first.
                                    connection_closing = true;
                                    data.flags |= SessTermFlags::REPLY;
                                    self.tx_session_outgoing
                                        .send(TcpClPacket::SessTerm(data))
                                        .await?;
                                }
                                TcpClPacket::XferSeg(data) => {
                                    vec.append(&mut data.buf.to_vec());
                                    len += data.len;
                                    // Ack with the cumulative received length.
                                    self.tx_session_outgoing
                                        .send(TcpClPacket::XferAck(XferAckData {
                                            tid: data.tid,
                                            len,
                                            flags: XferSegmentFlags::empty(),
                                        }))
                                        .await?;
                                    if data.flags.contains(XferSegmentFlags::END) {
                                        break;
                                    }
                                }
                                TcpClPacket::MsgReject(data) => {
                                    warn!("Received message reject: {:?}", data);
                                }
                                _ => {
                                    return Err(anyhow!("Unexpected packet, {:?}", packet));
                                }
                            }
                        } else {
                            return Err(anyhow!("Connection closed while receiving transfer"));
                        }
                    }
                }
                debug!("Parsing bundle from received tcp bytes");
                // parse bundle
                match Bundle::try_from(vec) {
                    Ok(bundle) => {
                        // Hand the bundle to core processing off this task.
                        tokio::spawn(async move {
                            if let Err(err) = crate::core::processing::receive(bundle).await {
                                error!("Failed to process bundle: {}", err);
                            }
                        });
                    }
                    Err(err) => {
                        error!("Failed to parse bundle: {}", err);
                        self.tx_session_outgoing
                            .send(TcpClPacket::XferRefuse(XferRefuseData {
                                reason: XferRefuseReasonCode::NotAcceptable,
                                tid: data.tid,
                            }))
                            .await?;
                    }
                }
                Ok(connection_closing)
            }
            _ => Err(anyhow!("Unexpected packet, {:?}", packet)),
        }
    }
    /// Send outgoing bundle.
    /// Result indicates whether connection is closed (true).
    async fn send(&mut self, data: (ByteBuffer, tokio::sync::oneshot::Sender<bool>)) -> bool {
        let mut byte_vec = Vec::new();
        let mut acked = 0u64;
        self.last_tid += 1;
        let (vec, tx_result) = data;
        // split bundle data into chunks the size of remote maximum segment size
        for bytes in vec.chunks(self.data_remote.segment_mru as usize) {
            let buf = Bytes::copy_from_slice(bytes);
            let len = buf.len() as u64;
            debug!("bytes len {}", len);
            let packet_data = XferSegData {
                flags: XferSegmentFlags::empty(),
                buf,
                len,
                tid: self.last_tid,
            };
            byte_vec.push(packet_data);
        }
        if byte_vec.is_empty() {
            // NOTE(review): typo in log message ("Emtpy") — left untouched here.
            warn!("Emtpy bundle transfer, aborting");
            if tx_result.send(false).is_err() {
                error!("Internal result channel error");
                return true;
            }
            return false;
        }
        // Mark transfer boundaries on the first and last segments.
        byte_vec.first_mut().unwrap().flags |= XferSegmentFlags::START;
        byte_vec.last_mut().unwrap().flags |= XferSegmentFlags::END;
        // push packets to send task
        for packet in byte_vec {
            if let Err(err) = self
                .tx_session_outgoing
                .send(TcpClPacket::XferSeg(packet))
                .await
            {
                error!("Internal sender error, {}", err);
                if tx_result.send(false).is_err() {
                    error!("Internal result channel error");
                    return true;
                }
                return true;
            }
        }
        // wait for all acks
        while acked < vec.len() as u64 {
            if let Some(received) = self.rx_session_incoming.recv().await {
                match received {
                    TcpClPacket::XferAck(ack_data) => {
                        // Acks carry the cumulative byte count for this tid.
                        if ack_data.tid == self.last_tid {
                            acked = ack_data.len;
                        }
                    }
                    TcpClPacket::XferRefuse(refuse_data) => {
                        warn!("Transfer refused, {:?}", refuse_data.reason);
                        if tx_result.send(false).is_err() {
                            error!("Internal result channel error");
                            return true;
                        }
                        return false;
                    }
                    TcpClPacket::MsgReject(msg_reject_data) => {
                        warn!("Message rejected, {:?}", msg_reject_data.reason);
                        if tx_result.send(false).is_err() {
                            error!("Internal result channel error");
                            return true;
                        }
                        return true;
                    }
                    _ => warn!("Unexpected packet while waiting for acks"),
                }
            }
        }
        debug!("All acked");
        // indicate successful transfer
        if tx_result.send(true).is_err() {
            error!("Internal result channel error");
            return true;
        }
        false
    }
}
/// Task half that reads TCPCL packets from the socket and forwards them to
/// the session task, enforcing the keepalive timeout on inbound traffic.
struct TcpClReceiver {
    rx_tcp: OwnedReadHalf, // read half of the tcp stream
    tx_session_incoming: mpsc::Sender<TcpClPacket>, // parsed packets to the session task
    timeout: u16, // keepalive interval in seconds
}
/// Task half that serializes session packets onto the socket and emits
/// periodic keepalives when the session is idle.
struct TcpClSender {
    tx_tcp: OwnedWriteHalf, // write half of the tcp stream
    rx_session_outgoing: mpsc::Receiver<TcpClPacket>, // packets from the session task
    timeout: u16, // keepalive interval in seconds
}
impl TcpClReceiver {
    /// Run receiver task and check keepalive timeout.
    async fn run(mut self) {
        let mut interval = time::interval(Duration::from_secs(self.timeout.into()));
        // First tick completes immediately; consume it before the loop.
        interval.tick().await;
        loop {
            tokio::select! {
                parsed_packet = TcpClPacket::deserialize(&mut self.rx_tcp) => {
                    match parsed_packet {
                        Ok(packet) => {
                            debug!("Received and successfully parsed packet");
                            // Keepalives are consumed here; everything else is
                            // forwarded to the session task.
                            if let TcpClPacket::KeepAlive = packet {
                                debug!("Received keepalive");
                            } else {
                                self.send_packet(packet).await;
                            }
                        }
                        Err(err) => {
                            error!("Failed parsing package: {:?}", err);
                            break;
                        }
                    }
                }
                // if above future doesn't complete in keepalive time interval, teardown the session
                _ = interval.tick() => {
                    debug!("Keepalive timeout");
                    self.send_packet(TcpClPacket::SessTerm(SessTermData {
                        flags: SessTermFlags::empty(),
                        reason: SessTermReasonCode::IdleTimeout,
                    })).await;
                    break;
                }
            }
        }
    }
    /// Forwards a packet to the session task via the internal channel.
    async fn send_packet(&mut self, packet: TcpClPacket) {
        if let Err(err) = self.tx_session_incoming.send(packet).await {
            error!("Error while sending via internal channel: {}", err);
        }
    }
}
impl TcpClSender {
    /// Run sender task and check keepalive timeout.
    async fn run(mut self) {
        let mut interval = time::interval(Duration::from_secs(self.timeout.into()));
        // First tick completes immediately; consume it before the loop.
        interval.tick().await;
        loop {
            tokio::select! {
                packet = self.rx_session_outgoing.recv() => {
                    if let Some(packet) = packet {
                        self.send_packet(&packet).await;
                        if let TcpClPacket::SessTerm(_) = packet {
                            //breaks loop, tasks finished, dropped sender, connection closed
                            break;
                        }
                    }
                }
                // periodically wait for keepalive timeout and send packet if above future doesn't complete first
                _ = interval.tick() => {
                    debug!("Keepalive send");
                    self.send_packet(&TcpClPacket::KeepAlive).await;
                }
            }
        }
    }
    /// Serializes a packet onto the socket and flushes; errors are logged only.
    async fn send_packet(&mut self, packet: &TcpClPacket) {
        if let Err(err) = packet.serialize(&mut self.tx_tcp).await {
            error!("Error while serializing packet: {}", err);
        }
        if let Err(err) = self.tx_tcp.flush().await {
            error!("Error while flushing tcp sending queue: {}", err);
        }
    }
}
/// Initial tcp connection.
/// Session not yet established.
struct TcpConnection {
    stream: TcpStream, // raw socket, split into halves once the session starts
    addr: SocketAddr, // peer address, used as the session map key
}
impl TcpConnection {
    /// Session parameter negotiation.
    ///
    /// Sends our `SESS_INIT`, reads the peer's, and agrees on the smaller of
    /// the two keepalive intervals. Returns `(local, remote)` parameters.
    async fn negotiate_session(&mut self) -> anyhow::Result<(SessInitData, SessInitData)> {
        let node_id = (*CONFIG.lock()).host_eid.to_string();
        let mut sess_init_data = SessInitData {
            keepalive: KEEPALIVE,
            segment_mru: SEGMENT_MRU,
            transfer_mru: TRANSFER_MRU,
            node_id,
        };
        let session_init = TcpClPacket::SessInit(sess_init_data.clone());
        session_init.serialize(&mut self.stream).await?;
        self.stream.flush().await?;
        let response = TcpClPacket::deserialize(&mut self.stream).await?;
        debug!("Received session parameters");
        if let TcpClPacket::SessInit(mut data) = response {
            // Both sides use the minimum of the two offered keepalive intervals.
            let keepalive = sess_init_data.keepalive.min(data.keepalive);
            sess_init_data.keepalive = keepalive;
            data.keepalive = keepalive;
            Ok((sess_init_data, data))
        } else {
            Err(TcpClError::UnexpectedPacket.into())
        }
    }
    /// Initial contact header exchange: send ours, then read the peer's.
    async fn exchange_contact_header(&mut self) -> anyhow::Result<ContactHeaderFlags> {
        self.send_contact_header(ContactHeaderFlags::default())
            .await?;
        self.receive_contact_header().await
    }
    /// Sends the 6-byte contact header: magic "dtn!", version 4, flags.
    async fn send_contact_header(&mut self, flags: ContactHeaderFlags) -> anyhow::Result<()> {
        // Bug fix: `write` may perform a partial write and silently drop the
        // remainder; `write_all` guarantees the whole magic is sent.
        self.stream.write_all(b"dtn!").await?;
        self.stream.write_u8(4).await?;
        self.stream.write_u8(flags.bits()).await?;
        self.stream.flush().await?;
        Ok(())
    }
    /// Reads and validates the peer's contact header, returning its flags.
    async fn receive_contact_header(&mut self) -> anyhow::Result<ContactHeaderFlags> {
        let mut buf: [u8; 6] = [0; 6];
        self.stream.read_exact(&mut buf).await?;
        if &buf[0..4] != b"dtn!" {
            bail!("Invalid magic");
        }
        if buf[4] != 4 {
            bail!("Unsupported version");
        }
        Ok(ContactHeaderFlags::from_bits_truncate(buf[5]))
    }
    /// Establish a tcp session on this connection and insert it into a session list.
    ///
    /// Spawns the receiver, sender and session tasks; the session is reachable
    /// afterwards through the queue sender stored in `sessions`.
    async fn connect(
        mut self,
        sessions: &mut impl DerefMut<
            Target = HashMap<SocketAddr, mpsc::Sender<(ByteBuffer, oneshot::Sender<bool>)>>,
        >,
    ) {
        // Phase 1
        debug!("Exchanging contact header, {}", self.addr);
        if let Err(err) = self.exchange_contact_header().await {
            error!(
                "Failed to exchange contact header with {}: {}",
                self.addr, err
            );
        }
        // Phase 2
        debug!("Negotiating session parameters, {}", self.addr);
        match self.negotiate_session().await {
            Ok((local_parameters, remote_parameters)) => {
                // channel between receiver task and session task, incoming packets
                let (tx_session_incoming, rx_session_incoming) =
                    mpsc::channel::<TcpClPacket>(INTERNAL_CHANNEL_BUFFER);
                // channel between sender task and session task, outgoing packets
                let (tx_session_outgoing, rx_session_outgoing) =
                    mpsc::channel::<TcpClPacket>(INTERNAL_CHANNEL_BUFFER);
                // channel between convergence layer and session, bundle queue
                let (tx_session_queue, rx_session_queue) =
                    mpsc::channel::<(ByteBuffer, oneshot::Sender<bool>)>(INTERNAL_CHANNEL_BUFFER);
                let (rx_tcp, tx_tcp) = self.stream.into_split();
                // Inbound traffic is bounded by the REMOTE keepalive interval,
                // outbound keepalives follow our LOCAL interval.
                let rx_task = TcpClReceiver {
                    rx_tcp,
                    tx_session_incoming,
                    timeout: remote_parameters.keepalive,
                };
                let tx_task = TcpClSender {
                    tx_tcp,
                    rx_session_outgoing,
                    timeout: local_parameters.keepalive,
                };
                let session_task = TcpClSession {
                    tx_session_outgoing,
                    rx_session_incoming,
                    rx_session_queue,
                    data_local: local_parameters,
                    data_remote: remote_parameters,
                    last_tid: 0,
                };
                tokio::spawn(rx_task.run());
                tokio::spawn(tx_task.run());
                tokio::spawn(session_task.run());
                // insert new session into session list, provides lookup for bundle sending
                sessions.insert(self.addr, tx_session_queue);
                info!("Started TCP session for {}", self.addr);
            }
            Err(err) => error!("Failed to negotiate session: {}", err),
        }
    }
}
/// Accept-loop task: upgrades every inbound TCP connection to a TCPCL
/// session and registers it in the shared session map.
pub struct Listener {
    tcp_listener: TcpListener, // bound listener socket
    sessions: Arc<Mutex<SessionMap>>, // session map shared with the convergence layer
}
impl Listener {
    /// Accepts connections forever; each successful accept is upgraded to a
    /// TCPCL session and inserted into the shared session map.
    async fn run(self) {
        loop {
            match self.tcp_listener.accept().await {
                Ok((stream, addr)) => {
                    info!("Incoming connection from: {:?}", addr);
                    let connection = TcpConnection { stream, addr };
                    // establish session and insert into shared session list
                    let mut sessions = self.sessions.lock().await;
                    connection.connect(&mut sessions).await;
                }
                Err(e) => {
                    error!("Couldn't get client: {:?}", e)
                }
            }
        }
    }
}
impl TcpConvergenceLayer {
    /// Creates a convergence layer listening on `port` (falls back to 4556)
    /// with an empty session map and no listener task yet.
    pub fn new(port: Option<u16>) -> TcpConvergenceLayer {
        TcpConvergenceLayer {
            local_port: port.unwrap_or(4556),
            sessions: Arc::new(Mutex::new(HashMap::new())),
            listener: Arc::new(Mutex::new(None)),
        }
    }
    /// Send bundles via an existing session
    /// Session should have been established in advance.
    fn send_bundles(
        &self,
        addr: &SocketAddr,
        ready: &[ByteBuffer],
        sessions: &mut impl DerefMut<Target = SessionMap>,
    ) -> anyhow::Result<bool> {
        if let Some(sender) = sessions.get(addr) {
            debug!("Using existing session for {}", addr);
            let mut results = Vec::new();
            for bundle in ready {
                debug!("Sending bundle {:?}", bundle);
                // unfortunately not possible to avoid cloning, atomic reference counting would be needed in API
                // backchannel that responds whether bundle send was successful
                let (tx, rx) = oneshot::channel::<bool>();
                sender.send((bundle.clone(), tx)).await?;
                let successful = rx.await?;
                results.push(successful);
            }
            // All bundles are attempted before failure is reported: the
            // overall result is false if any single send failed.
            for result in results {
                if !result {
                    return Ok(false);
                }
            }
            Ok(true)
        } else {
            warn!("No session found for {} but expected", addr);
            Ok(false)
        }
    }
}
type SessionMap = HashMap<SocketAddr, mpsc::Sender<(ByteBuffer, oneshot::Sender<bool>)>>;
/// TCPCL convergence layer: owns the listener task handle and the map of
/// active sessions keyed by peer address.
#[derive(Clone, Default, Debug)]
pub struct TcpConvergenceLayer {
    local_port: u16, // port the listener binds to
    // Session list, new sessions can either be added by [`Listener`] or [`scheduled_submission`]
    sessions: Arc<Mutex<SessionMap>>,
    listener: Arc<Mutex<Option<JoinHandle<()>>>>, // handle of the spawned accept loop
}
#[async_trait]
impl ConvergenceLayerAgent for TcpConvergenceLayer {
async fn setup(&mut self) {
let tcp_listener = TcpListener::bind(("0.0.0.0", self.local_port))
.await
.expect("Couldn't create TCP listener");
let listener = Listener {
tcp_listener,
sessions: self.sessions.clone(),
};
*self.listener.lock().await = Some(tokio::spawn(listener.run()));
}
fn port(&self) -> u16 {
self.local_port
}
fn name(&self) -> &'static str {
"tcp"
}
async fn scheduled_submission(&self, dest: &str, ready: &[ByteBuffer]) -> bool {
let addr = dest.parse().unwrap();
// lock has to be held across querying and inserting a new session
let mut session_lock = self.sessions.lock().await;
if session_lock.get(&addr).is_none() {
info!("New connection to {}", dest);
match TcpStream::connect(addr).await {
Ok(stream) => {
let connection = TcpConnection { stream, addr };
connection.connect(&mut session_lock).await;
}
Err(err) => {
warn!("Error connecting to {}, {:?}", dest, err);
return false;
}
}
}
match self.send_bundles(&addr, ready, &mut session_lock).await {
Ok(result) => result,
Err(err) => {
warn!("error while sending bundles: {}", err);
session_lock.remove(&addr);
false
}
}
}
}
impl std::fmt::Display for TcpConvergenceLayer {
    /// Human-readable identifier; matches `ConvergenceLayerAgent::name`.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.write_str("tcp")
    }
}
#[cfg(test)]
mod tests {
    use std::sync::atomic::{AtomicU64, Ordering};
    use super::proto::XferSegData;
    use crate::cla::tcp::net::TcpClPacket;
    use crate::cla::tcp::proto::SessInitData;
    use crate::cla::tcp::proto::XferSegmentFlags;
    use anyhow::bail;
    use bytes::Bytes;
    use futures::executor::block_on;
    /// Split `buf` into transfer segments of at most `config.segment_mru`
    /// bytes, tagging the first segment with START and the last with END.
    ///
    /// Fails when the whole bundle exceeds the peer's `transfer_mru`.
    pub(crate) fn generate_xfer_segments(
        config: &SessInitData,
        buf: Bytes,
    ) -> anyhow::Result<Vec<XferSegData>> {
        // Process-wide counter so each transfer gets a fresh id.
        static LAST_TRANSFER_ID: AtomicU64 = AtomicU64::new(0);
        // TODO: check for wrap around and SESS_TERM if overflow occurs
        let tid = LAST_TRANSFER_ID.fetch_add(1, Ordering::SeqCst);
        let mut segs = Vec::new();
        if buf.len() > config.transfer_mru as usize {
            bail!("bundle too big");
        }
        // One extra segment when the data does not divide evenly.
        let fitting = if buf.len() as u64 % config.segment_mru == 0 {
            0
        } else {
            1
        };
        let num_segs = (buf.len() as u64 / config.segment_mru) + fitting;
        for i in 0..num_segs {
            let mut flags = XferSegmentFlags::empty();
            if i == 0 {
                flags |= XferSegmentFlags::START;
            }
            if i == num_segs - 1 {
                flags |= XferSegmentFlags::END;
            }
            let len = if i == num_segs - 1 {
                // Last segment carries all remaining bytes. Bug fix: the
                // previous `buf.len() % segment_mru` computation yielded 0
                // (an empty final segment, silently losing the last chunk)
                // whenever the payload was an exact multiple of the
                // segment MRU and more than one segment was needed.
                buf.len() as u64 - i * config.segment_mru
            } else {
                // middle segments get filled to the max
                config.segment_mru
            };
            let base = (i * config.segment_mru) as usize;
            let seg = XferSegData {
                flags,
                tid,
                len,
                buf: buf.slice(base..base + len as usize),
            };
            segs.push(seg);
        }
        Ok(segs)
    }
    /// Helper: segment `data_len` bytes of 0x90 filler and sanity-check
    /// the segment count and the START/END flags.
    fn perform_gen_xfer_segs_test(
        segment_mru: u64,
        transfer_mru: u64,
        data_len: u64,
    ) -> anyhow::Result<Vec<XferSegData>> {
        let config = SessInitData {
            keepalive: 0,
            segment_mru,
            transfer_mru,
            node_id: "node1".into(),
        };
        let data_raw: Vec<u8> = vec![0x90; data_len as usize];
        let fitting = if data_len % segment_mru == 0 { 0 } else { 1 };
        let num_expected_segs = ((data_len / segment_mru) + fitting) as usize;
        let data = Bytes::copy_from_slice(&data_raw);
        let segs =
            generate_xfer_segments(&config, data).expect("error generating xfer segment list");
        assert_eq!(segs.len(), num_expected_segs);
        assert!(segs[0].flags.contains(XferSegmentFlags::START));
        assert!(segs[num_expected_segs - 1]
            .flags
            .contains(XferSegmentFlags::END));
        Ok(segs)
    }
    #[test]
    fn gen_xfer_segs_single_seg() {
        let segs =
            perform_gen_xfer_segs_test(42, 100, 40).expect("error generating xfer segment list");
        dbg!(&segs);
        assert_eq!(segs.len(), 1);
    }
    #[test]
    fn gen_xfer_segs_two_segs() {
        let segs =
            perform_gen_xfer_segs_test(42, 100, 45).expect("error generating xfer segment list");
        dbg!(&segs);
        assert_eq!(segs.len(), 2);
    }
    #[test]
    fn gen_xfer_segs_three_segs() {
        let segs =
            perform_gen_xfer_segs_test(10, 100, 28).expect("error generating xfer segment list");
        dbg!(&segs);
        assert_eq!(segs.len(), 3);
    }
    #[test]
    fn gen_xfer_segs_seg_edge_case() {
        let segs =
            perform_gen_xfer_segs_test(10, 100, 10).expect("error generating xfer segment list");
        dbg!(&segs);
        assert_eq!(segs.len(), 1);
    }
    // Regression test for the exact-multiple bug fixed above: 20 bytes at a
    // segment MRU of 10 must yield two full 10-byte segments.
    #[test]
    fn gen_xfer_segs_exact_multiple() {
        let segs =
            perform_gen_xfer_segs_test(10, 100, 20).expect("error generating xfer segment list");
        dbg!(&segs);
        assert_eq!(segs.len(), 2);
        assert!(segs.iter().all(|s| s.len == 10));
    }
    #[test]
    #[should_panic]
    fn gen_xfer_segs_exceeding_t_mru() {
        // The helper's internal `expect` panics because the 180-byte bundle
        // exceeds the 100-byte transfer MRU.
        perform_gen_xfer_segs_test(42, 100, 180).unwrap_err();
    }
    #[test]
    fn serialize_deserialize() {
        let segs =
            perform_gen_xfer_segs_test(10, 100, 10).expect("error generating xfer segment list");
        for s in segs {
            let mut buf = Vec::new();
            let packet = TcpClPacket::XferSeg(s);
            block_on(packet.serialize(&mut buf)).unwrap();
            let mut slice = buf.as_ref();
            let result = block_on(TcpClPacket::deserialize(&mut slice)).unwrap();
            dbg!(&packet);
            dbg!(&buf);
            dbg!(&result);
            assert!(packet == result);
        }
    }
}
| 37.699488 | 142 | 0.500763 |
f7a73cce092ab538dec373ed196ca387daf373ae | 762 | // Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
use structopt::StructOpt;
/// Command-line arguments for the lightweight Move package builder.
#[derive(Debug, StructOpt)]
#[structopt(about = "Lightweight Move package builder")]
struct Args {
    // Path to the Move package to compile.
    input_path: std::path::PathBuf,
    #[structopt(
        long,
        short = "o",
        about = "Optional output path, defaults to input_path/out"
    )]
    output_path: Option<std::path::PathBuf>,
}
fn main() {
let args = Args::from_args();
let build_config = move_package::BuildConfig {
dev_mode: false,
generate_abis: false,
generate_docs: true,
install_dir: args.output_path,
..Default::default()
};
build_config
.compile_package(&args.input_path, &mut std::io::stdout())
.unwrap();
}
| 23.090909 | 66 | 0.61811 |
b9510960506864617c755c33fa4cb399bf47f87d | 280 | pub use self::client::Client;
pub use self::entity::{Entity, EntityKind, Entities};
pub use self::context::Context;
pub use self::types::*;
pub use self::enums::{GameVariant, GameMode, Difficulty};
pub mod client;
pub mod entity;
pub mod context;
pub mod enums;
pub mod types;
| 20 | 57 | 0.728571 |
1d4f7fa5f5658f7a9256786d27e8b8d7cee3fc55 | 624 | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::parsed_request::{Error, ParsedRequest};
use logger::{IncMetric, METRICS};
/// Parse a GET instance-info request: bumps the corresponding API metric
/// and returns the matching `ParsedRequest` variant (never fails).
pub fn parse_get_instance_info() -> Result<ParsedRequest, Error> {
    METRICS.get_api_requests.instance_info_count.inc();
    Ok(ParsedRequest::GetInstanceInfo)
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_parse_get_instance_info_request() {
        // The parser is infallible and must always yield `GetInstanceInfo`.
        let parsed = parse_get_instance_info();
        assert!(
            matches!(parsed, Ok(ParsedRequest::GetInstanceInfo)),
            "Test failed."
        );
    }
}
| 26 | 74 | 0.668269 |
76f1045e9e80c5794beff149f220bb05882ee3b4 | 5,576 | use {Buf, BufMut};
use iovec::{IoVec, IoVecMut};
/// A `Chain` sequences two buffers.
///
/// `Chain` is an adapter that links two underlying buffers and provides a
/// continuous view across both buffers. It is able to sequence either immutable
/// buffers ([`Buf`] values) or mutable buffers ([`BufMut`] values).
///
/// This struct is generally created by calling [`Buf::chain`]. Please see that
/// function's documentation for more detail.
///
/// # Examples
///
/// ```
/// use bytes::{Bytes, Buf, IntoBuf};
/// use bytes::buf::Chain;
///
/// let buf = Bytes::from(&b"hello "[..]).into_buf()
///             .chain(Bytes::from(&b"world"[..]));
///
/// let full: Bytes = buf.collect();
/// assert_eq!(full[..], b"hello world"[..]);
/// ```
///
/// [`Buf::chain`]: trait.Buf.html#method.chain
/// [`Buf`]: trait.Buf.html
/// [`BufMut`]: trait.BufMut.html
#[derive(Debug)]
pub struct Chain<T, U> {
    a: T,
    b: U,
}
impl<T, U> Chain<T, U> {
    /// Creates a new `Chain` sequencing the provided values.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    /// use bytes::buf::Chain;
    ///
    /// let buf = Chain::new(
    ///     BytesMut::with_capacity(1024),
    ///     BytesMut::with_capacity(1024));
    ///
    /// // Use the chained buffer
    /// ```
    pub fn new(a: T, b: U) -> Chain<T, U> {
        // Fix: redundant field init (`a: a, b: b`) replaced with shorthand.
        Chain { a, b }
    }
    /// Gets a reference to the first underlying `Buf`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{Bytes, Buf, IntoBuf};
    ///
    /// let buf = Bytes::from(&b"hello"[..]).into_buf()
    ///             .chain(Bytes::from(&b"world"[..]));
    ///
    /// assert_eq!(buf.first_ref().get_ref()[..], b"hello"[..]);
    /// ```
    pub fn first_ref(&self) -> &T {
        &self.a
    }
    /// Gets a mutable reference to the first underlying `Buf`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{Bytes, Buf, IntoBuf};
    ///
    /// let mut buf = Bytes::from(&b"hello "[..]).into_buf()
    ///                 .chain(Bytes::from(&b"world"[..]));
    ///
    /// buf.first_mut().set_position(1);
    ///
    /// let full: Bytes = buf.collect();
    /// assert_eq!(full[..], b"ello world"[..]);
    /// ```
    pub fn first_mut(&mut self) -> &mut T {
        &mut self.a
    }
    /// Gets a reference to the last underlying `Buf`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{Bytes, Buf, IntoBuf};
    ///
    /// let buf = Bytes::from(&b"hello"[..]).into_buf()
    ///             .chain(Bytes::from(&b"world"[..]));
    ///
    /// assert_eq!(buf.last_ref().get_ref()[..], b"world"[..]);
    /// ```
    pub fn last_ref(&self) -> &U {
        &self.b
    }
    /// Gets a mutable reference to the last underlying `Buf`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{Bytes, Buf, IntoBuf};
    ///
    /// let mut buf = Bytes::from(&b"hello "[..]).into_buf()
    ///                 .chain(Bytes::from(&b"world"[..]));
    ///
    /// buf.last_mut().set_position(1);
    ///
    /// let full: Bytes = buf.collect();
    /// assert_eq!(full[..], b"hello orld"[..]);
    /// ```
    pub fn last_mut(&mut self) -> &mut U {
        &mut self.b
    }
    /// Consumes this `Chain`, returning the underlying values.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{Bytes, Buf, IntoBuf};
    ///
    /// let buf = Bytes::from(&b"hello"[..]).into_buf()
    ///             .chain(Bytes::from(&b"world"[..]));
    ///
    /// let (first, last) = buf.into_inner();
    /// assert_eq!(first.get_ref()[..], b"hello"[..]);
    /// assert_eq!(last.get_ref()[..], b"world"[..]);
    /// ```
    pub fn into_inner(self) -> (T, U) {
        (self.a, self.b)
    }
}
// Read view over two chained buffers: `a` is drained completely before `b`.
impl<T, U> Buf for Chain<T, U>
    where T: Buf,
          U: Buf,
{
    fn remaining(&self) -> usize {
        self.a.remaining() + self.b.remaining()
    }
    fn bytes(&self) -> &[u8] {
        // Expose `a`'s bytes until it is exhausted, then `b`'s.
        if self.a.has_remaining() {
            self.a.bytes()
        } else {
            self.b.bytes()
        }
    }
    fn advance(&mut self, mut cnt: usize) {
        let a_rem = self.a.remaining();
        if a_rem != 0 {
            if a_rem >= cnt {
                self.a.advance(cnt);
                return;
            }
            // Consume what is left of a
            self.a.advance(a_rem);
            cnt -= a_rem;
        }
        // Remaining count (possibly all of `cnt`) comes out of `b`.
        self.b.advance(cnt);
    }
    fn bytes_vec<'a>(&'a self, dst: &mut [IoVec<'a>]) -> usize {
        // Gather from both buffers; `b`'s vectors follow `a`'s in `dst`.
        let mut n = self.a.bytes_vec(dst);
        n += self.b.bytes_vec(&mut dst[n..]);
        n
    }
}
// Write view over two chained buffers: `a` is filled completely before `b`.
impl<T, U> BufMut for Chain<T, U>
    where T: BufMut,
          U: BufMut,
{
    fn remaining_mut(&self) -> usize {
        self.a.remaining_mut() + self.b.remaining_mut()
    }
    unsafe fn bytes_mut(&mut self) -> &mut [u8] {
        // Unsafe per the `BufMut` trait contract; this impl only delegates
        // to whichever buffer still has spare capacity.
        if self.a.has_remaining_mut() {
            self.a.bytes_mut()
        } else {
            self.b.bytes_mut()
        }
    }
    unsafe fn advance_mut(&mut self, mut cnt: usize) {
        let a_rem = self.a.remaining_mut();
        if a_rem != 0 {
            if a_rem >= cnt {
                self.a.advance_mut(cnt);
                return;
            }
            // Consume what is left of a
            self.a.advance_mut(a_rem);
            cnt -= a_rem;
        }
        // The remainder is advanced in `b`.
        self.b.advance_mut(cnt);
    }
    unsafe fn bytes_vec_mut<'a>(&'a mut self, dst: &mut [IoVecMut<'a>]) -> usize {
        let mut n = self.a.bytes_vec_mut(dst);
        n += self.b.bytes_vec_mut(&mut dst[n..]);
        n
    }
}
| 24.563877 | 82 | 0.471844 |
ffa880ca268b5312b58ff3ef9cb129e139a60631 | 13,097 | use std::collections::{BTreeMap, HashMap, HashSet};
use std::fmt;
use std::str::FromStr;
use log::debug;
use serde::de;
use serde::ser;
use serde::{Deserialize, Serialize};
use crate::core::{Dependency, Package, PackageId, SourceId, Workspace};
use crate::util::errors::{CargoResult, CargoResultExt};
use crate::util::{internal, Graph};
use super::Resolve;
/// Serializable form of a resolve graph as stored in the lock file.
#[derive(Serialize, Deserialize, Debug)]
pub struct EncodableResolve {
    package: Option<Vec<EncodableDependency>>,
    /// `root` is optional to allow backward compatibility.
    root: Option<EncodableDependency>,
    metadata: Option<Metadata>,
    #[serde(default, skip_serializing_if = "Patch::is_empty")]
    patch: Patch,
}
/// Unused `[patch]` entries retained across lock file rewrites.
#[derive(Serialize, Deserialize, Debug, Default)]
struct Patch {
    unused: Vec<EncodableDependency>,
}
// Free-form metadata table of the lock file; also carries checksum entries.
pub type Metadata = BTreeMap<String, String>;
impl EncodableResolve {
    /// Convert the parsed lock file back into a live `Resolve` for `ws`,
    /// reconstructing package ids, the dependency graph, replacements,
    /// checksums and unused patches. Entries referring to packages that no
    /// longer exist are silently dropped.
    pub fn into_resolve(self, ws: &Workspace<'_>) -> CargoResult<Resolve> {
        let path_deps = build_path_deps(ws);
        let packages = {
            let mut packages = self.package.unwrap_or_default();
            if let Some(root) = self.root {
                packages.insert(0, root);
            }
            packages
        };
        // `PackageId`s in the lock file don't include the `source` part
        // for workspace members, so we reconstruct proper ids.
        let live_pkgs = {
            let mut live_pkgs = HashMap::new();
            let mut all_pkgs = HashSet::new();
            for pkg in packages.iter() {
                let enc_id = EncodablePackageId {
                    name: pkg.name.clone(),
                    version: pkg.version.clone(),
                    source: pkg.source,
                };
                if !all_pkgs.insert(enc_id.clone()) {
                    failure::bail!("package `{}` is specified twice in the lockfile", pkg.name);
                }
                let id = match pkg.source.as_ref().or_else(|| path_deps.get(&pkg.name)) {
                    // We failed to find a local package in the workspace.
                    // It must have been removed and should be ignored.
                    None => {
                        debug!("path dependency now missing {} v{}", pkg.name, pkg.version);
                        continue;
                    }
                    Some(&source) => PackageId::new(&pkg.name, &pkg.version, source)?,
                };
                assert!(live_pkgs.insert(enc_id, (id, pkg)).is_none())
            }
            live_pkgs
        };
        // Resolve an encoded id to a live `PackageId`, if it survived.
        let lookup_id = |enc_id: &EncodablePackageId| -> Option<PackageId> {
            live_pkgs.get(enc_id).map(|&(id, _)| id)
        };
        // Rebuild the dependency graph, dropping edges to removed packages.
        let g = {
            let mut g = Graph::new();
            for &(ref id, _) in live_pkgs.values() {
                g.add(id.clone());
            }
            for &(ref id, pkg) in live_pkgs.values() {
                let deps = match pkg.dependencies {
                    Some(ref deps) => deps,
                    None => continue,
                };
                for edge in deps.iter() {
                    if let Some(to_depend_on) = lookup_id(edge) {
                        g.link(id.clone(), to_depend_on);
                    }
                }
            }
            g
        };
        let replacements = {
            let mut replacements = HashMap::new();
            for &(ref id, pkg) in live_pkgs.values() {
                if let Some(ref replace) = pkg.replace {
                    // A replaced entry never carries its own dependency list.
                    assert!(pkg.dependencies.is_none());
                    if let Some(replace_id) = lookup_id(replace) {
                        replacements.insert(id.clone(), replace_id);
                    }
                }
            }
            replacements
        };
        let mut metadata = self.metadata.unwrap_or_default();
        // Parse out all package checksums. After we do this we can be in a few
        // situations:
        //
        // * We parsed no checksums. In this situation we're dealing with an old
        //   lock file and we're gonna fill them all in.
        // * We parsed some checksums, but not one for all packages listed. It
        //   could have been the case that some were listed, then an older Cargo
        //   client added more dependencies, and now we're going to fill in the
        //   missing ones.
        // * There are too many checksums listed, indicative of an older Cargo
        //   client removing a package but not updating the checksums listed.
        //
        // In all of these situations they're part of normal usage, so we don't
        // really worry about it. We just try to slurp up as many checksums as
        // possible.
        let mut checksums = HashMap::new();
        let prefix = "checksum ";
        let mut to_remove = Vec::new();
        for (k, v) in metadata.iter().filter(|p| p.0.starts_with(prefix)) {
            to_remove.push(k.to_string());
            let k = &k[prefix.len()..];
            let enc_id: EncodablePackageId = k
                .parse()
                .chain_err(|| internal("invalid encoding of checksum in lockfile"))?;
            let id = match lookup_id(&enc_id) {
                Some(id) => id,
                _ => continue,
            };
            // "<none>" is the serialized form of a missing checksum.
            let v = if v == "<none>" {
                None
            } else {
                Some(v.to_string())
            };
            checksums.insert(id, v);
        }
        // Checksum entries were moved into `checksums`; purge them from the
        // generic metadata table.
        for k in to_remove {
            metadata.remove(&k);
        }
        let mut unused_patches = Vec::new();
        for pkg in self.patch.unused {
            let id = match pkg.source.as_ref().or_else(|| path_deps.get(&pkg.name)) {
                Some(&src) => PackageId::new(&pkg.name, &pkg.version, src)?,
                None => continue,
            };
            unused_patches.push(id);
        }
        Ok(Resolve::new(
            g,
            replacements,
            HashMap::new(),
            checksums,
            metadata,
            unused_patches,
        ))
    }
}
/// Collect the `SourceId` for every path-based package reachable from the
/// workspace: members, their transitive path dependencies, and `[patch]` /
/// `[replace]` entries. Keyed by package name.
fn build_path_deps(ws: &Workspace<'_>) -> HashMap<String, SourceId> {
    // If a crate is *not* a path source, then we're probably in a situation
    // such as `cargo install` with a lock file from a remote dependency. In
    // that case we don't need to fixup any path dependencies (as they're not
    // actually path dependencies any more), so we ignore them.
    let members = ws
        .members()
        .filter(|p| p.package_id().source_id().is_path())
        .collect::<Vec<_>>();
    let mut ret = HashMap::new();
    let mut visited = HashSet::new();
    for member in members.iter() {
        ret.insert(
            member.package_id().name().to_string(),
            member.package_id().source_id(),
        );
        visited.insert(member.package_id().source_id());
    }
    for member in members.iter() {
        build_pkg(member, ws, &mut ret, &mut visited);
    }
    for deps in ws.root_patch().values() {
        for dep in deps {
            build_dep(dep, ws, &mut ret, &mut visited);
        }
    }
    for &(_, ref dep) in ws.root_replace() {
        build_dep(dep, ws, &mut ret, &mut visited);
    }
    return ret;
    // Recurse into all dependencies of `pkg`.
    fn build_pkg(
        pkg: &Package,
        ws: &Workspace<'_>,
        ret: &mut HashMap<String, SourceId>,
        visited: &mut HashSet<SourceId>,
    ) {
        for dep in pkg.dependencies() {
            build_dep(dep, ws, ret, visited);
        }
    }
    // Record one path dependency (skipping non-path and already-visited
    // sources) and recurse into its own dependencies.
    fn build_dep(
        dep: &Dependency,
        ws: &Workspace<'_>,
        ret: &mut HashMap<String, SourceId>,
        visited: &mut HashSet<SourceId>,
    ) {
        let id = dep.source_id();
        if visited.contains(&id) || !id.is_path() {
            return;
        }
        let path = match id.url().to_file_path() {
            Ok(p) => p.join("Cargo.toml"),
            Err(_) => return,
        };
        let pkg = match ws.load(&path) {
            Ok(p) => p,
            Err(_) => return,
        };
        ret.insert(pkg.name().to_string(), pkg.package_id().source_id());
        visited.insert(pkg.package_id().source_id());
        build_pkg(&pkg, ws, ret, visited);
    }
}
impl Patch {
fn is_empty(&self) -> bool {
self.unused.is_empty()
}
}
/// One package entry of the lock file.
#[derive(Serialize, Deserialize, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodableDependency {
    name: String,
    version: String,
    source: Option<SourceId>,
    dependencies: Option<Vec<EncodablePackageId>>,
    replace: Option<EncodablePackageId>,
}
/// Compact `name version (source)` package reference; see the `Display` /
/// `FromStr` implementations below for the textual form.
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Clone)]
pub struct EncodablePackageId {
    name: String,
    version: String,
    source: Option<SourceId>,
}
impl fmt::Display for EncodablePackageId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render as "name version" with an optional "(source-url)" suffix.
        match self.source {
            Some(ref s) => write!(f, "{} {} ({})", self.name, self.version, s.to_url()),
            None => write!(f, "{} {}", self.name, self.version),
        }
    }
}
impl FromStr for EncodablePackageId {
    type Err = failure::Error;
    /// Parse the `name version (source-url)` form produced by `Display`;
    /// the parenthesized source part is optional.
    fn from_str(s: &str) -> CargoResult<EncodablePackageId> {
        let mut s = s.splitn(3, ' ');
        let name = s.next().unwrap();
        let version = s
            .next()
            .ok_or_else(|| internal("invalid serialized PackageId"))?;
        let source_id = match s.next() {
            Some(s) => {
                // The source must be parenthesized, e.g. `(registry+https://…)`.
                if s.starts_with('(') && s.ends_with(')') {
                    Some(SourceId::from_url(&s[1..s.len() - 1])?)
                } else {
                    failure::bail!("invalid serialized PackageId")
                }
            }
            None => None,
        };
        Ok(EncodablePackageId {
            name: name.to_string(),
            version: version.to_string(),
            source: source_id,
        })
    }
}
impl ser::Serialize for EncodablePackageId {
    // Serialized as the `Display` string rather than as a struct.
    fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
    where
        S: ser::Serializer,
    {
        s.collect_str(self)
    }
}
impl<'de> de::Deserialize<'de> for EncodablePackageId {
    // Deserialized from the string form via `FromStr` above.
    fn deserialize<D>(d: D) -> Result<EncodablePackageId, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        String::deserialize(d).and_then(|string| {
            string
                .parse::<EncodablePackageId>()
                .map_err(de::Error::custom)
        })
    }
}
/// Pairs a `Resolve` with its `Workspace` so the graph can be serialized
/// back into lock-file form (see the `Serialize` impl below).
pub struct WorkspaceResolve<'a, 'cfg: 'a> {
    pub ws: &'a Workspace<'cfg>,
    pub resolve: &'a Resolve,
}
impl<'a, 'cfg> ser::Serialize for WorkspaceResolve<'a, 'cfg> {
    fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
    where
        S: ser::Serializer,
    {
        // Sort ids so the serialized output is deterministic.
        let mut ids: Vec<_> = self.resolve.iter().collect();
        ids.sort();
        let encodable = ids
            .iter()
            .map(|&id| encodable_resolve_node(id, self.resolve))
            .collect::<Vec<_>>();
        let mut metadata = self.resolve.metadata().clone();
        // Record a `checksum …` metadata entry for every non-path package;
        // "<none>" marks packages with no known checksum.
        for &id in ids.iter().filter(|id| !id.source_id().is_path()) {
            let checksum = match self.resolve.checksums()[&id] {
                Some(ref s) => &s[..],
                None => "<none>",
            };
            let id = encodable_package_id(id);
            metadata.insert(format!("checksum {}", id.to_string()), checksum.to_string());
        }
        // An empty metadata table is omitted entirely.
        let metadata = if metadata.is_empty() {
            None
        } else {
            Some(metadata)
        };
        let patch = Patch {
            unused: self
                .resolve
                .unused_patches()
                .iter()
                .map(|id| EncodableDependency {
                    name: id.name().to_string(),
                    version: id.version().to_string(),
                    source: encode_source(id.source_id()),
                    dependencies: None,
                    replace: None,
                })
                .collect(),
        };
        EncodableResolve {
            package: Some(encodable),
            root: None,
            metadata,
            patch,
        }
        .serialize(s)
    }
}
/// Convert one resolved package into its lock-file entry. Replaced packages
/// record the replacement instead of their dependency list.
fn encodable_resolve_node(id: PackageId, resolve: &Resolve) -> EncodableDependency {
    let (replace, deps) = match resolve.replacement(id) {
        Some(id) => (Some(encodable_package_id(id)), None),
        None => {
            // Sorted for deterministic output.
            let mut deps = resolve
                .deps_not_replaced(id)
                .map(encodable_package_id)
                .collect::<Vec<_>>();
            deps.sort();
            (None, Some(deps))
        }
    };
    EncodableDependency {
        name: id.name().to_string(),
        version: id.version().to_string(),
        source: encode_source(id.source_id()),
        dependencies: deps,
        replace,
    }
}
/// Build the compact id form for `id`; the `precise` component of the
/// source is stripped before serialization.
pub fn encodable_package_id(id: PackageId) -> EncodablePackageId {
    EncodablePackageId {
        name: id.name().to_string(),
        version: id.version().to_string(),
        source: encode_source(id.source_id()).map(|s| s.with_precise(None)),
    }
}
fn encode_source(id: SourceId) -> Option<SourceId> {
if id.is_path() {
None
} else {
Some(id)
}
}
| 30.889151 | 96 | 0.51821 |
61f46d22797c5754a8387b8db8237ac28ce591c5 | 763 | // Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![cfg_attr(docsrs, doc(cfg(feature = "std")))]
extern crate std;
use crate::Error;
use core::convert::From;
use std::io;
impl From<Error> for io::Error {
fn from(err: Error) -> Self {
match err.raw_os_error() {
Some(errno) => io::Error::from_raw_os_error(errno),
None => io::Error::new(io::ErrorKind::Other, err),
}
}
}
// Marker impl: relies entirely on the trait's default method implementations.
impl std::error::Error for Error {}
| 30.52 | 68 | 0.66055 |
79aaeaddf9f97f9ebabfc75e8be9a6e16856ae8e | 8,596 | //! RISC-V Instruction Set Architecture.
mod abi;
mod binemit;
mod enc_tables;
mod registers;
pub mod settings;
use super::super::settings as shared_settings;
#[cfg(feature = "testing_hooks")]
use crate::binemit::CodeSink;
use crate::binemit::{emit_function, MemoryCodeSink};
use crate::ir;
use crate::isa::enc_tables::{self as shared_enc_tables, lookup_enclist, Encodings};
use crate::isa::Builder as IsaBuilder;
use crate::isa::{EncInfo, RegClass, RegInfo, TargetIsa};
use crate::regalloc;
use alloc::boxed::Box;
use core::fmt;
use target_lexicon::{PointerWidth, Triple};
/// RISC-V target backend: shared and ISA-specific flags plus the level-1
/// encoding table matching the target's pointer width.
#[allow(dead_code)]
struct Isa {
    triple: Triple,
    shared_flags: shared_settings::Flags,
    isa_flags: settings::Flags,
    cpumode: &'static [shared_enc_tables::Level1Entry<u16>],
}
/// Get an ISA builder for creating RISC-V targets.
pub fn isa_builder(triple: Triple) -> IsaBuilder {
    IsaBuilder {
        triple,
        setup: settings::builder(),
        constructor: isa_constructor,
    }
}
// Instantiate the backend, selecting the RV32 or RV64 encoding tables from
// the triple's pointer width; 16-bit targets are rejected.
fn isa_constructor(
    triple: Triple,
    shared_flags: shared_settings::Flags,
    builder: shared_settings::Builder,
) -> Box<dyn TargetIsa> {
    let level1 = match triple.pointer_width().unwrap() {
        PointerWidth::U16 => panic!("16-bit RISC-V unrecognized"),
        PointerWidth::U32 => &enc_tables::LEVEL1_RV32[..],
        PointerWidth::U64 => &enc_tables::LEVEL1_RV64[..],
    };
    Box::new(Isa {
        triple,
        isa_flags: settings::Flags::new(&shared_flags, builder),
        shared_flags,
        cpumode: level1,
    })
}
impl TargetIsa for Isa {
    fn name(&self) -> &'static str {
        "riscv"
    }
    fn triple(&self) -> &Triple {
        &self.triple
    }
    fn flags(&self) -> &shared_settings::Flags {
        &self.shared_flags
    }
    fn register_info(&self) -> RegInfo {
        registers::INFO.clone()
    }
    fn encoding_info(&self) -> EncInfo {
        enc_tables::INFO.clone()
    }
    // Look up all legal encodings for `inst` in the generated encoding
    // tables, filtered through this ISA's predicate flags.
    fn legal_encodings<'a>(
        &'a self,
        func: &'a ir::Function,
        inst: &'a ir::InstructionData,
        ctrl_typevar: ir::Type,
    ) -> Encodings<'a> {
        lookup_enclist(
            ctrl_typevar,
            inst,
            func,
            self.cpumode,
            &enc_tables::LEVEL2[..],
            &enc_tables::ENCLISTS[..],
            &enc_tables::LEGALIZE_ACTIONS[..],
            &enc_tables::RECIPE_PREDICATES[..],
            &enc_tables::INST_PREDICATES[..],
            self.isa_flags.predicate_view(),
        )
    }
    fn legalize_signature(&self, sig: &mut ir::Signature, current: bool) {
        abi::legalize_signature(sig, &self.triple, &self.isa_flags, current)
    }
    fn regclass_for_abi_type(&self, ty: ir::Type) -> RegClass {
        abi::regclass_for_abi_type(ty)
    }
    fn allocatable_registers(&self, func: &ir::Function) -> regalloc::RegisterSet {
        abi::allocatable_registers(func, &self.isa_flags)
    }
    #[cfg(feature = "testing_hooks")]
    fn emit_inst(
        &self,
        func: &ir::Function,
        inst: ir::Inst,
        divert: &mut regalloc::RegDiversions,
        sink: &mut dyn CodeSink,
    ) {
        binemit::emit_inst(func, inst, divert, sink, self)
    }
    fn emit_function_to_memory(&self, func: &ir::Function, sink: &mut MemoryCodeSink) {
        emit_function(func, binemit::emit_inst, sink, self)
    }
    // Overflow conditions are not implemented for this backend.
    fn unsigned_add_overflow_condition(&self) -> ir::condcodes::IntCC {
        unimplemented!()
    }
    fn unsigned_sub_overflow_condition(&self) -> ir::condcodes::IntCC {
        unimplemented!()
    }
}
#[cfg(test)]
mod tests {
    use crate::ir::{immediates, types};
    use crate::ir::{Function, InstructionData, Opcode};
    use crate::isa;
    use crate::settings::{self, Configurable};
    use alloc::string::{String, ToString};
    use core::str::FromStr;
    use target_lexicon::triple;
    // Render an encoding lookup result for compact assertions below.
    fn encstr(isa: &dyn isa::TargetIsa, enc: Result<isa::Encoding, isa::Legalize>) -> String {
        match enc {
            Ok(e) => isa.encoding_info().display(e).to_string(),
            Err(_) => "no encoding".to_string(),
        }
    }
    // Instruction encodings on a 64-bit RISC-V target.
    #[test]
    fn test_64bitenc() {
        let shared_builder = settings::builder();
        let shared_flags = settings::Flags::new(shared_builder);
        let isa = isa::lookup(triple!("riscv64"))
            .unwrap()
            .finish(shared_flags);
        let mut func = Function::new();
        let ebb = func.dfg.make_ebb();
        let arg64 = func.dfg.append_ebb_param(ebb, types::I64);
        let arg32 = func.dfg.append_ebb_param(ebb, types::I32);
        // Try to encode iadd_imm.i64 v1, -10.
        let inst64 = InstructionData::BinaryImm {
            opcode: Opcode::IaddImm,
            arg: arg64,
            imm: immediates::Imm64::new(-10),
        };
        // ADDI is I/0b00100
        assert_eq!(
            encstr(&*isa, isa.encode(&func, &inst64, types::I64)),
            "Ii#04"
        );
        // Try to encode iadd_imm.i64 v1, -10000.
        let inst64_large = InstructionData::BinaryImm {
            opcode: Opcode::IaddImm,
            arg: arg64,
            imm: immediates::Imm64::new(-10000),
        };
        // Immediate is out of range for ADDI.
        assert!(isa.encode(&func, &inst64_large, types::I64).is_err());
        // Create an iadd_imm.i32 which is encodable in RV64.
        let inst32 = InstructionData::BinaryImm {
            opcode: Opcode::IaddImm,
            arg: arg32,
            imm: immediates::Imm64::new(10),
        };
        // ADDIW is I/0b00110
        assert_eq!(
            encstr(&*isa, isa.encode(&func, &inst32, types::I32)),
            "Ii#06"
        );
    }
    // Same as above, but for RV32.
    #[test]
    fn test_32bitenc() {
        let shared_builder = settings::builder();
        let shared_flags = settings::Flags::new(shared_builder);
        let isa = isa::lookup(triple!("riscv32"))
            .unwrap()
            .finish(shared_flags);
        let mut func = Function::new();
        let ebb = func.dfg.make_ebb();
        let arg64 = func.dfg.append_ebb_param(ebb, types::I64);
        let arg32 = func.dfg.append_ebb_param(ebb, types::I32);
        // Try to encode iadd_imm.i64 v1, -10.
        let inst64 = InstructionData::BinaryImm {
            opcode: Opcode::IaddImm,
            arg: arg64,
            imm: immediates::Imm64::new(-10),
        };
        // In 32-bit mode, an i64 bit add should be narrowed.
        assert!(isa.encode(&func, &inst64, types::I64).is_err());
        // Try to encode iadd_imm.i64 v1, -10000.
        let inst64_large = InstructionData::BinaryImm {
            opcode: Opcode::IaddImm,
            arg: arg64,
            imm: immediates::Imm64::new(-10000),
        };
        // In 32-bit mode, an i64 bit add should be narrowed.
        assert!(isa.encode(&func, &inst64_large, types::I64).is_err());
        // Create an iadd_imm.i32 which is encodable in RV32.
        let inst32 = InstructionData::BinaryImm {
            opcode: Opcode::IaddImm,
            arg: arg32,
            imm: immediates::Imm64::new(10),
        };
        // ADDI is I/0b00100
        assert_eq!(
            encstr(&*isa, isa.encode(&func, &inst32, types::I32)),
            "Ii#04"
        );
        // Create an imul.i32 which is encodable in RV32, but only when use_m is true.
        let mul32 = InstructionData::Binary {
            opcode: Opcode::Imul,
            args: [arg32, arg32],
        };
        assert!(isa.encode(&func, &mul32, types::I32).is_err());
    }
    #[test]
    fn test_rv32m() {
        let shared_builder = settings::builder();
        let shared_flags = settings::Flags::new(shared_builder);
        // Set the supports_m setting which in turn enables the use_m predicate that unlocks
        // encodings for imul.
        let mut isa_builder = isa::lookup(triple!("riscv32")).unwrap();
        isa_builder.enable("supports_m").unwrap();
        let isa = isa_builder.finish(shared_flags);
        let mut func = Function::new();
        let ebb = func.dfg.make_ebb();
        let arg32 = func.dfg.append_ebb_param(ebb, types::I32);
        // Create an imul.i32 which is encodable in RV32M.
        let mul32 = InstructionData::Binary {
            opcode: Opcode::Imul,
            args: [arg32, arg32],
        };
        assert_eq!(
            encstr(&*isa, isa.encode(&func, &mul32, types::I32)),
            "R#10c"
        );
    }
}
impl fmt::Display for Isa {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}\n{}", self.shared_flags, self.isa_flags)
}
}
| 29.641379 | 94 | 0.578874 |
33018ca7a4f57e33c54caa8a516392692b679bba | 4,100 | use aoc_runner_derive::{aoc, aoc_generator};
#[derive(Debug, PartialEq)]
pub enum Instruction {
Rectangle { x: usize, y: usize },
RotateRow { row: usize, delta: usize },
RotateColumn { col: usize, delta: usize },
}
/// Parse the input: one instruction per line, e.g. `rect 3x2`,
/// `rotate row y=1 by 5`, `rotate column x=30 by 1`. Panics on any
/// unrecognised line.
#[aoc_generator(day8)]
pub fn generator(input: &str) -> Vec<Instruction> {
    // this should be a nom parser really instead of keep allocating
    input
        .trim()
        .lines()
        .map(
            |line| match line.trim().split(' ').collect::<Vec<_>>()[..] {
                ["rect", line] => {
                    // `AxB` → rectangle of width A, height B.
                    let parts = line.split('x').collect::<Vec<_>>();
                    Instruction::Rectangle {
                        x: parts[0].parse().unwrap(),
                        y: parts[1].parse().unwrap(),
                    }
                }
                ["rotate", "row", row, "by", delta] => Instruction::RotateRow {
                    row: row.replace("y=", "").parse().unwrap(),
                    delta: delta.parse().unwrap(),
                },
                ["rotate", "column", col, "by", delta] => Instruction::RotateColumn {
                    col: col.replace("x=", "").parse().unwrap(),
                    delta: delta.parse().unwrap(),
                },
                _ => panic!("Unrecognised line: {}", line),
            },
        )
        .collect()
}
/// Apply every instruction in order to a 6×50 screen of pixels that all
/// start switched off, and return the final state.
fn build_grid(input: &[Instruction]) -> [[bool; 50]; 6] {
    let mut grid = [[false; 50]; 6];
    for instruction in input {
        match instruction {
            Instruction::Rectangle { x, y } => {
                // Light the top-left `x` × `y` rectangle.
                for row in 0..*y {
                    for col in 0..*x {
                        grid[row][col] = true;
                    }
                }
            }
            Instruction::RotateRow { row, delta } => {
                // Rotate one row by `delta`, wrapping at the edge.
                let current = grid[*row];
                let len = current.len();
                let mut rotated = [false; 50];
                for i in 0..len {
                    rotated[(i + delta + len) % len] = current[i];
                }
                grid[*row] = rotated;
            }
            Instruction::RotateColumn { col, delta } => {
                // Rotate one column by `delta`, wrapping at the edge.
                let current: Vec<bool> = grid.iter().map(|r| r[*col]).collect();
                let len = 6;
                let mut rotated = [false; 6];
                for i in 0..len {
                    rotated[(i + delta + len) % len] = current[i];
                }
                for (row, v) in rotated.iter().enumerate() {
                    grid[row][*col] = *v;
                }
            }
        }
    }
    grid
}
#[aoc(day8, part1)]
pub fn part1(input: &[Instruction]) -> usize {
    // Count every lit pixel on the final screen.
    let grid = build_grid(input);
    grid.iter().flatten().filter(|&&cell| cell).count()
}
#[aoc(day8, part2)]
pub fn part2(_input: &[Instruction]) -> String {
    // The answer is read visually from the rendered grid; uncomment to see it:
    //
    // let grid = build_grid(input);
    // for y in 0..6 {
    //     for x in 0..50 {
    //         print!("{}", if grid[y][x] { '*' } else { ' ' });
    //     }
    //     println!();
    // }
    String::from("CFLELOYFCS")
}
#[cfg(test)]
mod tests {
    use super::*;
    // Real puzzle input checked in next to the sources.
    static INPUT: &str = include_str!("../input/2016/day8.txt");
    #[test]
    fn test_part1() {
        let input = generator(INPUT);
        assert_eq!(part1(&input), 106);
    }
    #[test]
    fn test_part2() {
        let input = generator(INPUT);
        assert_eq!(part2(&input), "CFLELOYFCS");
    }
    // One example per instruction form exercises the parser.
    #[test]
    fn test_generator() {
        assert_eq!(
            generator("rect 1x2")[0],
            Instruction::Rectangle { x: 1, y: 2 }
        );
        assert_eq!(
            generator("rotate row y=1 by 5")[0],
            Instruction::RotateRow { row: 1, delta: 5 }
        );
        assert_eq!(
            generator("rotate column x=30 by 1")[0],
            Instruction::RotateColumn { col: 30, delta: 1 }
        );
    }
}
| 27.702703 | 85 | 0.429024 |
87e5bc41923b114102744765e2ab77f4dc0b5cbd | 2,698 | use std::ops::{AddAssign, Div, Mul, Rem};
/// An iterator over the divisors of a number.
#[derive(Debug, Copy, Clone)]
pub struct Divisors<T> {
    num: T,    // the number whose divisors are produced
    cur: T,    // candidate divisor currently being probed
    chk: bool, // true => the complement `num / cur` is still pending
}
impl<T> Iterator for Divisors<T>
where
    T: Copy
        + AddAssign
        + Div<Output = T>
        + Rem<Output = T>
        + Mul<Output = T>
        + PartialOrd
        + From<u8>,
{
    type Item = T;
    /// Probes candidates up to `sqrt(num)`. Each hit is yielded in two
    /// steps: first the small factor, then (on the next call) its
    /// complement `num / cur` — skipping the complement when it equals
    /// the factor (perfect square) or when the factor is 1 (so `num`
    /// itself is never produced).
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            if self.cur * self.cur > self.num {
                return None;
            }
            if self.chk {
                // Second half of the pair found on the previous call.
                self.chk = false;
                let small = self.cur;
                let partner = self.num / small;
                self.cur += 1u8.into();
                if small != partner && small != 1u8.into() {
                    return Some(partner);
                }
            } else if self.num % self.cur == 0u8.into() {
                // Found a small divisor; remember to emit its partner next.
                self.chk = true;
                return Some(self.cur);
            } else {
                self.cur += 1u8.into();
            }
        }
    }
}
/// Trait to turn a number into an `Iterator` over its divisors.
pub trait ToDivisors
where
    Self: Sized,
{
    /// Return an iterator over the divisors of `self`.
    ///
    /// Calling this method will produce an Iterator over the divisors of
    /// `self`. The order that the divisors will be returned is undefined.
    ///
    /// The iterator will only produce proper divisors of `self` — that is,
    /// divisors that divide `self` evenly and are smaller than `self`.
    ///
    /// ## Examples
    ///
    /// ```
    /// use euler::ToDivisors;
    ///
    /// assert_eq!(0.divisors().count(), 0);
    /// assert_eq!(4.divisors().count(), 2);
    /// assert_eq!(12.divisors().sum::<i32>(), [1, 2, 3, 4, 6].iter().sum());
    /// ```
    ///
    /// ### Sorted Divisors
    ///
    /// The iterator yields divisors in pair order, not ascending order; if
    /// you need sorted divisors, you need to sort them yourself. For
    /// example:
    ///
    /// ```
    /// use euler::ToDivisors;
    ///
    /// let num = 12;
    /// let mut divisors = num.divisors().collect::<Vec<u32>>();
    /// divisors.sort();
    /// assert_eq!(divisors, vec![1, 2, 3, 4, 6]);
    /// ```
    fn divisors(&self) -> Divisors<Self>;
}
/// Blanket implementation: every copyable type constructible from a `u8`
/// gains the `divisors()` method.
impl<T> ToDivisors for T
where
    T: Copy + From<u8>,
{
    fn divisors(&self) -> Divisors<Self> {
        // Start probing at 1 with no complementary divisor pending.
        Divisors { num: *self, cur: T::from(1u8), chk: false }
    }
}
#[test]
fn test_divisors() {
    // 220 and 284 are the classic amicable pair: the proper divisors of
    // each sum to the other.
    assert_eq!(220.divisors().sum::<i32>(), 284);
    assert_eq!(284.divisors().sum::<i32>(), 220);
}
| 24.981481 | 77 | 0.469978 |
611a482c7e09bffd39ba50d02fdecbc3196eec96 | 15,969 | use crate::directory::Directory;
use crate::run::PathDependency;
use std::path::Path;
// Expands to a #[test] named `$name` that feeds `$original` (raw compiler
// output) through `super::diagnostics` and asserts that the preferred
// normalized variation equals `$expected`. The optional DIR / WORKSPACE /
// INPUT / TARGET literals override the default context paths: each
// `{ default $(; $override)? }` block evaluates to its last expression,
// so a supplied literal replaces the default.
macro_rules! test_normalize {
    (
        $name:ident
        $(DIR=$dir:literal)?
        $(WORKSPACE=$workspace:literal)?
        $(INPUT=$input:literal)?
        $(TARGET=$target:literal)?
        $original:literal
        $expected:literal
    ) => {
        #[test]
        fn $name() {
            let context = super::Context {
                krate: "trybuild000",
                input_file: Path::new({ "tests/ui/error.rs" $(; $input)? }),
                source_dir: &Directory::new({ "/git/trybuild/test_suite" $(; $dir)? }),
                workspace: &Directory::new({ "/git/trybuild" $(; $workspace)? }),
                target_dir: &Directory::new({ "/git/trybuild/target" $(; $target)? }),
                path_dependencies: &[PathDependency {
                    name: String::from("diesel"),
                    normalized_path: Directory::new("/home/user/documents/rust/diesel/diesel"),
                }],
            };
            let original = $original.to_owned().into_bytes();
            let variations = super::diagnostics(original, context);
            let preferred = variations.preferred();
            let expected = $expected;
            if preferred != expected {
                panic!("\nACTUAL: \"{}\"\nEXPECTED: \"{}\"", preferred, expected);
            }
        }
    };
}
// Plain diagnostic: absolute source-dir prefix and trailing cargo chatter
// are stripped.
test_normalize! {test_basic "
error: `self` parameter is only allowed in associated functions
--> /git/trybuild/test_suite/tests/ui/error.rs:11:23
|
11 | async fn bad_endpoint(self) -> Result<HttpResponseOkObject<()>, HttpError> {
| ^^^^ not semantically valid as function parameter
error: aborting due to 2 previous errors
For more information about this error, try `rustc --explain E0401`.
error: could not compile `trybuild-tests`.
To learn more, run the command again with --verbose.
" "
error: `self` parameter is only allowed in associated functions
--> tests/ui/error.rs:11:23
|
11 | async fn bad_endpoint(self) -> Result<HttpResponseOkObject<()>, HttpError> {
| ^^^^ not semantically valid as function parameter
"}
// Windows-style backslash paths normalize to forward slashes.
test_normalize! {test_dir_backslash "
error[E0277]: the trait bound `QueryParams: serde::de::Deserialize<'de>` is not satisfied
--> \\git\\trybuild\\test_suite\\tests\\ui\\error.rs:22:61
" "
error[E0277]: the trait bound `QueryParams: serde::de::Deserialize<'de>` is not satisfied
--> tests/ui/error.rs:22:61
"}
// Rust toolchain source paths collapse to the $RUST placeholder and their
// line/column suffixes are dropped.
test_normalize! {test_rust_lib
INPUT="tests/ui/not-repeatable.rs"
"
error[E0599]: no method named `quote_into_iter` found for struct `std::net::Ipv4Addr` in the current scope
--> /git/trybuild/test_suite/tests/ui/not-repeatable.rs:6:13
|
6 | let _ = quote! { #(#ip)* };
| ^^^^^^^^^^^^^^^^^^ method not found in `std::net::Ipv4Addr`
|
::: /rustlib/src/rust/src/libstd/net/ip.rs:83:1
::: /rustlib/src/rust/library/std/src/net/ip.rs:83:1
|
83 | pub struct Ipv4Addr {
| -------------------
| |
| doesn't satisfy `std::net::Ipv4Addr: quote::to_tokens::ToTokens`
" "
error[E0599]: no method named `quote_into_iter` found for struct `std::net::Ipv4Addr` in the current scope
--> tests/ui/not-repeatable.rs:6:13
|
6 | let _ = quote! { #(#ip)* };
| ^^^^^^^^^^^^^^^^^^ method not found in `std::net::Ipv4Addr`
|
::: $RUST/src/libstd/net/ip.rs
::: $RUST/std/src/net/ip.rs
|
| pub struct Ipv4Addr {
| -------------------
| |
| doesn't satisfy `std::net::Ipv4Addr: quote::to_tokens::ToTokens`
"}
// Source-dir paths embedded inside closure type names become $DIR.
test_normalize! {test_type_dir_backslash
INPUT="tests/ui/compile-fail-3.rs"
"
error[E0277]: `*mut _` cannot be shared between threads safely
--> /git/trybuild/test_suite/tests/ui/compile-fail-3.rs:7:5
|
7 | thread::spawn(|| {
| ^^^^^^^^^^^^^ `*mut _` cannot be shared between threads safely
|
= help: the trait `std::marker::Sync` is not implemented for `*mut _`
= note: required because of the requirements on the impl of `std::marker::Send` for `&*mut _`
= note: required because it appears within the type `[closure@/git/trybuild/test_suite/ui/compile-fail-3.rs:7:19: 9:6 x:&*mut _]`
" "
error[E0277]: `*mut _` cannot be shared between threads safely
--> tests/ui/compile-fail-3.rs:7:5
|
7 | thread::spawn(|| {
| ^^^^^^^^^^^^^ `*mut _` cannot be shared between threads safely
|
= help: the trait `std::marker::Sync` is not implemented for `*mut _`
= note: required because of the requirements on the impl of `std::marker::Send` for `&*mut _`
= note: required because it appears within the type `[closure@$DIR/ui/compile-fail-3.rs:7:19: 9:6 x:&*mut _]`
"}
// Paths into the configured path-dependency (diesel) become $DIESEL.
test_normalize! {test_strip_path_dependencies "
error[E0277]: the trait bound `diesel::query_builder::SelectStatement<users::table, diesel::query_builder::select_clause::DefaultSelectClause, diesel::query_builder::distinct_clause::NoDistinctClause, diesel::query_builder::where_clause::WhereClause<diesel::expression::grouped::Grouped<diesel::expression::operators::Eq<posts::columns::id, diesel::expression::bound::Bound<diesel::sql_types::Integer, i32>>>>>: diesel::query_builder::IntoUpdateTarget` is not satisfied
--> $DIR/update_requires_valid_where_clause.rs:21:12
|
21 | update(users::table.filter(posts::id.eq(1)));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `diesel::query_builder::IntoUpdateTarget` is not implemented for `diesel::query_builder::SelectStatement<users::table, diesel::query_builder::select_clause::DefaultSelectClause, diesel::query_builder::distinct_clause::NoDistinctClause, diesel::query_builder::where_clause::WhereClause<diesel::expression::grouped::Grouped<diesel::expression::operators::Eq<posts::columns::id, diesel::expression::bound::Bound<diesel::sql_types::Integer, i32>>>>>`
|
::: /home/user/documents/rust/diesel/diesel/src/query_builder/functions.rs:78:18
|
78 | pub fn update<T: IntoUpdateTarget>(source: T) -> UpdateStatement<T::Table, T::WhereClause> {
| ---------------- required by this bound in `diesel::update`
|
= help: the following implementations were found:
<diesel::query_builder::SelectStatement<F, diesel::query_builder::select_clause::DefaultSelectClause, diesel::query_builder::distinct_clause::NoDistinctClause, W> as diesel::query_builder::IntoUpdateTarget>
" "
error[E0277]: the trait bound `diesel::query_builder::SelectStatement<users::table, diesel::query_builder::select_clause::DefaultSelectClause, diesel::query_builder::distinct_clause::NoDistinctClause, diesel::query_builder::where_clause::WhereClause<diesel::expression::grouped::Grouped<diesel::expression::operators::Eq<posts::columns::id, diesel::expression::bound::Bound<diesel::sql_types::Integer, i32>>>>>: diesel::query_builder::IntoUpdateTarget` is not satisfied
--> $DIR/update_requires_valid_where_clause.rs:21:12
|
21 | update(users::table.filter(posts::id.eq(1)));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `diesel::query_builder::IntoUpdateTarget` is not implemented for `diesel::query_builder::SelectStatement<users::table, diesel::query_builder::select_clause::DefaultSelectClause, diesel::query_builder::distinct_clause::NoDistinctClause, diesel::query_builder::where_clause::WhereClause<diesel::expression::grouped::Grouped<diesel::expression::operators::Eq<posts::columns::id, diesel::expression::bound::Bound<diesel::sql_types::Integer, i32>>>>>`
|
::: $DIESEL/src/query_builder/functions.rs
|
| pub fn update<T: IntoUpdateTarget>(source: T) -> UpdateStatement<T::Table, T::WhereClause> {
| ---------------- required by this bound in `diesel::update`
|
= help: the following implementations were found:
<diesel::query_builder::SelectStatement<F, diesel::query_builder::select_clause::DefaultSelectClause, diesel::query_builder::distinct_clause::NoDistinctClause, W> as diesel::query_builder::IntoUpdateTarget>
"}
// Paths into ~/.cargo/registry collapse to $CARGO.
test_normalize! {test_cargo_registry "
error[E0277]: the trait bound `Thread: serde::de::Deserialize<'_>` is not satisfied
--> src/main.rs:2:36
|
2 | let _ = serde_json::from_str::<std::thread::Thread>(\"???\");
| ^^^^^^^^^^^^^^^^^^^ the trait `serde::de::Deserialize<'_>` is not implemented for `Thread`
|
::: /home/ferris/.cargo/registry/src/github.com-1ecc6299db9ec823/serde_json-1.0.64/src/de.rs:2584:8
|
2584 | T: de::Deserialize<'a>,
| ------------------- required by this bound in `serde_json::from_str`
For more information about this error, try `rustc --explain E0277`.
error: could not compile `testing` due to previous error
" "
error[E0277]: the trait bound `Thread: serde::de::Deserialize<'_>` is not satisfied
--> src/main.rs:2:36
|
2 | let _ = serde_json::from_str::<std::thread::Thread>(\"???\");
| ^^^^^^^^^^^^^^^^^^^ the trait `serde::de::Deserialize<'_>` is not implemented for `Thread`
|
::: $CARGO/serde_json-1.0.64/src/de.rs
|
| T: de::Deserialize<'a>,
| ------------------- required by this bound in `serde_json::from_str`
"}
// Multi-span note: rustup toolchain paths normalize to $RUST and line
// numbers in the snippet gutters are blanked out.
test_normalize! {test_traits_must_be_implemented "
error[E0599]: the method `anyhow_kind` exists for reference `&Error`, but its trait bounds were not satisfied
--> src/main.rs:7:13
|
4 | struct Error;
| -------------
| |
| doesn't satisfy `Error: Into<anyhow::Error>`
| doesn't satisfy `Error: anyhow::private::kind::TraitKind`
| doesn't satisfy `Error: std::fmt::Display`
...
7 | let _ = anyhow!(Error);
| ^^^^^^^^^^^^^^ method cannot be called on `&Error` due to unsatisfied trait bounds
|
= note: the following trait bounds were not satisfied:
`Error: Into<anyhow::Error>`
which is required by `Error: anyhow::private::kind::TraitKind`
`Error: std::fmt::Display`
which is required by `&Error: anyhow::private::kind::AdhocKind`
`&Error: Into<anyhow::Error>`
which is required by `&Error: anyhow::private::kind::TraitKind`
note: the following traits must be implemented
--> /rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/convert/mod.rs:274:1
|
274 | / pub trait Into<T>: Sized {
275 | | /// Performs the conversion.
276 | | #[stable(feature = \"rust1\", since = \"1.0.0\")]
277 | | fn into(self) -> T;
278 | | }
| |_^
|
::: /rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/fmt/mod.rs:715:1
|
715 | / pub trait Display {
716 | | /// Formats the value using the given formatter.
717 | | ///
718 | | /// # Examples
... |
738 | | fn fmt(&self, f: &mut Formatter<'_>) -> Result;
739 | | }
| |_^
= note: this error originates in the macro `anyhow` (in Nightly builds, run with -Z macro-backtrace for more info)
" "
error[E0599]: the method `anyhow_kind` exists for reference `&Error`, but its trait bounds were not satisfied
--> src/main.rs:7:13
|
4 | struct Error;
| -------------
| |
| doesn't satisfy `Error: Into<anyhow::Error>`
| doesn't satisfy `Error: anyhow::private::kind::TraitKind`
| doesn't satisfy `Error: std::fmt::Display`
...
7 | let _ = anyhow!(Error);
| ^^^^^^^^^^^^^^ method cannot be called on `&Error` due to unsatisfied trait bounds
|
= note: the following trait bounds were not satisfied:
`Error: Into<anyhow::Error>`
which is required by `Error: anyhow::private::kind::TraitKind`
`Error: std::fmt::Display`
which is required by `&Error: anyhow::private::kind::AdhocKind`
`&Error: Into<anyhow::Error>`
which is required by `&Error: anyhow::private::kind::TraitKind`
note: the following traits must be implemented
--> $RUST/core/src/convert/mod.rs
|
| / pub trait Into<T>: Sized {
| | /// Performs the conversion.
| | #[stable(feature = \"rust1\", since = \"1.0.0\")]
| | fn into(self) -> T;
| | }
| |_^
|
::: $RUST/core/src/fmt/mod.rs
|
| / pub trait Display {
| | /// Formats the value using the given formatter.
| | ///
| | /// # Examples
... |
| | fn fmt(&self, f: &mut Formatter<'_>) -> Result;
| | }
| |_^
= note: this error originates in the macro `anyhow` (in Nightly builds, run with -Z macro-backtrace for more info)
"}
// Source dir equals the workspace root: already-relative paths pass through.
test_normalize! {test_pyo3_url
DIR="/pyo3"
WORKSPACE="/pyo3"
"
error: `async fn` is not yet supported for Python functions.
Additional crates such as `pyo3-asyncio` can be used to integrate async Rust and Python. For more information, see https://github.com/PyO3/pyo3/issues/1632
--> tests/ui/invalid_pyfunctions.rs:10:1
|
10 | async fn async_function() {}
| ^^^^^
" "
error: `async fn` is not yet supported for Python functions.
Additional crates such as `pyo3-asyncio` can be used to integrate async Rust and Python. For more information, see https://github.com/PyO3/pyo3/issues/1632
--> tests/ui/invalid_pyfunctions.rs:10:1
|
10 | async fn async_function() {}
| ^^^^^
"}
// "required by a bound" secondary span: workspace-relative crate source
// paths lose their absolute prefix and line/column suffix.
test_normalize! {test_dropshot_required_by
DIR="/git/dropshot/dropshot"
WORKSPACE="/git/dropshot"
INPUT="tests/fail/bad_endpoint4.rs"
"
error[E0277]: the trait bound `QueryParams: schemars::JsonSchema` is not satisfied
--> /git/dropshot/dropshot/tests/fail/bad_endpoint4.rs:24:14
|
24 | _params: Query<QueryParams>,
| ^^^^^^^^^^^^^^^^^^ the trait `schemars::JsonSchema` is not implemented for `QueryParams`
|
note: required by a bound in `dropshot::Query`
--> /git/dropshot/dropshot/src/handler.rs:547:48
|
547 | pub struct Query<QueryType: DeserializeOwned + JsonSchema + Send + Sync> {
| ^^^^^^^^^^ required by this bound in `dropshot::Query`
"
// TODO: it would be nice to also unindent the column of `|` by one column.
// https://github.com/dtolnay/trybuild/issues/86
"
error[E0277]: the trait bound `QueryParams: schemars::JsonSchema` is not satisfied
--> tests/fail/bad_endpoint4.rs:24:14
|
24 | _params: Query<QueryParams>,
| ^^^^^^^^^^^^^^^^^^ the trait `schemars::JsonSchema` is not implemented for `QueryParams`
|
note: required by a bound in `dropshot::Query`
--> src/handler.rs
|
| pub struct Query<QueryType: DeserializeOwned + JsonSchema + Send + Sync> {
| ^^^^^^^^^^ required by this bound in `dropshot::Query`
"}
// Generated-code paths under target/*/build/<crate>-<hash>/out collapse
// to an $OUT_DIR[<crate>] placeholder.
test_normalize! {test_uniffi_out_dir
DIR="/git/uniffi-rs/fixtures/uitests"
WORKSPACE="/git/uniffi-rs"
TARGET="/git/uniffi-rs/target"
"
error[E0277]: the trait bound `Arc<Counter>: FfiConverter` is not satisfied
--> /git/uniffi-rs/target/debug/build/uniffi_uitests-1a51d46aecb559a7/out/counter.uniffi.rs:160:19
|
160 | match <std::sync::Arc<Counter> as uniffi::FfiConverter>::try_lift(ptr) {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FfiConverter` is not implemented for `Arc<Counter>`
|
= help: the following implementations were found:
<Arc<T> as FfiConverter>
= note: required by `try_lift`
" "
error[E0277]: the trait bound `Arc<Counter>: FfiConverter` is not satisfied
--> $OUT_DIR[uniffi_uitests]/counter.uniffi.rs
|
| match <std::sync::Arc<Counter> as uniffi::FfiConverter>::try_lift(ptr) {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FfiConverter` is not implemented for `Arc<Counter>`
|
= help: the following implementations were found:
<Arc<T> as FfiConverter>
= note: required by `try_lift`
"}
| 46.153179 | 509 | 0.623395 |
03f03fcca023960a4999974f8da130863ba00d60 | 901 | use lazy_static::lazy_static;
use std::collections::BTreeSet;
#[cfg(feature = "textrank")]
pub mod textrank;
#[cfg(feature = "tfidf")]
pub mod tfidf;
lazy_static! {
    /// English stop words ignored by the keyword extractors.
    pub static ref STOP_WORDS: BTreeSet<String> = {
        // The original list contained "that" twice; the set made the
        // duplicate harmless, but it is dropped here and the set is built
        // declaratively with `collect` instead of an insert loop.
        [
            "the", "of", "is", "and", "to", "in", "that", "we", "for", "an", "are", "by", "be", "as", "on", "with",
            "can", "if", "from", "which", "you", "it", "this", "then", "at", "have", "all", "not", "one", "has", "or",
        ]
        .iter()
        .map(|&s| String::from(s))
        .collect()
    };
}
/// A keyword paired with its weight.
#[derive(Debug, Clone)]
pub struct Keyword {
    /// The extracted keyword text.
    pub keyword: String,
    /// Weight assigned to the keyword by the extractor.
    pub weight: f64,
}
/// Common interface implemented by the keyword-extraction algorithms
/// (see the `tfidf` and `textrank` submodules).
pub trait KeywordExtract {
    /// Returns weighted keywords extracted from `sentence`.
    // NOTE(review): based on the parameter names, `top_k` presumably caps
    // the number of results and `allowed_pos` filters by part-of-speech
    // tag — confirm against the implementations.
    fn extract_tags(&self, sentence: &str, top_k: usize, allowed_pos: Vec<String>) -> Vec<Keyword>;
}
| 25.027778 | 118 | 0.532741 |
331864fc063945957dcda21d3edbdd35dad426e9 | 4,213 | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fs::{File, OpenOptions};
use std::path::PathBuf;
use std::time::Duration;
use backoff::{retry, ExponentialBackoff};
/// An inter-process lock backed by exclusive creation of a lock file.
/// The lock is held for the lifetime of the value; dropping it removes
/// the file (see the `Drop` impl below).
pub struct FileLock {
    path: PathBuf, // location of the lock file, removed on drop
    _file: File,   // keeps the created file handle alive while locked
}
impl FileLock {
    /// Acquires the lock by atomically creating `path`.
    ///
    /// Retries with exponential backoff (starting at 1ms, for up to ~10s
    /// total) while another process holds the file; panics if the lock
    /// cannot be taken in time or a non-retryable I/O error occurs.
    pub fn lock(path: PathBuf) -> FileLock {
        let mut options = OpenOptions::new();
        // `create_new` fails if the file already exists, making the
        // existence check and the creation a single atomic operation.
        options.create_new(true);
        options.write(true);
        let try_write_lock_file = || match options.open(&path) {
            Ok(file) => Ok(FileLock {
                path: path.clone(),
                _file: file,
            }),
            Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {
                // Someone else holds the lock; worth retrying.
                Err(backoff::Error::Transient(err))
            }
            Err(err) if cfg!(windows) && err.kind() == std::io::ErrorKind::PermissionDenied => {
                // Windows can report PermissionDenied for a file that is
                // merely held by another process, so retry there too.
                Err(backoff::Error::Transient(err))
            }
            Err(err) => Err(backoff::Error::Permanent(err)),
        };
        let backoff = ExponentialBackoff {
            initial_interval: Duration::from_millis(1),
            max_elapsed_time: Some(Duration::from_secs(10)),
            ..Default::default()
        };
        match retry(backoff, try_write_lock_file) {
            Err(err) => panic!(
                "failed to create lock file {}: {}",
                path.to_string_lossy(),
                err
            ),
            Ok(file_lock) => file_lock,
        }
    }
}
impl Drop for FileLock {
    /// Releases the lock by deleting the lock file.
    // NOTE(review): `expect` panics if removal fails; if that happens while
    // already unwinding from another panic, the process aborts. Confirm
    // this loud failure mode is intended.
    fn drop(&mut self) {
        std::fs::remove_file(&self.path).expect("failed to delete lock file");
    }
}
#[cfg(test)]
mod tests {
    use std::cmp::max;
    use std::{env, thread};
    use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
    use super::*;
    // The lock file should exist exactly while the guard is alive.
    #[test]
    fn lock_basic() {
        let number: u32 = rand::random();
        let lock_path = env::temp_dir().join(format!("test-{}.lock", number));
        assert!(!lock_path.exists());
        {
            let _lock = FileLock::lock(lock_path.clone());
            assert!(lock_path.exists());
        }
        assert!(!lock_path.exists());
    }
    // Several threads each perform a non-atomic read/increment/write of a
    // shared counter file; the lock must serialize them so that no update
    // is lost and the final count equals the number of threads.
    #[test]
    fn lock_concurrent() {
        let number: u32 = rand::random();
        let data_path = env::temp_dir().join(format!("test-{}", number));
        let lock_path = env::temp_dir().join(format!("test-{}.lock", number));
        let mut data_file = OpenOptions::new()
            .create(true)
            .write(true)
            .open(data_path.clone())
            .unwrap();
        data_file.write_u32::<LittleEndian>(0).unwrap();
        let num_threads = max(num_cpus::get(), 4);
        let mut threads = vec![];
        for _ in 0..num_threads {
            let data_path = data_path.clone();
            let lock_path = lock_path.clone();
            let handle = thread::spawn(move || {
                let _lock = FileLock::lock(lock_path);
                let mut data_file = OpenOptions::new()
                    .read(true)
                    .open(data_path.clone())
                    .unwrap();
                let value = data_file.read_u32::<LittleEndian>().unwrap();
                // Widen the race window so a broken lock would be caught.
                thread::sleep(Duration::from_millis(1));
                let mut data_file = OpenOptions::new().write(true).open(data_path).unwrap();
                data_file.write_u32::<LittleEndian>(value + 1).unwrap();
            });
            threads.push(handle);
        }
        for thread in threads {
            thread.join().ok().unwrap();
        }
        let mut data_file = OpenOptions::new().read(true).open(data_path).unwrap();
        let value = data_file.read_u32::<LittleEndian>().unwrap();
        assert_eq!(value, num_threads as u32);
    }
}
| 33.975806 | 96 | 0.558747 |
140b2a216ede2607b729344d56820a5911e13cd8 | 57,964 | use unix::bsd::O_SYNC;
// Primitive C type aliases for this platform.
pub type clock_t = i64;
pub type suseconds_t = ::c_long;
pub type dev_t = i32;
pub type sigset_t = ::c_uint;
pub type blksize_t = i32;
pub type fsblkcnt_t = u64;
pub type fsfilcnt_t = u64;
// Pthread handles are opaque pointers on this platform.
pub type pthread_attr_t = *mut ::c_void;
pub type pthread_mutex_t = *mut ::c_void;
pub type pthread_mutexattr_t = *mut ::c_void;
pub type pthread_cond_t = *mut ::c_void;
pub type pthread_condattr_t = *mut ::c_void;
pub type pthread_rwlock_t = *mut ::c_void;
pub type pthread_rwlockattr_t = *mut ::c_void;
pub type pthread_spinlock_t = ::uintptr_t;
pub type caddr_t = *mut ::c_char;
// elf.h
pub type Elf32_Addr = u32;
pub type Elf32_Half = u16;
pub type Elf32_Lword = u64;
pub type Elf32_Off = u32;
pub type Elf32_Sword = i32;
pub type Elf32_Word = u32;
pub type Elf64_Addr = u64;
pub type Elf64_Half = u16;
pub type Elf64_Lword = u64;
pub type Elf64_Off = u64;
pub type Elf64_Sword = i32;
pub type Elf64_Sxword = i64;
pub type Elf64_Word = u32;
pub type Elf64_Xword = u64;
// Pick the native ELF aliases to match the target's pointer width.
cfg_if! {
    if #[cfg(target_pointer_width = "64")] {
        type Elf_Addr = Elf64_Addr;
        type Elf_Half = Elf64_Half;
        type Elf_Phdr = Elf64_Phdr;
    } else if #[cfg(target_pointer_width = "32")] {
        type Elf_Addr = Elf32_Addr;
        type Elf_Half = Elf32_Half;
        type Elf_Phdr = Elf32_Phdr;
    }
}
// C struct definitions. The `s!` macro derives the standard traits; the
// field layout must match the platform headers exactly, so only comments
// are added here.
s! {
    pub struct glob_t {
        pub gl_pathc: ::size_t,
        pub gl_matchc: ::size_t,
        pub gl_offs: ::size_t,
        pub gl_flags: ::c_int,
        pub gl_pathv: *mut *mut ::c_char,
        __unused1: *mut ::c_void,
        __unused2: *mut ::c_void,
        __unused3: *mut ::c_void,
        __unused4: *mut ::c_void,
        __unused5: *mut ::c_void,
        __unused6: *mut ::c_void,
        __unused7: *mut ::c_void,
    }
    pub struct lconv {
        pub decimal_point: *mut ::c_char,
        pub thousands_sep: *mut ::c_char,
        pub grouping: *mut ::c_char,
        pub int_curr_symbol: *mut ::c_char,
        pub currency_symbol: *mut ::c_char,
        pub mon_decimal_point: *mut ::c_char,
        pub mon_thousands_sep: *mut ::c_char,
        pub mon_grouping: *mut ::c_char,
        pub positive_sign: *mut ::c_char,
        pub negative_sign: *mut ::c_char,
        pub int_frac_digits: ::c_char,
        pub frac_digits: ::c_char,
        pub p_cs_precedes: ::c_char,
        pub p_sep_by_space: ::c_char,
        pub n_cs_precedes: ::c_char,
        pub n_sep_by_space: ::c_char,
        pub p_sign_posn: ::c_char,
        pub n_sign_posn: ::c_char,
        pub int_p_cs_precedes: ::c_char,
        pub int_p_sep_by_space: ::c_char,
        pub int_n_cs_precedes: ::c_char,
        pub int_n_sep_by_space: ::c_char,
        pub int_p_sign_posn: ::c_char,
        pub int_n_sign_posn: ::c_char,
    }
    // Per-filesystem mount argument structs (see `union mount_info` below).
    pub struct ufs_args {
        pub fspec: *mut ::c_char,
        pub export_info: export_args,
    }
    pub struct mfs_args {
        pub fspec: *mut ::c_char,
        pub export_info: export_args,
        // https://github.com/openbsd/src/blob/master/sys/sys/types.h#L134
        pub base: *mut ::c_char,
        pub size: ::c_ulong,
    }
    pub struct iso_args {
        pub fspec: *mut ::c_char,
        pub export_info: export_args,
        pub flags: ::c_int,
        pub sess: ::c_int,
    }
    pub struct nfs_args {
        pub version: ::c_int,
        pub addr: *mut ::sockaddr,
        pub addrlen: ::c_int,
        pub sotype: ::c_int,
        pub proto: ::c_int,
        pub fh: *mut ::c_uchar,
        pub fhsize: ::c_int,
        pub flags: ::c_int,
        pub wsize: ::c_int,
        pub rsize: ::c_int,
        pub readdirsize: ::c_int,
        pub timeo: ::c_int,
        pub retrans: ::c_int,
        pub maxgrouplist: ::c_int,
        pub readahead: ::c_int,
        pub leaseterm: ::c_int,
        pub deadthresh: ::c_int,
        pub hostname: *mut ::c_char,
        pub acregmin: ::c_int,
        pub acregmax: ::c_int,
        pub acdirmin: ::c_int,
        pub acdirmax: ::c_int,
    }
    pub struct msdosfs_args {
        pub fspec: *mut ::c_char,
        pub export_info: export_args,
        pub uid: ::uid_t,
        pub gid: ::gid_t,
        pub mask: ::mode_t,
        pub flags: ::c_int,
    }
    pub struct ntfs_args {
        pub fspec: *mut ::c_char,
        pub export_info: export_args,
        pub uid: ::uid_t,
        pub gid: ::gid_t,
        pub mode: ::mode_t,
        pub flag: ::c_ulong,
    }
    pub struct udf_args {
        pub fspec: *mut ::c_char,
        pub lastblock: u32,
    }
    pub struct tmpfs_args {
        pub ta_version: ::c_int,
        pub ta_nodes_max: ::ino_t,
        pub ta_size_max: ::off_t,
        pub ta_root_uid: ::uid_t,
        pub ta_root_gid: ::gid_t,
        pub ta_root_mode: ::mode_t,
    }
    pub struct fusefs_args {
        pub name: *mut ::c_char,
        pub fd: ::c_int,
        pub max_read: ::c_int,
        pub allow_other: ::c_int,
    }
    pub struct xucred {
        pub cr_uid: ::uid_t,
        pub cr_gid: ::gid_t,
        pub cr_ngroups: ::c_short,
        //https://github.com/openbsd/src/blob/master/sys/sys/syslimits.h#L44
        pub cr_groups: [::gid_t; 16],
    }
    pub struct export_args {
        pub ex_flags: ::c_int,
        pub ex_root: ::uid_t,
        pub ex_anon: xucred,
        pub ex_addr: *mut ::sockaddr,
        pub ex_addrlen: ::c_int,
        pub ex_mask: *mut ::sockaddr,
        pub ex_masklen: ::c_int,
    }
    pub struct ip_mreq {
        pub imr_multiaddr: in_addr,
        pub imr_interface: in_addr,
    }
    pub struct in_addr {
        pub s_addr: ::in_addr_t,
    }
    pub struct sockaddr_in {
        pub sin_len: u8,
        pub sin_family: ::sa_family_t,
        pub sin_port: ::in_port_t,
        pub sin_addr: ::in_addr,
        pub sin_zero: [i8; 8],
    }
    pub struct kevent {
        pub ident: ::uintptr_t,
        pub filter: ::c_short,
        pub flags: ::c_ushort,
        pub fflags: ::c_uint,
        pub data: i64,
        pub udata: *mut ::c_void,
    }
    pub struct stat {
        pub st_mode: ::mode_t,
        pub st_dev: ::dev_t,
        pub st_ino: ::ino_t,
        pub st_nlink: ::nlink_t,
        pub st_uid: ::uid_t,
        pub st_gid: ::gid_t,
        pub st_rdev: ::dev_t,
        pub st_atime: ::time_t,
        pub st_atime_nsec: ::c_long,
        pub st_mtime: ::time_t,
        pub st_mtime_nsec: ::c_long,
        pub st_ctime: ::time_t,
        pub st_ctime_nsec: ::c_long,
        pub st_size: ::off_t,
        pub st_blocks: ::blkcnt_t,
        pub st_blksize: ::blksize_t,
        pub st_flags: u32,
        pub st_gen: u32,
        pub st_birthtime: ::time_t,
        pub st_birthtime_nsec: ::c_long,
    }
    pub struct statvfs {
        pub f_bsize: ::c_ulong,
        pub f_frsize: ::c_ulong,
        pub f_blocks: ::fsblkcnt_t,
        pub f_bfree: ::fsblkcnt_t,
        pub f_bavail: ::fsblkcnt_t,
        pub f_files: ::fsfilcnt_t,
        pub f_ffree: ::fsfilcnt_t,
        pub f_favail: ::fsfilcnt_t,
        pub f_fsid: ::c_ulong,
        pub f_flag: ::c_ulong,
        pub f_namemax: ::c_ulong,
    }
    pub struct addrinfo {
        pub ai_flags: ::c_int,
        pub ai_family: ::c_int,
        pub ai_socktype: ::c_int,
        pub ai_protocol: ::c_int,
        pub ai_addrlen: ::socklen_t,
        pub ai_addr: *mut ::sockaddr,
        pub ai_canonname: *mut ::c_char,
        pub ai_next: *mut ::addrinfo,
    }
    pub struct Dl_info {
        pub dli_fname: *const ::c_char,
        pub dli_fbase: *mut ::c_void,
        pub dli_sname: *const ::c_char,
        pub dli_saddr: *mut ::c_void,
    }
    pub struct if_data {
        pub ifi_type: ::c_uchar,
        pub ifi_addrlen: ::c_uchar,
        pub ifi_hdrlen: ::c_uchar,
        pub ifi_link_state: ::c_uchar,
        pub ifi_mtu: u32,
        pub ifi_metric: u32,
        pub ifi_rdomain: u32,
        pub ifi_baudrate: u64,
        pub ifi_ipackets: u64,
        pub ifi_ierrors: u64,
        pub ifi_opackets: u64,
        pub ifi_oerrors: u64,
        pub ifi_collisions: u64,
        pub ifi_ibytes: u64,
        pub ifi_obytes: u64,
        pub ifi_imcasts: u64,
        pub ifi_omcasts: u64,
        pub ifi_iqdrops: u64,
        pub ifi_oqdrops: u64,
        pub ifi_noproto: u64,
        pub ifi_capabilities: u32,
        pub ifi_lastchange: ::timeval,
    }
    pub struct if_msghdr {
        pub ifm_msglen: ::c_ushort,
        pub ifm_version: ::c_uchar,
        pub ifm_type: ::c_uchar,
        pub ifm_hdrlen: ::c_ushort,
        pub ifm_index: ::c_ushort,
        pub ifm_tableid: ::c_ushort,
        pub ifm_pad1: ::c_uchar,
        pub ifm_pad2: ::c_uchar,
        pub ifm_addrs: ::c_int,
        pub ifm_flags: ::c_int,
        pub ifm_xflags: ::c_int,
        pub ifm_data: if_data,
    }
    pub struct sockaddr_dl {
        pub sdl_len: ::c_uchar,
        pub sdl_family: ::c_uchar,
        pub sdl_index: ::c_ushort,
        pub sdl_type: ::c_uchar,
        pub sdl_nlen: ::c_uchar,
        pub sdl_alen: ::c_uchar,
        pub sdl_slen: ::c_uchar,
        pub sdl_data: [::c_char; 24],
    }
    pub struct sockpeercred {
        pub uid: ::uid_t,
        pub gid: ::gid_t,
        pub pid: ::pid_t,
    }
    pub struct arphdr {
        pub ar_hrd: u16,
        pub ar_pro: u16,
        pub ar_hln: u8,
        pub ar_pln: u8,
        pub ar_op: u16,
    }
    pub struct shmid_ds {
        pub shm_perm: ::ipc_perm,
        pub shm_segsz: ::c_int,
        pub shm_lpid: ::pid_t,
        pub shm_cpid: ::pid_t,
        pub shm_nattch: ::c_short,
        pub shm_atime: ::time_t,
        __shm_atimensec: c_long,
        pub shm_dtime: ::time_t,
        __shm_dtimensec: c_long,
        pub shm_ctime: ::time_t,
        __shm_ctimensec: c_long,
        pub shm_internal: *mut ::c_void,
    }
    // elf.h
    pub struct Elf32_Phdr {
        pub p_type: Elf32_Word,
        pub p_offset: Elf32_Off,
        pub p_vaddr: Elf32_Addr,
        pub p_paddr: Elf32_Addr,
        pub p_filesz: Elf32_Word,
        pub p_memsz: Elf32_Word,
        pub p_flags: Elf32_Word,
        pub p_align: Elf32_Word,
    }
    // Note: field order differs from Elf32_Phdr, matching the ELF64 spec.
    pub struct Elf64_Phdr {
        pub p_type: Elf64_Word,
        pub p_flags: Elf64_Word,
        pub p_offset: Elf64_Off,
        pub p_vaddr: Elf64_Addr,
        pub p_paddr: Elf64_Addr,
        pub p_filesz: Elf64_Xword,
        pub p_memsz: Elf64_Xword,
        pub p_align: Elf64_Xword,
    }
    // link.h
    pub struct dl_phdr_info {
        pub dlpi_addr: Elf_Addr,
        pub dlpi_name: *const ::c_char,
        pub dlpi_phdr: *const Elf_Phdr,
        pub dlpi_phnum: Elf_Half,
    }
    // sys/sysctl.h
    pub struct kinfo_vmentry {
        pub kve_start: ::c_ulong,
        pub kve_end: ::c_ulong,
        pub kve_guard: ::c_ulong,
        pub kve_fspace: ::c_ulong,
        pub kve_fspace_augment: ::c_ulong,
        pub kve_offset: u64,
        pub kve_wired_count: ::c_int,
        pub kve_etype: ::c_int,
        pub kve_protection: ::c_int,
        pub kve_max_protection: ::c_int,
        pub kve_advice: ::c_int,
        pub kve_inheritance: ::c_int,
        pub kve_flags: u8,
    }
    pub struct ptrace_state {
        pub pe_report_event: ::c_int,
        pub pe_other_pid: ::pid_t,
        pub pe_tid: ::pid_t,
    }
    pub struct ptrace_thread_state {
        pub pts_tid: ::pid_t,
    }
}
impl siginfo_t {
    /// Returns the address stored in `si_addr`.
    pub unsafe fn si_addr(&self) -> *mut ::c_char {
        self.si_addr
    }
    /// Returns the `sigval` payload carried by this signal info.
    ///
    /// # Safety
    /// Reinterprets the opaque tail of `siginfo_t` using the layout of the
    /// timer/value variant; only meaningful for signals that actually
    /// carry a value.
    pub unsafe fn si_value(&self) -> ::sigval {
        // Mirror of the C union member layout used to reach `value`.
        #[repr(C)]
        struct siginfo_timer {
            _si_signo: ::c_int,
            _si_errno: ::c_int,
            _si_code: ::c_int,
            _pid: ::pid_t,
            _uid: ::uid_t,
            value: ::sigval,
        }
        (*(self as *const siginfo_t as *const siginfo_timer)).value
    }
}
// Types defined without auto-derived extra traits: their large arrays,
// padding, or union members need the manual impls provided elsewhere
// under the "extra_traits" feature.
s_no_extra_traits! {
    pub struct dirent {
        pub d_fileno: ::ino_t,
        pub d_off: ::off_t,
        pub d_reclen: u16,
        pub d_type: u8,
        pub d_namlen: u8,
        __d_padding: [u8; 4],
        pub d_name: [::c_char; 256],
    }
    pub struct sockaddr_storage {
        pub ss_len: u8,
        pub ss_family: ::sa_family_t,
        __ss_pad1: [u8; 6],
        __ss_pad2: i64,
        __ss_pad3: [u8; 240],
    }
    pub struct siginfo_t {
        pub si_signo: ::c_int,
        pub si_code: ::c_int,
        pub si_errno: ::c_int,
        pub si_addr: *mut ::c_char,
        // Opaque union payload; size depends on pointer width.
        #[cfg(target_pointer_width = "32")]
        __pad: [u8; 112],
        #[cfg(target_pointer_width = "64")]
        __pad: [u8; 108],
    }
    pub struct lastlog {
        ll_time: ::time_t,
        ll_line: [::c_char; UT_LINESIZE],
        ll_host: [::c_char; UT_HOSTSIZE],
    }
    pub struct utmp {
        pub ut_line: [::c_char; UT_LINESIZE],
        pub ut_name: [::c_char; UT_NAMESIZE],
        pub ut_host: [::c_char; UT_HOSTSIZE],
        pub ut_time: ::time_t,
    }
    // One variant per filesystem-specific mount argument struct.
    pub union mount_info {
        pub ufs_args: ufs_args,
        pub mfs_args: mfs_args,
        pub nfs_args: nfs_args,
        pub iso_args: iso_args,
        pub msdosfs_args: msdosfs_args,
        pub ntfs_args: ntfs_args,
        pub tmpfs_args: tmpfs_args,
        align: [::c_char; 160],
    }
}
cfg_if! {
if #[cfg(feature = "extra_traits")] {
impl PartialEq for dirent {
fn eq(&self, other: &dirent) -> bool {
self.d_fileno == other.d_fileno
&& self.d_off == other.d_off
&& self.d_reclen == other.d_reclen
&& self.d_type == other.d_type
&& self.d_namlen == other.d_namlen
&& self
.d_name
.iter()
.zip(other.d_name.iter())
.all(|(a,b)| a == b)
}
}
impl Eq for dirent {}
impl ::fmt::Debug for dirent {
fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
f.debug_struct("dirent")
.field("d_fileno", &self.d_fileno)
.field("d_off", &self.d_off)
.field("d_reclen", &self.d_reclen)
.field("d_type", &self.d_type)
.field("d_namlen", &self.d_namlen)
// FIXME: .field("d_name", &self.d_name)
.finish()
}
}
impl ::hash::Hash for dirent {
fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
self.d_fileno.hash(state);
self.d_off.hash(state);
self.d_reclen.hash(state);
self.d_type.hash(state);
self.d_namlen.hash(state);
self.d_name.hash(state);
}
}
        // Only the length/family header fields participate in Eq/Debug/Hash;
        // the padding bytes of sockaddr_storage are intentionally ignored.
        impl PartialEq for sockaddr_storage {
            fn eq(&self, other: &sockaddr_storage) -> bool {
                self.ss_len == other.ss_len
                    && self.ss_family == other.ss_family
            }
        }
        impl Eq for sockaddr_storage {}
        impl ::fmt::Debug for sockaddr_storage {
            fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
                f.debug_struct("sockaddr_storage")
                    .field("ss_len", &self.ss_len)
                    .field("ss_family", &self.ss_family)
                    .finish()
            }
        }
        impl ::hash::Hash for sockaddr_storage {
            fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
                self.ss_len.hash(state);
                self.ss_family.hash(state);
            }
        }
        // siginfo_t compares only the common header fields (signo, code,
        // errno, addr); any signal-specific payload is ignored.
        impl PartialEq for siginfo_t {
            fn eq(&self, other: &siginfo_t) -> bool {
                self.si_signo == other.si_signo
                    && self.si_code == other.si_code
                    && self.si_errno == other.si_errno
                    && self.si_addr == other.si_addr
            }
        }
        impl Eq for siginfo_t {}
        impl ::fmt::Debug for siginfo_t {
            fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
                f.debug_struct("siginfo_t")
                    .field("si_signo", &self.si_signo)
                    .field("si_code", &self.si_code)
                    .field("si_errno", &self.si_errno)
                    .field("si_addr", &self.si_addr)
                    .finish()
            }
        }
        impl ::hash::Hash for siginfo_t {
            fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
                self.si_signo.hash(state);
                self.si_code.hash(state);
                self.si_errno.hash(state);
                self.si_addr.hash(state);
            }
        }
        // lastlog holds two large c_char arrays, hence the element-wise
        // comparisons below instead of direct array equality.
        impl PartialEq for lastlog {
            fn eq(&self, other: &lastlog) -> bool {
                self.ll_time == other.ll_time
                    && self
                    .ll_line
                    .iter()
                    .zip(other.ll_line.iter())
                    .all(|(a,b)| a == b)
                    && self
                    .ll_host
                    .iter()
                    .zip(other.ll_host.iter())
                    .all(|(a,b)| a == b)
            }
        }
        impl Eq for lastlog {}
        impl ::fmt::Debug for lastlog {
            fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
                f.debug_struct("lastlog")
                    .field("ll_time", &self.ll_time)
                    // FIXME: .field("ll_line", &self.ll_line)
                    // FIXME: .field("ll_host", &self.ll_host)
                    .finish()
            }
        }
        impl ::hash::Hash for lastlog {
            fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
                self.ll_time.hash(state);
                self.ll_line.hash(state);
                self.ll_host.hash(state);
            }
        }
        // utmp: same pattern as lastlog — element-wise equality over the
        // three c_char arrays plus the timestamp.
        impl PartialEq for utmp {
            fn eq(&self, other: &utmp) -> bool {
                self.ut_time == other.ut_time
                    && self
                    .ut_line
                    .iter()
                    .zip(other.ut_line.iter())
                    .all(|(a,b)| a == b)
                    && self
                    .ut_name
                    .iter()
                    .zip(other.ut_name.iter())
                    .all(|(a,b)| a == b)
                    && self
                    .ut_host
                    .iter()
                    .zip(other.ut_host.iter())
                    .all(|(a,b)| a == b)
            }
        }
        impl Eq for utmp {}
        impl ::fmt::Debug for utmp {
            fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
                f.debug_struct("utmp")
                    // FIXME: .field("ut_line", &self.ut_line)
                    // FIXME: .field("ut_name", &self.ut_name)
                    // FIXME: .field("ut_host", &self.ut_host)
                    .field("ut_time", &self.ut_time)
                    .finish()
            }
        }
        impl ::hash::Hash for utmp {
            fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
                self.ut_line.hash(state);
                self.ut_name.hash(state);
                self.ut_host.hash(state);
                self.ut_time.hash(state);
            }
        }
        // mount_info is a union, so Eq/Hash go through the private `align`
        // byte array, which spans the full 160-byte union storage.  Reading
        // any union field requires `unsafe`.
        impl PartialEq for mount_info {
            fn eq(&self, other: &mount_info) -> bool {
                unsafe {
                    self.align
                        .iter()
                        .zip(other.align.iter())
                        .all(|(a,b)| a == b)
                }
            }
        }
        impl Eq for mount_info { }
        impl ::fmt::Debug for mount_info {
            fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
                f.debug_struct("mount_info")
                    // FIXME: .field("align", &self.align)
                    .finish()
            }
        }
        impl ::hash::Hash for mount_info {
            fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
                unsafe { self.align.hash(state) };
            }
        }
}
}
cfg_if! {
if #[cfg(libc_union)] {
        s_no_extra_traits! {
            // This type uses the union mount_info:
            // statfs(2) result (sys/mount.h).  Kept out of the derive macros
            // because the embedded union prevents automatic trait impls.
            pub struct statfs {
                pub f_flags: u32,
                pub f_bsize: u32,
                pub f_iosize: u32,
                pub f_blocks: u64,
                pub f_bfree: u64,
                pub f_bavail: i64,
                pub f_files: u64,
                pub f_ffree: u64,
                pub f_favail: i64,
                pub f_syncwrites: u64,
                pub f_syncreads: u64,
                pub f_asyncwrites: u64,
                pub f_asyncreads: u64,
                pub f_fsid: ::fsid_t,
                pub f_namemax: u32,
                pub f_owner: ::uid_t,
                pub f_ctime: u64,
                pub f_fstypename: [::c_char; 16],
                pub f_mntonname: [::c_char; 90],
                pub f_mntfromname: [::c_char; 90],
                pub f_mntfromspec: [::c_char; 90],
                pub mount_info: mount_info,
            }
        }
cfg_if! {
if #[cfg(feature = "extra_traits")] {
                // statfs equality covers every field; the four name arrays
                // are compared element-wise and the trailing union delegates
                // to mount_info's own PartialEq.
                impl PartialEq for statfs {
                    fn eq(&self, other: &statfs) -> bool {
                        self.f_flags == other.f_flags
                            && self.f_bsize == other.f_bsize
                            && self.f_iosize == other.f_iosize
                            && self.f_blocks == other.f_blocks
                            && self.f_bfree == other.f_bfree
                            && self.f_bavail == other.f_bavail
                            && self.f_files == other.f_files
                            && self.f_ffree == other.f_ffree
                            && self.f_favail == other.f_favail
                            && self.f_syncwrites == other.f_syncwrites
                            && self.f_syncreads == other.f_syncreads
                            && self.f_asyncwrites == other.f_asyncwrites
                            && self.f_asyncreads == other.f_asyncreads
                            && self.f_fsid == other.f_fsid
                            && self.f_namemax == other.f_namemax
                            && self.f_owner == other.f_owner
                            && self.f_ctime == other.f_ctime
                            && self.f_fstypename
                                .iter()
                                .zip(other.f_fstypename.iter())
                                .all(|(a,b)| a == b)
                            && self.f_mntonname
                                .iter()
                                .zip(other.f_mntonname.iter())
                                .all(|(a,b)| a == b)
                            && self.f_mntfromname
                                .iter()
                                .zip(other.f_mntfromname.iter())
                                .all(|(a,b)| a == b)
                            && self.f_mntfromspec
                                .iter()
                                .zip(other.f_mntfromspec.iter())
                                .all(|(a,b)| a == b)
                            && self.mount_info == other.mount_info
                    }
                }
                impl Eq for statfs { }
                impl ::fmt::Debug for statfs {
                    fn fmt(&self, f: &mut ::fmt::Formatter)
                        -> ::fmt::Result {
                        f.debug_struct("statfs")
                            .field("f_flags", &self.f_flags)
                            .field("f_bsize", &self.f_bsize)
                            .field("f_iosize", &self.f_iosize)
                            .field("f_blocks", &self.f_blocks)
                            .field("f_bfree", &self.f_bfree)
                            .field("f_bavail", &self.f_bavail)
                            .field("f_files", &self.f_files)
                            .field("f_ffree", &self.f_ffree)
                            .field("f_favail", &self.f_favail)
                            .field("f_syncwrites", &self.f_syncwrites)
                            .field("f_syncreads", &self.f_syncreads)
                            .field("f_asyncwrites", &self.f_asyncwrites)
                            .field("f_asyncreads", &self.f_asyncreads)
                            .field("f_fsid", &self.f_fsid)
                            .field("f_namemax", &self.f_namemax)
                            .field("f_owner", &self.f_owner)
                            .field("f_ctime", &self.f_ctime)
                            // FIXME: .field("f_fstypename", &self.f_fstypename)
                            // FIXME: .field("f_mntonname", &self.f_mntonname)
                            // FIXME: .field("f_mntfromname", &self.f_mntfromname)
                            // FIXME: .field("f_mntfromspec", &self.f_mntfromspec)
                            .field("mount_info", &self.mount_info)
                            .finish()
                    }
                }
                impl ::hash::Hash for statfs {
                    fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
                        self.f_flags.hash(state);
                        self.f_bsize.hash(state);
                        self.f_iosize.hash(state);
                        self.f_blocks.hash(state);
                        self.f_bfree.hash(state);
                        self.f_bavail.hash(state);
                        self.f_files.hash(state);
                        self.f_ffree.hash(state);
                        self.f_favail.hash(state);
                        self.f_syncwrites.hash(state);
                        self.f_syncreads.hash(state);
                        self.f_asyncwrites.hash(state);
                        self.f_asyncreads.hash(state);
                        self.f_fsid.hash(state);
                        self.f_namemax.hash(state);
                        self.f_owner.hash(state);
                        self.f_ctime.hash(state);
                        self.f_fstypename.hash(state);
                        self.f_mntonname.hash(state);
                        self.f_mntfromname.hash(state);
                        self.f_mntfromspec.hash(state);
                        self.mount_info.hash(state);
                    }
                }
}
}
}
}
// utmp(5)/lastlog fixed array sizes (see the struct definitions above).
pub const UT_NAMESIZE: usize = 32;
pub const UT_LINESIZE: usize = 8;
pub const UT_HOSTSIZE: usize = 256;
// open(2) flags
pub const O_CLOEXEC: ::c_int = 0x10000;
pub const O_DIRECTORY: ::c_int = 0x20000;
pub const O_RSYNC: ::c_int = O_SYNC;
// msync(2) flags
pub const MS_SYNC: ::c_int = 0x0002;
pub const MS_INVALIDATE: ::c_int = 0x0004;
pub const POLLNORM: ::c_short = ::POLLRDNORM;
// errno values
pub const ENOATTR: ::c_int = 83;
pub const EILSEQ: ::c_int = 84;
pub const EOVERFLOW: ::c_int = 87;
pub const ECANCELED: ::c_int = 88;
pub const EIDRM: ::c_int = 89;
pub const ENOMSG: ::c_int = 90;
pub const ENOTSUP: ::c_int = 91;
pub const EBADMSG: ::c_int = 92;
pub const ENOTRECOVERABLE: ::c_int = 93;
pub const EOWNERDEAD: ::c_int = 94;
pub const EPROTO: ::c_int = 95;
pub const ELAST: ::c_int = 95; // must equal the largest errno value above
pub const F_DUPFD_CLOEXEC: ::c_int = 10;
// utimensat(2)/futimens(2) special tv_nsec values
pub const UTIME_OMIT: c_long = -1;
pub const UTIME_NOW: c_long = -2;
// *at(2) flags
pub const AT_FDCWD: ::c_int = -100;
pub const AT_EACCESS: ::c_int = 0x01;
pub const AT_SYMLINK_NOFOLLOW: ::c_int = 0x02;
pub const AT_SYMLINK_FOLLOW: ::c_int = 0x04;
pub const AT_REMOVEDIR: ::c_int = 0x08;
#[deprecated(since = "0.2.64", note = "Not stable across OS versions")]
pub const RLIM_NLIMITS: ::c_int = 9;
// SOL_SOCKET-level socket options
pub const SO_TIMESTAMP: ::c_int = 0x0800;
pub const SO_SNDTIMEO: ::c_int = 0x1005;
pub const SO_RCVTIMEO: ::c_int = 0x1006;
pub const SO_BINDANY: ::c_int = 0x1000;
pub const SO_NETPROC: ::c_int = 0x1020;
pub const SO_RTABLE: ::c_int = 0x1021;
pub const SO_PEERCRED: ::c_int = 0x1022;
pub const SO_SPLICE: ::c_int = 0x1023;
// sys/netinet/in.h
// Protocols (RFC 1700)
// NOTE: These are in addition to the constants defined in src/unix/mod.rs

// IPPROTO_IP defined in src/unix/mod.rs
/// Hop-by-hop option header
pub const IPPROTO_HOPOPTS: ::c_int = 0;
// IPPROTO_ICMP defined in src/unix/mod.rs
/// group mgmt protocol
pub const IPPROTO_IGMP: ::c_int = 2;
/// gateway^2 (deprecated)
pub const IPPROTO_GGP: ::c_int = 3;
/// for compatibility
pub const IPPROTO_IPIP: ::c_int = 4;
// IPPROTO_TCP defined in src/unix/mod.rs
/// exterior gateway protocol
pub const IPPROTO_EGP: ::c_int = 8;
/// pup
pub const IPPROTO_PUP: ::c_int = 12;
// IPPROTO_UDP defined in src/unix/mod.rs
/// xns idp
pub const IPPROTO_IDP: ::c_int = 22;
/// tp-4 w/ class negotiation
pub const IPPROTO_TP: ::c_int = 29;
// IPPROTO_IPV6 defined in src/unix/mod.rs
/// IP6 routing header
pub const IPPROTO_ROUTING: ::c_int = 43;
/// IP6 fragmentation header
pub const IPPROTO_FRAGMENT: ::c_int = 44;
/// resource reservation
pub const IPPROTO_RSVP: ::c_int = 46;
/// General Routing Encap.
pub const IPPROTO_GRE: ::c_int = 47;
/// IP6 Encap Sec. Payload
pub const IPPROTO_ESP: ::c_int = 50;
/// IP6 Auth Header
pub const IPPROTO_AH: ::c_int = 51;
/// IP Mobility RFC 2004
pub const IPPROTO_MOBILE: ::c_int = 55;
// IPPROTO_ICMPV6 defined in src/unix/mod.rs
/// IP6 no next header
pub const IPPROTO_NONE: ::c_int = 59;
/// IP6 destination option
pub const IPPROTO_DSTOPTS: ::c_int = 60;
/// ISO cnlp
pub const IPPROTO_EON: ::c_int = 80;
/// Ethernet-in-IP
pub const IPPROTO_ETHERIP: ::c_int = 97;
/// encapsulation header
pub const IPPROTO_ENCAP: ::c_int = 98;
/// Protocol indep. multicast
pub const IPPROTO_PIM: ::c_int = 103;
/// IP Payload Comp. Protocol
pub const IPPROTO_IPCOMP: ::c_int = 108;
/// CARP
pub const IPPROTO_CARP: ::c_int = 112;
/// unicast MPLS packet
pub const IPPROTO_MPLS: ::c_int = 137;
/// PFSYNC
pub const IPPROTO_PFSYNC: ::c_int = 240;
pub const IPPROTO_MAX: ::c_int = 256;
// Only used internally, so it can be outside the range of valid IP protocols
pub const IPPROTO_DIVERT: ::c_int = 258;
// IPPROTO_IP socket options
pub const IP_RECVDSTADDR: ::c_int = 7;
pub const IP_SENDSRCADDR: ::c_int = IP_RECVDSTADDR;
pub const IP_RECVIF: ::c_int = 30;
// sys/netinet/in.h
// IPPROTO_TCP socket options
pub const TCP_MD5SIG: ::c_int = 0x04;
pub const TCP_NOPUSH: ::c_int = 0x10;
// Address families beyond the shared unix set; the pseudo_AF_* values are
// placeholders the kernel uses internally and keep their C names on purpose.
pub const AF_ECMA: ::c_int = 8;
pub const AF_ROUTE: ::c_int = 17;
pub const AF_ENCAP: ::c_int = 28;
pub const AF_SIP: ::c_int = 29;
pub const AF_KEY: ::c_int = 30;
pub const pseudo_AF_HDRCMPLT: ::c_int = 31;
pub const AF_BLUETOOTH: ::c_int = 32;
pub const AF_MPLS: ::c_int = 33;
pub const pseudo_AF_PFLOW: ::c_int = 34;
pub const pseudo_AF_PIPEX: ::c_int = 35;
// Routing-socket sysctl list types (NET_RT_*)
pub const NET_RT_DUMP: ::c_int = 1;
pub const NET_RT_FLAGS: ::c_int = 2;
pub const NET_RT_IFLIST: ::c_int = 3;
pub const NET_RT_STATS: ::c_int = 4;
pub const NET_RT_TABLE: ::c_int = 5;
pub const NET_RT_IFNAMES: ::c_int = 6;
#[doc(hidden)]
#[deprecated(
    since = "0.2.95",
    note = "Possibly increasing over the releases and might not be so used in the field"
)]
pub const NET_RT_MAXID: ::c_int = 7;
pub const IPV6_JOIN_GROUP: ::c_int = 12;
pub const IPV6_LEAVE_GROUP: ::c_int = 13;
// Protocol families mirror the matching address families.
pub const PF_ROUTE: ::c_int = AF_ROUTE;
pub const PF_ECMA: ::c_int = AF_ECMA;
pub const PF_ENCAP: ::c_int = AF_ENCAP;
pub const PF_SIP: ::c_int = AF_SIP;
pub const PF_KEY: ::c_int = AF_KEY;
pub const PF_BPF: ::c_int = pseudo_AF_HDRCMPLT;
pub const PF_BLUETOOTH: ::c_int = AF_BLUETOOTH;
pub const PF_MPLS: ::c_int = AF_MPLS;
pub const PF_PFLOW: ::c_int = pseudo_AF_PFLOW;
pub const PF_PIPEX: ::c_int = pseudo_AF_PIPEX;
pub const SCM_TIMESTAMP: ::c_int = 0x04;
pub const O_DSYNC: ::c_int = 128;
// mmap(2) flags; the 0x0000 entries are accepted but have no effect here.
pub const MAP_RENAME: ::c_int = 0x0000;
pub const MAP_NORESERVE: ::c_int = 0x0000;
pub const MAP_HASSEMAPHORE: ::c_int = 0x0000;
// More errno values (see the 83..95 block above)
pub const EIPSEC: ::c_int = 82;
pub const ENOMEDIUM: ::c_int = 85;
pub const EMEDIUMTYPE: ::c_int = 86;
// getaddrinfo(3)/getnameinfo(3) error codes
pub const EAI_BADFLAGS: ::c_int = -1;
pub const EAI_NONAME: ::c_int = -2;
pub const EAI_AGAIN: ::c_int = -3;
pub const EAI_FAIL: ::c_int = -4;
pub const EAI_NODATA: ::c_int = -5;
pub const EAI_FAMILY: ::c_int = -6;
pub const EAI_SOCKTYPE: ::c_int = -7;
pub const EAI_SERVICE: ::c_int = -8;
pub const EAI_MEMORY: ::c_int = -10;
pub const EAI_SYSTEM: ::c_int = -11;
pub const EAI_OVERFLOW: ::c_int = -14;
pub const RUSAGE_THREAD: ::c_int = 1;
pub const MAP_COPY: ::c_int = 0x0002;
pub const MAP_NOEXTEND: ::c_int = 0x0000;
// pathconf(2) names
pub const _PC_LINK_MAX: ::c_int = 1;
pub const _PC_MAX_CANON: ::c_int = 2;
pub const _PC_MAX_INPUT: ::c_int = 3;
pub const _PC_NAME_MAX: ::c_int = 4;
pub const _PC_PATH_MAX: ::c_int = 5;
pub const _PC_PIPE_BUF: ::c_int = 6;
pub const _PC_CHOWN_RESTRICTED: ::c_int = 7;
pub const _PC_NO_TRUNC: ::c_int = 8;
pub const _PC_VDISABLE: ::c_int = 9;
pub const _PC_2_SYMLINKS: ::c_int = 10;
pub const _PC_ALLOC_SIZE_MIN: ::c_int = 11;
pub const _PC_ASYNC_IO: ::c_int = 12;
pub const _PC_FILESIZEBITS: ::c_int = 13;
pub const _PC_PRIO_IO: ::c_int = 14;
pub const _PC_REC_INCR_XFER_SIZE: ::c_int = 15;
pub const _PC_REC_MAX_XFER_SIZE: ::c_int = 16;
pub const _PC_REC_MIN_XFER_SIZE: ::c_int = 17;
pub const _PC_REC_XFER_ALIGN: ::c_int = 18;
pub const _PC_SYMLINK_MAX: ::c_int = 19;
pub const _PC_SYNC_IO: ::c_int = 20;
pub const _PC_TIMESTAMP_RESOLUTION: ::c_int = 21;
// sysconf(3) names
pub const _SC_CLK_TCK: ::c_int = 3;
pub const _SC_SEM_NSEMS_MAX: ::c_int = 31;
pub const _SC_SEM_VALUE_MAX: ::c_int = 32;
pub const _SC_HOST_NAME_MAX: ::c_int = 33;
pub const _SC_MONOTONIC_CLOCK: ::c_int = 34;
pub const _SC_2_PBS: ::c_int = 35;
pub const _SC_2_PBS_ACCOUNTING: ::c_int = 36;
pub const _SC_2_PBS_CHECKPOINT: ::c_int = 37;
pub const _SC_2_PBS_LOCATE: ::c_int = 38;
pub const _SC_2_PBS_MESSAGE: ::c_int = 39;
pub const _SC_2_PBS_TRACK: ::c_int = 40;
pub const _SC_ADVISORY_INFO: ::c_int = 41;
pub const _SC_AIO_LISTIO_MAX: ::c_int = 42;
pub const _SC_AIO_MAX: ::c_int = 43;
pub const _SC_AIO_PRIO_DELTA_MAX: ::c_int = 44;
pub const _SC_ASYNCHRONOUS_IO: ::c_int = 45;
pub const _SC_ATEXIT_MAX: ::c_int = 46;
pub const _SC_BARRIERS: ::c_int = 47;
pub const _SC_CLOCK_SELECTION: ::c_int = 48;
pub const _SC_CPUTIME: ::c_int = 49;
pub const _SC_DELAYTIMER_MAX: ::c_int = 50;
pub const _SC_IOV_MAX: ::c_int = 51;
pub const _SC_IPV6: ::c_int = 52;
pub const _SC_MAPPED_FILES: ::c_int = 53;
pub const _SC_MEMLOCK: ::c_int = 54;
pub const _SC_MEMLOCK_RANGE: ::c_int = 55;
pub const _SC_MEMORY_PROTECTION: ::c_int = 56;
pub const _SC_MESSAGE_PASSING: ::c_int = 57;
pub const _SC_MQ_OPEN_MAX: ::c_int = 58;
pub const _SC_MQ_PRIO_MAX: ::c_int = 59;
pub const _SC_PRIORITIZED_IO: ::c_int = 60;
pub const _SC_PRIORITY_SCHEDULING: ::c_int = 61;
pub const _SC_RAW_SOCKETS: ::c_int = 62;
pub const _SC_READER_WRITER_LOCKS: ::c_int = 63;
pub const _SC_REALTIME_SIGNALS: ::c_int = 64;
pub const _SC_REGEXP: ::c_int = 65;
pub const _SC_RTSIG_MAX: ::c_int = 66;
pub const _SC_SEMAPHORES: ::c_int = 67;
pub const _SC_SHARED_MEMORY_OBJECTS: ::c_int = 68;
pub const _SC_SHELL: ::c_int = 69;
pub const _SC_SIGQUEUE_MAX: ::c_int = 70;
pub const _SC_SPAWN: ::c_int = 71;
pub const _SC_SPIN_LOCKS: ::c_int = 72;
pub const _SC_SPORADIC_SERVER: ::c_int = 73;
pub const _SC_SS_REPL_MAX: ::c_int = 74;
pub const _SC_SYNCHRONIZED_IO: ::c_int = 75;
pub const _SC_SYMLOOP_MAX: ::c_int = 76;
pub const _SC_THREAD_ATTR_STACKADDR: ::c_int = 77;
pub const _SC_THREAD_ATTR_STACKSIZE: ::c_int = 78;
pub const _SC_THREAD_CPUTIME: ::c_int = 79;
pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: ::c_int = 80;
pub const _SC_THREAD_KEYS_MAX: ::c_int = 81;
pub const _SC_THREAD_PRIO_INHERIT: ::c_int = 82;
pub const _SC_THREAD_PRIO_PROTECT: ::c_int = 83;
pub const _SC_THREAD_PRIORITY_SCHEDULING: ::c_int = 84;
pub const _SC_THREAD_PROCESS_SHARED: ::c_int = 85;
pub const _SC_THREAD_ROBUST_PRIO_INHERIT: ::c_int = 86;
pub const _SC_THREAD_ROBUST_PRIO_PROTECT: ::c_int = 87;
pub const _SC_THREAD_SPORADIC_SERVER: ::c_int = 88;
pub const _SC_THREAD_STACK_MIN: ::c_int = 89;
pub const _SC_THREAD_THREADS_MAX: ::c_int = 90;
pub const _SC_THREADS: ::c_int = 91;
pub const _SC_TIMEOUTS: ::c_int = 92;
pub const _SC_TIMER_MAX: ::c_int = 93;
pub const _SC_TIMERS: ::c_int = 94;
pub const _SC_TRACE: ::c_int = 95;
pub const _SC_TRACE_EVENT_FILTER: ::c_int = 96;
pub const _SC_TRACE_EVENT_NAME_MAX: ::c_int = 97;
pub const _SC_TRACE_INHERIT: ::c_int = 98;
pub const _SC_TRACE_LOG: ::c_int = 99;
pub const _SC_GETGR_R_SIZE_MAX: ::c_int = 100;
pub const _SC_GETPW_R_SIZE_MAX: ::c_int = 101;
pub const _SC_LOGIN_NAME_MAX: ::c_int = 102;
pub const _SC_THREAD_SAFE_FUNCTIONS: ::c_int = 103;
pub const _SC_TRACE_NAME_MAX: ::c_int = 104;
pub const _SC_TRACE_SYS_MAX: ::c_int = 105;
pub const _SC_TRACE_USER_EVENT_MAX: ::c_int = 106;
pub const _SC_TTY_NAME_MAX: ::c_int = 107;
pub const _SC_TYPED_MEMORY_OBJECTS: ::c_int = 108;
pub const _SC_V6_ILP32_OFF32: ::c_int = 109;
pub const _SC_V6_ILP32_OFFBIG: ::c_int = 110;
pub const _SC_V6_LP64_OFF64: ::c_int = 111;
pub const _SC_V6_LPBIG_OFFBIG: ::c_int = 112;
pub const _SC_V7_ILP32_OFF32: ::c_int = 113;
pub const _SC_V7_ILP32_OFFBIG: ::c_int = 114;
pub const _SC_V7_LP64_OFF64: ::c_int = 115;
pub const _SC_V7_LPBIG_OFFBIG: ::c_int = 116;
pub const _SC_XOPEN_CRYPT: ::c_int = 117;
pub const _SC_XOPEN_ENH_I18N: ::c_int = 118;
pub const _SC_XOPEN_LEGACY: ::c_int = 119;
pub const _SC_XOPEN_REALTIME: ::c_int = 120;
pub const _SC_XOPEN_REALTIME_THREADS: ::c_int = 121;
pub const _SC_XOPEN_STREAMS: ::c_int = 122;
pub const _SC_XOPEN_UNIX: ::c_int = 123;
pub const _SC_XOPEN_UUCP: ::c_int = 124;
pub const _SC_XOPEN_VERSION: ::c_int = 125;
pub const _SC_PHYS_PAGES: ::c_int = 500;
pub const _SC_AVPHYS_PAGES: ::c_int = 501;
pub const _SC_NPROCESSORS_CONF: ::c_int = 502;
pub const _SC_NPROCESSORS_ONLN: ::c_int = 503;
pub const FD_SETSIZE: usize = 1024;
pub const ST_NOSUID: ::c_ulong = 2;
// pthread statics: OpenBSD's pthread types are pointers, so the static
// initializers are null pointers filled in lazily by libpthread.
pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = 0 as *mut _;
pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = 0 as *mut _;
pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = 0 as *mut _;
pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 1;
pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 2;
pub const PTHREAD_MUTEX_NORMAL: ::c_int = 3;
pub const PTHREAD_MUTEX_STRICT_NP: ::c_int = 4;
pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_STRICT_NP;
// kqueue(2)/kevent(2): filter identifiers (negative i16 values) ...
pub const EVFILT_AIO: i16 = -3;
pub const EVFILT_PROC: i16 = -5;
pub const EVFILT_READ: i16 = -1;
pub const EVFILT_SIGNAL: i16 = -6;
pub const EVFILT_TIMER: i16 = -7;
pub const EVFILT_VNODE: i16 = -4;
pub const EVFILT_WRITE: i16 = -2;
// ... action/status flags for kevent.flags ...
pub const EV_ADD: u16 = 0x1;
pub const EV_DELETE: u16 = 0x2;
pub const EV_ENABLE: u16 = 0x4;
pub const EV_DISABLE: u16 = 0x8;
pub const EV_ONESHOT: u16 = 0x10;
pub const EV_CLEAR: u16 = 0x20;
pub const EV_RECEIPT: u16 = 0x40;
pub const EV_DISPATCH: u16 = 0x80;
pub const EV_FLAG1: u16 = 0x2000;
pub const EV_ERROR: u16 = 0x4000;
pub const EV_EOF: u16 = 0x8000;
pub const EV_SYSFLAGS: u16 = 0xf000;
// ... and filter-specific data in kevent.fflags (NOTE_*).
pub const NOTE_LOWAT: u32 = 0x00000001;
pub const NOTE_EOF: u32 = 0x00000002;
pub const NOTE_DELETE: u32 = 0x00000001;
pub const NOTE_WRITE: u32 = 0x00000002;
pub const NOTE_EXTEND: u32 = 0x00000004;
pub const NOTE_ATTRIB: u32 = 0x00000008;
pub const NOTE_LINK: u32 = 0x00000010;
pub const NOTE_RENAME: u32 = 0x00000020;
pub const NOTE_REVOKE: u32 = 0x00000040;
pub const NOTE_TRUNCATE: u32 = 0x00000080;
pub const NOTE_EXIT: u32 = 0x80000000;
pub const NOTE_FORK: u32 = 0x40000000;
pub const NOTE_EXEC: u32 = 0x20000000;
pub const NOTE_PDATAMASK: u32 = 0x000fffff;
pub const NOTE_PCTRLMASK: u32 = 0xf0000000;
pub const NOTE_TRACK: u32 = 0x00000001;
pub const NOTE_TRACKERR: u32 = 0x00000002;
pub const NOTE_CHILD: u32 = 0x00000004;
pub const TMP_MAX: ::c_uint = 0x7fffffff;
// getaddrinfo(3) hint flags
pub const AI_PASSIVE: ::c_int = 1;
pub const AI_CANONNAME: ::c_int = 2;
pub const AI_NUMERICHOST: ::c_int = 4;
pub const AI_EXT: ::c_int = 8;
pub const AI_NUMERICSERV: ::c_int = 16;
pub const AI_FQDN: ::c_int = 32;
pub const AI_ADDRCONFIG: ::c_int = 64;
// getnameinfo(3) flags
pub const NI_NUMERICHOST: ::c_int = 1;
pub const NI_NUMERICSERV: ::c_int = 2;
pub const NI_NOFQDN: ::c_int = 4;
pub const NI_NAMEREQD: ::c_int = 8;
pub const NI_DGRAM: ::c_int = 16;
pub const NI_MAXHOST: ::size_t = 256;
pub const RTLD_LOCAL: ::c_int = 0;
// sysctl(2) MIB plumbing: name-vector limit, node types, and top-level ids
pub const CTL_MAXNAME: ::c_int = 12;
pub const CTLTYPE_NODE: ::c_int = 1;
pub const CTLTYPE_INT: ::c_int = 2;
pub const CTLTYPE_STRING: ::c_int = 3;
pub const CTLTYPE_QUAD: ::c_int = 4;
pub const CTLTYPE_STRUCT: ::c_int = 5;
pub const CTL_UNSPEC: ::c_int = 0;
pub const CTL_KERN: ::c_int = 1;
pub const CTL_VM: ::c_int = 2;
pub const CTL_FS: ::c_int = 3;
pub const CTL_NET: ::c_int = 4;
pub const CTL_DEBUG: ::c_int = 5;
pub const CTL_HW: ::c_int = 6;
pub const CTL_MACHDEP: ::c_int = 7;
pub const CTL_DDB: ::c_int = 9;
pub const CTL_VFS: ::c_int = 10;
pub const CTL_MAXID: ::c_int = 11;
pub const HW_NCPUONLINE: ::c_int = 25;
// sysctl(2) second-level identifiers under CTL_KERN
pub const KERN_OSTYPE: ::c_int = 1;
pub const KERN_OSRELEASE: ::c_int = 2;
pub const KERN_OSREV: ::c_int = 3;
pub const KERN_VERSION: ::c_int = 4;
pub const KERN_MAXVNODES: ::c_int = 5;
pub const KERN_MAXPROC: ::c_int = 6;
pub const KERN_MAXFILES: ::c_int = 7;
pub const KERN_ARGMAX: ::c_int = 8;
pub const KERN_SECURELVL: ::c_int = 9;
pub const KERN_HOSTNAME: ::c_int = 10;
pub const KERN_HOSTID: ::c_int = 11;
pub const KERN_CLOCKRATE: ::c_int = 12;
pub const KERN_PROF: ::c_int = 16;
pub const KERN_POSIX1: ::c_int = 17;
pub const KERN_NGROUPS: ::c_int = 18;
pub const KERN_JOB_CONTROL: ::c_int = 19;
pub const KERN_SAVED_IDS: ::c_int = 20;
pub const KERN_BOOTTIME: ::c_int = 21;
pub const KERN_DOMAINNAME: ::c_int = 22;
pub const KERN_MAXPARTITIONS: ::c_int = 23;
pub const KERN_RAWPARTITION: ::c_int = 24;
pub const KERN_MAXTHREAD: ::c_int = 25;
pub const KERN_NTHREADS: ::c_int = 26;
pub const KERN_OSVERSION: ::c_int = 27;
pub const KERN_SOMAXCONN: ::c_int = 28;
pub const KERN_SOMINCONN: ::c_int = 29;
#[deprecated(since = "0.2.71", note = "Removed in OpenBSD 6.0")]
pub const KERN_USERMOUNT: ::c_int = 30;
pub const KERN_NOSUIDCOREDUMP: ::c_int = 32;
pub const KERN_FSYNC: ::c_int = 33;
pub const KERN_SYSVMSG: ::c_int = 34;
pub const KERN_SYSVSEM: ::c_int = 35;
pub const KERN_SYSVSHM: ::c_int = 36;
#[deprecated(since = "0.2.71", note = "Removed in OpenBSD 6.0")]
pub const KERN_ARND: ::c_int = 37;
pub const KERN_MSGBUFSIZE: ::c_int = 38;
pub const KERN_MALLOCSTATS: ::c_int = 39;
pub const KERN_CPTIME: ::c_int = 40;
pub const KERN_NCHSTATS: ::c_int = 41;
pub const KERN_FORKSTAT: ::c_int = 42;
pub const KERN_NSELCOLL: ::c_int = 43;
pub const KERN_TTY: ::c_int = 44;
pub const KERN_CCPU: ::c_int = 45;
pub const KERN_FSCALE: ::c_int = 46;
pub const KERN_NPROCS: ::c_int = 47;
pub const KERN_MSGBUF: ::c_int = 48;
pub const KERN_POOL: ::c_int = 49;
pub const KERN_STACKGAPRANDOM: ::c_int = 50;
pub const KERN_SYSVIPC_INFO: ::c_int = 51;
pub const KERN_SPLASSERT: ::c_int = 54;
pub const KERN_PROC_ARGS: ::c_int = 55;
pub const KERN_NFILES: ::c_int = 56;
pub const KERN_TTYCOUNT: ::c_int = 57;
pub const KERN_NUMVNODES: ::c_int = 58;
pub const KERN_MBSTAT: ::c_int = 59;
pub const KERN_SEMINFO: ::c_int = 61;
pub const KERN_SHMINFO: ::c_int = 62;
pub const KERN_INTRCNT: ::c_int = 63;
pub const KERN_WATCHDOG: ::c_int = 64;
pub const KERN_PROC: ::c_int = 66;
pub const KERN_MAXCLUSTERS: ::c_int = 67;
pub const KERN_EVCOUNT: ::c_int = 68;
pub const KERN_TIMECOUNTER: ::c_int = 69;
pub const KERN_MAXLOCKSPERUID: ::c_int = 70;
pub const KERN_CPTIME2: ::c_int = 71;
pub const KERN_CACHEPCT: ::c_int = 72;
pub const KERN_FILE: ::c_int = 73;
pub const KERN_CONSDEV: ::c_int = 75;
pub const KERN_NETLIVELOCKS: ::c_int = 76;
pub const KERN_POOL_DEBUG: ::c_int = 77;
pub const KERN_PROC_CWD: ::c_int = 78;
pub const KERN_PROC_NOBROADCASTKILL: ::c_int = 79;
pub const KERN_PROC_VMMAP: ::c_int = 80;
pub const KERN_GLOBAL_PTRACE: ::c_int = 81;
pub const KERN_CONSBUFSIZE: ::c_int = 82;
pub const KERN_CONSBUF: ::c_int = 83;
pub const KERN_AUDIO: ::c_int = 84;
pub const KERN_CPUSTATS: ::c_int = 85;
pub const KERN_PFSTATUS: ::c_int = 86;
pub const KERN_TIMEOUT_STATS: ::c_int = 87;
#[deprecated(
    since = "0.2.95",
    note = "Possibly increasing over the releases and might not be so used in the field"
)]
pub const KERN_MAXID: ::c_int = 88;
// Third-level identifiers for KERN_PROC process filters
pub const KERN_PROC_ALL: ::c_int = 0;
pub const KERN_PROC_PID: ::c_int = 1;
pub const KERN_PROC_PGRP: ::c_int = 2;
pub const KERN_PROC_SESSION: ::c_int = 3;
pub const KERN_PROC_TTY: ::c_int = 4;
pub const KERN_PROC_UID: ::c_int = 5;
pub const KERN_PROC_RUID: ::c_int = 6;
pub const KERN_PROC_KTHREAD: ::c_int = 7;
pub const KERN_PROC_SHOW_THREADS: ::c_int = 0x40000000;
// Third-level identifiers for KERN_SYSVIPC_INFO
pub const KERN_SYSVIPC_MSG_INFO: ::c_int = 1;
pub const KERN_SYSVIPC_SEM_INFO: ::c_int = 2;
pub const KERN_SYSVIPC_SHM_INFO: ::c_int = 3;
// Third-level identifiers for KERN_PROC_ARGS
pub const KERN_PROC_ARGV: ::c_int = 1;
pub const KERN_PROC_NARGV: ::c_int = 2;
pub const KERN_PROC_ENV: ::c_int = 3;
pub const KERN_PROC_NENV: ::c_int = 4;
// kinfo_proc fixed array sizes
pub const KI_NGROUPS: ::c_int = 16;
pub const KI_MAXCOMLEN: ::c_int = 24;
pub const KI_WMESGLEN: ::c_int = 8;
pub const KI_MAXLOGNAME: ::c_int = 32;
pub const KI_EMULNAMELEN: ::c_int = 8;
// termios flags
pub const CHWFLOW: ::tcflag_t = ::MDMBUF | ::CRTSCTS;
pub const OLCUC: ::tcflag_t = 0x20;
pub const ONOCR: ::tcflag_t = 0x40;
pub const ONLRET: ::tcflag_t = 0x80;
// Per-filesystem mount flags; see
// https://github.com/openbsd/src/blob/master/sys/sys/mount.h
pub const ISOFSMNT_NORRIP: ::c_int = 0x1; // disable Rock Ridge Ext
pub const ISOFSMNT_GENS: ::c_int = 0x2; // enable generation numbers
pub const ISOFSMNT_EXTATT: ::c_int = 0x4; // enable extended attr
pub const ISOFSMNT_NOJOLIET: ::c_int = 0x8; // disable Joliet Ext
pub const ISOFSMNT_SESS: ::c_int = 0x10; // use iso_args.sess
pub const NFS_ARGSVERSION: ::c_int = 4; // change when nfs_args changes
pub const NFSMNT_RESVPORT: ::c_int = 0; // always use reserved ports
pub const NFSMNT_SOFT: ::c_int = 0x1; // soft mount (hard is default)
pub const NFSMNT_WSIZE: ::c_int = 0x2; // set write size
pub const NFSMNT_RSIZE: ::c_int = 0x4; // set read size
pub const NFSMNT_TIMEO: ::c_int = 0x8; // set initial timeout
pub const NFSMNT_RETRANS: ::c_int = 0x10; // set number of request retries
pub const NFSMNT_MAXGRPS: ::c_int = 0x20; // set maximum grouplist size
pub const NFSMNT_INT: ::c_int = 0x40; // allow interrupts on hard mount
pub const NFSMNT_NOCONN: ::c_int = 0x80; // Don't Connect the socket
pub const NFSMNT_NQNFS: ::c_int = 0x100; // Use Nqnfs protocol
pub const NFSMNT_NFSV3: ::c_int = 0x200; // Use NFS Version 3 protocol
pub const NFSMNT_KERB: ::c_int = 0x400; // Use Kerberos authentication
pub const NFSMNT_DUMBTIMR: ::c_int = 0x800; // Don't estimate rtt dynamically
pub const NFSMNT_LEASETERM: ::c_int = 0x1000; // set lease term (nqnfs)
pub const NFSMNT_READAHEAD: ::c_int = 0x2000; // set read ahead
pub const NFSMNT_DEADTHRESH: ::c_int = 0x4000; // set dead server retry thresh
pub const NFSMNT_NOAC: ::c_int = 0x8000; // disable attribute cache
pub const NFSMNT_RDIRPLUS: ::c_int = 0x10000; // Use Readdirplus for V3
pub const NFSMNT_READDIRSIZE: ::c_int = 0x20000; // Set readdir size
/* Flags valid only in mount syscall arguments */
pub const NFSMNT_ACREGMIN: ::c_int = 0x40000; // acregmin field valid
pub const NFSMNT_ACREGMAX: ::c_int = 0x80000; // acregmax field valid
pub const NFSMNT_ACDIRMIN: ::c_int = 0x100000; // acdirmin field valid
pub const NFSMNT_ACDIRMAX: ::c_int = 0x200000; // acdirmax field valid
/* Flags valid only in kernel */
pub const NFSMNT_INTERNAL: ::c_int = 0xfffc0000; // Bits set internally
pub const NFSMNT_HASWRITEVERF: ::c_int = 0x40000; // Has write verifier for V3
pub const NFSMNT_GOTPATHCONF: ::c_int = 0x80000; // Got the V3 pathconf info
pub const NFSMNT_GOTFSINFO: ::c_int = 0x100000; // Got the V3 fsinfo
pub const NFSMNT_MNTD: ::c_int = 0x200000; // Mnt server for mnt point
pub const NFSMNT_DISMINPROG: ::c_int = 0x400000; // Dismount in progress
pub const NFSMNT_DISMNT: ::c_int = 0x800000; // Dismounted
pub const NFSMNT_SNDLOCK: ::c_int = 0x1000000; // Send socket lock
pub const NFSMNT_WANTSND: ::c_int = 0x2000000; // Want above
pub const NFSMNT_RCVLOCK: ::c_int = 0x4000000; // Rcv socket lock
pub const NFSMNT_WANTRCV: ::c_int = 0x8000000; // Want above
pub const NFSMNT_WAITAUTH: ::c_int = 0x10000000; // Wait for authentication
pub const NFSMNT_HASAUTH: ::c_int = 0x20000000; // Has authenticator
pub const NFSMNT_WANTAUTH: ::c_int = 0x40000000; // Wants an authenticator
pub const NFSMNT_AUTHERR: ::c_int = 0x80000000; // Authentication error
pub const MSDOSFSMNT_SHORTNAME: ::c_int = 0x1; // Force old DOS short names only
pub const MSDOSFSMNT_LONGNAME: ::c_int = 0x2; // Force Win'95 long names
pub const MSDOSFSMNT_NOWIN95: ::c_int = 0x4; // Completely ignore Win95 entries
pub const NTFS_MFLAG_CASEINS: ::c_int = 0x1;
pub const NTFS_MFLAG_ALLNAMES: ::c_int = 0x2;
pub const TMPFS_ARGS_VERSION: ::c_int = 1;
pub const MAP_STACK: ::c_int = 0x4000;
pub const MAP_CONCEAL: ::c_int = 0x8000;
// Interface flags; see
// https://github.com/openbsd/src/blob/master/sys/net/if.h#L187
pub const IFF_UP: ::c_int = 0x1; // interface is up
pub const IFF_BROADCAST: ::c_int = 0x2; // broadcast address valid
pub const IFF_DEBUG: ::c_int = 0x4; // turn on debugging
pub const IFF_LOOPBACK: ::c_int = 0x8; // is a loopback net
pub const IFF_POINTOPOINT: ::c_int = 0x10; // interface is point-to-point link
pub const IFF_STATICARP: ::c_int = 0x20; // only static ARP
pub const IFF_RUNNING: ::c_int = 0x40; // resources allocated
pub const IFF_NOARP: ::c_int = 0x80; // no address resolution protocol
pub const IFF_PROMISC: ::c_int = 0x100; // receive all packets
pub const IFF_ALLMULTI: ::c_int = 0x200; // receive all multicast packets
pub const IFF_OACTIVE: ::c_int = 0x400; // transmission in progress
pub const IFF_SIMPLEX: ::c_int = 0x800; // can't hear own transmissions
pub const IFF_LINK0: ::c_int = 0x1000; // per link layer defined bit
pub const IFF_LINK1: ::c_int = 0x2000; // per link layer defined bit
pub const IFF_LINK2: ::c_int = 0x4000; // per link layer defined bit
pub const IFF_MULTICAST: ::c_int = 0x8000; // supports multicast
// Stack and signal-stack sizes are expressed in pages via _MAX_PAGE_SHIFT.
pub const PTHREAD_STACK_MIN: ::size_t = 1_usize << _MAX_PAGE_SHIFT;
pub const MINSIGSTKSZ: ::size_t = 3_usize << _MAX_PAGE_SHIFT;
pub const SIGSTKSZ: ::size_t = MINSIGSTKSZ + (1_usize << _MAX_PAGE_SHIFT) * 4;
// ptrace(2) requests
pub const PT_SET_EVENT_MASK: ::c_int = 12;
pub const PT_GET_EVENT_MASK: ::c_int = 13;
pub const PT_GET_PROCESS_STATE: ::c_int = 14;
pub const PT_GET_THREAD_FIRST: ::c_int = 15;
pub const PT_GET_THREAD_NEXT: ::c_int = 16;
pub const PT_FIRSTMACH: ::c_int = 32;
// socket(2) type modifier flags
pub const SOCK_CLOEXEC: ::c_int = 0x8000;
pub const SOCK_NONBLOCK: ::c_int = 0x4000;
pub const SOCK_DNS: ::c_int = 0x1000;
// bpf(4) ioctls
pub const BIOCGRSIG: ::c_ulong = 0x40044273;
pub const BIOCSRSIG: ::c_ulong = 0x80044272;
pub const BIOCSDLT: ::c_ulong = 0x8004427a;
pub const PTRACE_FORK: ::c_int = 0x0002;
pub const WCONTINUED: ::c_int = 8;
const_fn! {
    // Round `p` up to the next multiple of (_ALIGNBYTES + 1); used by the
    // CMSG_* macros below to align ancillary-data offsets.
    {const} fn _ALIGN(p: usize) -> usize {
        (p + _ALIGNBYTES) & !_ALIGNBYTES
    }
}
f! {
    // Pointer to the payload of a cmsghdr: the byte right after the
    // (aligned) header.
    pub fn CMSG_DATA(cmsg: *const ::cmsghdr) -> *mut ::c_uchar {
        (cmsg as *mut ::c_uchar)
            .offset(_ALIGN(::mem::size_of::<::cmsghdr>()) as isize)
    }

    // cmsg_len value for a payload of `length` bytes (header + data,
    // without trailing padding).
    pub fn CMSG_LEN(length: ::c_uint) -> ::c_uint {
        _ALIGN(::mem::size_of::<::cmsghdr>()) as ::c_uint + length
    }

    // Next cmsghdr in the control buffer, or null when the following
    // header would not fit inside msg_control/msg_controllen.
    pub fn CMSG_NXTHDR(mhdr: *const ::msghdr, cmsg: *const ::cmsghdr)
        -> *mut ::cmsghdr
    {
        if cmsg.is_null() {
            return ::CMSG_FIRSTHDR(mhdr);
        };
        // Candidate end: current header + aligned payload + room for the
        // next header.
        let next = cmsg as usize + _ALIGN((*cmsg).cmsg_len as usize)
            + _ALIGN(::mem::size_of::<::cmsghdr>());
        let max = (*mhdr).msg_control as usize
            + (*mhdr).msg_controllen as usize;
        if next > max {
            0 as *mut ::cmsghdr
        } else {
            (cmsg as usize + _ALIGN((*cmsg).cmsg_len as usize))
                as *mut ::cmsghdr
        }
    }

    // Total buffer space consumed by one message of `length` payload
    // bytes, padding included.
    pub {const} fn CMSG_SPACE(length: ::c_uint) -> ::c_uint {
        (_ALIGN(::mem::size_of::<::cmsghdr>()) + _ALIGN(length as usize))
            as ::c_uint
    }
}
safe_f! {
    // Signal that caused the stop (meaningful when WIFSTOPPED is true).
    pub {const} fn WSTOPSIG(status: ::c_int) -> ::c_int {
        status >> 8
    }

    // Terminated by a signal: the low 7 bits hold a real signal number,
    // i.e. neither 0 (normal exit) nor 0o177 (stopped marker).
    pub {const} fn WIFSIGNALED(status: ::c_int) -> bool {
        (status & 0o177) != 0o177 && (status & 0o177) != 0
    }

    // Stopped: the low byte is the special marker value 0o177.
    pub {const} fn WIFSTOPPED(status: ::c_int) -> bool {
        (status & 0xff) == 0o177
    }

    // Continued via SIGCONT: the full 16-bit status is 0o177777.
    pub {const} fn WIFCONTINUED(status: ::c_int) -> bool {
        (status & 0o177777) == 0o177777
    }
}
extern "C" {
    pub fn gettimeofday(tp: *mut ::timeval, tz: *mut ::timezone) -> ::c_int;
    pub fn settimeofday(tp: *const ::timeval, tz: *const ::timezone) -> ::c_int;
    pub fn execvpe(
        file: *const ::c_char,
        argv: *const *const ::c_char,
        envp: *const *const ::c_char,
    ) -> ::c_int;
    // OpenBSD sandboxing primitives: restrict future syscalls (pledge) and
    // filesystem visibility (unveil).
    pub fn pledge(promises: *const ::c_char, execpromises: *const ::c_char) -> ::c_int;
    pub fn unveil(path: *const ::c_char, permissions: *const ::c_char) -> ::c_int;
    pub fn strtonum(
        nptr: *const ::c_char,
        minval: ::c_longlong,
        maxval: ::c_longlong,
        errstr: *mut *const ::c_char,
    ) -> ::c_longlong;
    pub fn dup3(src: ::c_int, dst: ::c_int, flags: ::c_int) -> ::c_int;
    pub fn chflags(path: *const ::c_char, flags: ::c_uint) -> ::c_int;
    pub fn fchflags(fd: ::c_int, flags: ::c_uint) -> ::c_int;
    pub fn chflagsat(
        fd: ::c_int,
        path: *const ::c_char,
        flags: ::c_uint,
        atflag: ::c_int,
    ) -> ::c_int;
    pub fn dirfd(dirp: *mut ::DIR) -> ::c_int;
    pub fn getnameinfo(
        sa: *const ::sockaddr,
        salen: ::socklen_t,
        host: *mut ::c_char,
        hostlen: ::size_t,
        serv: *mut ::c_char,
        servlen: ::size_t,
        flags: ::c_int,
    ) -> ::c_int;
    pub fn getresgid(rgid: *mut ::gid_t, egid: *mut ::gid_t, sgid: *mut ::gid_t) -> ::c_int;
    pub fn getresuid(ruid: *mut ::uid_t, euid: *mut ::uid_t, suid: *mut ::uid_t) -> ::c_int;
    pub fn kevent(
        kq: ::c_int,
        changelist: *const ::kevent,
        nchanges: ::c_int,
        eventlist: *mut ::kevent,
        nevents: ::c_int,
        timeout: *const ::timespec,
    ) -> ::c_int;
    pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int) -> ::c_int;
    pub fn getthrid() -> ::pid_t;
    pub fn pthread_attr_getguardsize(
        attr: *const ::pthread_attr_t,
        guardsize: *mut ::size_t,
    ) -> ::c_int;
    pub fn pthread_attr_getstack(
        attr: *const ::pthread_attr_t,
        stackaddr: *mut *mut ::c_void,
        stacksize: *mut ::size_t,
    ) -> ::c_int;
    pub fn pthread_main_np() -> ::c_int;
    pub fn pthread_get_name_np(tid: ::pthread_t, name: *mut ::c_char, len: ::size_t);
    pub fn pthread_set_name_np(tid: ::pthread_t, name: *const ::c_char);
    pub fn pthread_stackseg_np(thread: ::pthread_t, sinfo: *mut ::stack_t) -> ::c_int;
    pub fn sysctl(
        name: *const ::c_int,
        namelen: ::c_uint,
        oldp: *mut ::c_void,
        oldlenp: *mut ::size_t,
        newp: *mut ::c_void,
        newlen: ::size_t,
    ) -> ::c_int;
    pub fn getentropy(buf: *mut ::c_void, buflen: ::size_t) -> ::c_int;
    pub fn setresgid(rgid: ::gid_t, egid: ::gid_t, sgid: ::gid_t) -> ::c_int;
    pub fn setresuid(ruid: ::uid_t, euid: ::uid_t, suid: ::uid_t) -> ::c_int;
    pub fn ptrace(request: ::c_int, pid: ::pid_t, addr: caddr_t, data: ::c_int) -> ::c_int;
    pub fn utrace(label: *const ::c_char, addr: *const ::c_void, len: ::size_t) -> ::c_int;
    pub fn memmem(
        haystack: *const ::c_void,
        haystacklen: ::size_t,
        needle: *const ::c_void,
        needlelen: ::size_t,
    ) -> *mut ::c_void;
    // #include <link.h>
    pub fn dl_iterate_phdr(
        callback: ::Option<
            unsafe extern "C" fn(
                info: *mut dl_phdr_info,
                size: usize,
                data: *mut ::c_void,
            ) -> ::c_int,
        >,
        data: *mut ::c_void,
    ) -> ::c_int;
    pub fn uselocale(loc: ::locale_t) -> ::locale_t;
    pub fn freelocale(loc: ::locale_t);
    pub fn newlocale(mask: ::c_int, locale: *const ::c_char, base: ::locale_t) -> ::locale_t;
    pub fn duplocale(base: ::locale_t) -> ::locale_t;
    // Added in `OpenBSD` 5.5
    pub fn explicit_bzero(s: *mut ::c_void, len: ::size_t);
    pub fn setproctitle(fmt: *const ::c_char, ...);
    // Allocator helpers for sensitive data: freezero zeroes before freeing;
    // the *_conceal variants allocate "concealed" memory (see malloc(3)).
    pub fn freezero(ptr: *mut ::c_void, size: ::size_t);
    pub fn malloc_conceal(size: ::size_t) -> *mut ::c_void;
    pub fn calloc_conceal(nmemb: ::size_t, size: ::size_t) -> *mut ::c_void;
}
// Backtrace support lives in the separate `execinfo` library, hence the
// explicit link attribute.
#[link(name = "execinfo")]
extern "C" {
    pub fn backtrace(addrlist: *mut *mut ::c_void, len: ::size_t) -> ::size_t;
    pub fn backtrace_symbols(addrlist: *const *mut ::c_void, len: ::size_t) -> *mut *mut ::c_char;
    pub fn backtrace_symbols_fd(
        addrlist: *const *mut ::c_void,
        len: ::size_t,
        fd: ::c_int,
    ) -> ::c_int;
    pub fn backtrace_symbols_fmt(
        addrlist: *const *mut ::c_void,
        len: ::size_t,
        fmt: *const ::c_char,
    ) -> *mut *mut ::c_char;
}
cfg_if! {
    // statfs embeds the mount_info union, so these declarations are only
    // available when the build enables union support.
    if #[cfg(libc_union)] {
        extern {
            // these functions use statfs which uses the union mount_info:
            pub fn statfs(path: *const ::c_char, buf: *mut statfs) -> ::c_int;
            pub fn fstatfs(fd: ::c_int, buf: *mut statfs) -> ::c_int;
        }
    }
}
cfg_if! {
    // Pull in the architecture-specific definitions for the compile target.
    if #[cfg(target_arch = "aarch64")] {
        mod aarch64;
        pub use self::aarch64::*;
    } else if #[cfg(target_arch = "arm")] {
        mod arm;
        pub use self::arm::*;
    } else if #[cfg(target_arch = "mips64")] {
        mod mips64;
        pub use self::mips64::*;
    } else if #[cfg(target_arch = "powerpc")] {
        mod powerpc;
        pub use self::powerpc::*;
    } else if #[cfg(target_arch = "powerpc64")] {
        mod powerpc64;
        pub use self::powerpc64::*;
    } else if #[cfg(target_arch = "riscv64")] {
        mod riscv64;
        pub use self::riscv64::*;
    } else if #[cfg(target_arch = "sparc64")] {
        mod sparc64;
        pub use self::sparc64::*;
    } else if #[cfg(target_arch = "x86")] {
        mod x86;
        pub use self::x86::*;
    } else if #[cfg(target_arch = "x86_64")] {
        mod x86_64;
        pub use self::x86_64::*;
    } else {
        // Unknown target_arch
    }
}
| 34.461356 | 98 | 0.590953 |
f70b3a9b0b14cc28c4ebfd223a2d6feeb9bf47a8 | 8,260 | // Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
use aptos_types::{
epoch_change::Verifier, epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures,
};
use executor_types::ExecutedTrees;
use crate::error::Error;
/// SyncState contains the following fields:
/// * `committed_ledger_info` holds the latest certified ledger info (committed to storage),
/// i.e., the ledger info for the highest version for which storage has all ledger state.
/// * `synced_trees` holds the latest transaction accumulator and state tree (which may
/// or may not be committed to storage), i.e., some ledger state for the next highest
/// ledger info version is missing.
/// * `trusted_epoch_state` corresponds to the current epoch if the highest committed
/// ledger info (`committed_ledger_info`) is in the middle of the epoch, otherwise, it
/// corresponds to the next epoch if the highest committed ledger info ends the epoch.
///
/// Note: `committed_ledger_info` is used for helping other Aptos nodes synchronize (i.e.,
/// it corresponds to the highest version we have a proof for in storage). `synced_trees`
/// is used locally for retrieving missing chunks for the local storage.
#[derive(Clone, Debug)]
pub struct SyncState {
    // Latest ledger info fully committed to storage (see module docs above).
    committed_ledger_info: LedgerInfoWithSignatures,
    // Latest (possibly uncommitted) transaction accumulator and state tree.
    synced_trees: ExecutedTrees,
    // Epoch state that incoming ledger infos are verified against.
    trusted_epoch_state: EpochState,
}
impl SyncState {
pub fn new(
committed_ledger_info: LedgerInfoWithSignatures,
synced_trees: ExecutedTrees,
current_epoch_state: EpochState,
) -> Self {
let trusted_epoch_state = committed_ledger_info
.ledger_info()
.next_epoch_state()
.cloned()
.unwrap_or(current_epoch_state);
SyncState {
committed_ledger_info,
synced_trees,
trusted_epoch_state,
}
}
pub fn committed_epoch(&self) -> u64 {
self.committed_ledger_info.ledger_info().epoch()
}
pub fn committed_ledger_info(&self) -> LedgerInfoWithSignatures {
self.committed_ledger_info.clone()
}
pub fn committed_version(&self) -> u64 {
self.committed_ledger_info.ledger_info().version()
}
/// Returns the highest available version in the local storage, even if it's not
/// committed (i.e., covered by a ledger info).
pub fn synced_version(&self) -> u64 {
self.synced_trees.version().unwrap_or(0)
}
pub fn trusted_epoch(&self) -> u64 {
self.trusted_epoch_state.epoch
}
pub fn verify_ledger_info(&self, ledger_info: &LedgerInfoWithSignatures) -> Result<(), Error> {
self.trusted_epoch_state
.verify(ledger_info)
.map_err(|error| Error::UnexpectedError(error.to_string()))
}
}
#[cfg(any(feature = "fuzzing", test))]
pub(crate) mod test_utils {
    //! Helpers that build fully wired `StateSyncCoordinator`s, backed by a
    //! fresh genesis-bootstrapped database, for unit tests and fuzzing.
    use std::{collections::HashMap, sync::Arc};
    use futures::channel::mpsc;
    use aptos_config::{
        config::{NodeConfig, RoleType},
        network_id::NetworkId,
    };
    use aptos_infallible::RwLock;
    use aptos_types::{
        move_resource::MoveStorage,
        on_chain_config::ON_CHAIN_CONFIG_REGISTRY,
        transaction::{Transaction, WriteSetPayload},
        waypoint::Waypoint,
    };
    use aptos_vm::AptosVM;
    use aptosdb::AptosDB;
    use channel::{aptos_channel, message_queues::QueueStyle};
    use event_notifications::{EventNotificationSender, EventSubscriptionService};
    use executor::chunk_executor::ChunkExecutor;
    use executor_test_helpers::bootstrap_genesis;
    use mempool_notifications::MempoolNotifier;
    use network::{
        peer_manager::{ConnectionRequestSender, PeerManagerRequestSender},
        protocols::network::NewNetworkSender,
    };
    use storage_interface::{DbReader, DbReaderWriter};
    use crate::{
        coordinator::StateSyncCoordinator,
        executor_proxy::{ExecutorProxy, ExecutorProxyTrait},
        network::StateSyncSender,
    };
    /// Validator coordinator with a caller-provided config and waypoint.
    #[cfg(test)]
    pub(crate) fn create_coordinator_with_config_and_waypoint(
        node_config: NodeConfig,
        waypoint: Waypoint,
    ) -> StateSyncCoordinator<ExecutorProxy<ChunkExecutor<AptosVM>>, MempoolNotifier> {
        create_state_sync_coordinator_for_tests(node_config, waypoint, false)
    }
    /// Coordinator configured as a validator with default config/waypoint.
    pub(crate) fn create_validator_coordinator(
    ) -> StateSyncCoordinator<ExecutorProxy<ChunkExecutor<AptosVM>>, MempoolNotifier> {
        let mut node_config = NodeConfig::default();
        node_config.base.role = RoleType::Validator;
        create_state_sync_coordinator_for_tests(node_config, Waypoint::default(), false)
    }
    /// Coordinator configured as a full node with default config/waypoint.
    #[cfg(test)]
    pub(crate) fn create_full_node_coordinator(
    ) -> StateSyncCoordinator<ExecutorProxy<ChunkExecutor<AptosVM>>, MempoolNotifier> {
        let mut node_config = NodeConfig::default();
        node_config.base.role = RoleType::FullNode;
        create_state_sync_coordinator_for_tests(node_config, Waypoint::default(), false)
    }
    /// Validator coordinator started in read-only mode.
    #[cfg(test)]
    pub(crate) fn create_read_only_coordinator(
    ) -> StateSyncCoordinator<ExecutorProxy<ChunkExecutor<AptosVM>>, MempoolNotifier> {
        let mut node_config = NodeConfig::default();
        node_config.base.role = RoleType::Validator;
        create_state_sync_coordinator_for_tests(node_config, Waypoint::default(), true)
    }
    /// Shared construction path: bootstraps a fresh test DB from genesis,
    /// wires up the executor proxy, network senders and notification
    /// channels, and assembles the coordinator. Panics on any setup failure
    /// (acceptable in test code).
    fn create_state_sync_coordinator_for_tests(
        node_config: NodeConfig,
        waypoint: Waypoint,
        read_only_mode: bool,
    ) -> StateSyncCoordinator<ExecutorProxy<ChunkExecutor<AptosVM>>, MempoolNotifier> {
        // Generate a genesis change set
        let (genesis, _) = vm_genesis::test_genesis_change_set_and_validators(Some(1));
        // Create test aptos database
        let db_path = aptos_temppath::TempPath::new();
        db_path.create_as_dir().unwrap();
        let (db, db_rw) = DbReaderWriter::wrap(AptosDB::new_for_test(db_path.path()));
        // Bootstrap the genesis transaction
        let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis));
        bootstrap_genesis::<AptosVM>(&db_rw, &genesis_txn).unwrap();
        // Create the event subscription service and notify initial configs
        let storage: Arc<dyn DbReader> = db.clone();
        let synced_version = (&*storage).fetch_latest_state_checkpoint_version().unwrap();
        let mut event_subscription_service = EventSubscriptionService::new(
            ON_CHAIN_CONFIG_REGISTRY,
            Arc::new(RwLock::new(db_rw.clone())),
        );
        event_subscription_service
            .notify_initial_configs(synced_version)
            .unwrap();
        // Create executor proxy
        let chunk_executor = Arc::new(ChunkExecutor::<AptosVM>::new(db_rw).unwrap());
        let executor_proxy = ExecutorProxy::new(db, chunk_executor, event_subscription_service);
        // Get initial state
        let initial_state = executor_proxy.get_local_storage_state().unwrap();
        // Setup network senders (the receive halves are intentionally dropped;
        // these tests never exercise the network path).
        let (network_reqs_tx, _network_reqs_rx) = aptos_channel::new(QueueStyle::FIFO, 8, None);
        let (connection_reqs_tx, _) = aptos_channel::new(QueueStyle::FIFO, 8, None);
        let network_sender = StateSyncSender::new(
            PeerManagerRequestSender::new(network_reqs_tx),
            ConnectionRequestSender::new(connection_reqs_tx),
        );
        let network_id = NetworkId::Validator;
        let network_senders = vec![(network_id, network_sender)]
            .into_iter()
            .collect::<HashMap<_, _>>();
        // Create channel senders and receivers
        let (_coordinator_sender, coordinator_receiver) = mpsc::unbounded();
        let (mempool_notifier, _) = mempool_notifications::new_mempool_notifier_listener_pair();
        let (_, consensus_listener) =
            consensus_notifications::new_consensus_notifier_listener_pair(1000);
        // Return the new state sync coordinator
        StateSyncCoordinator::new(
            coordinator_receiver,
            mempool_notifier,
            consensus_listener,
            network_senders,
            &node_config,
            waypoint,
            executor_proxy,
            initial_state,
            read_only_mode,
        )
        .unwrap()
    }
}
| 38.064516 | 99 | 0.680872 |
fb47a3626dc5a3959c864998ec66276acb3b7b89 | 1,533 | use std::rc::Rc;
use html5ever::{local_name, namespace_url, ns, LocalName, Prefix};
use crate::attr::AttrValue;
use crate::document::Document;
use crate::element::Element;
use crate::inheritance::{Castable, DerivedFrom};
use crate::node::Node;
use crate::nodetype::{ElementTypeId, NodeTypeId};
use crate::virtualmethods::VirtualMethods;
/// Base type for HTML elements: wraps an `Element` and adds HTML-specific
/// attribute parsing (see the `VirtualMethods` impl below).
#[derive(Clone, Debug)]
#[repr(C)]
pub struct HTMLElement {
    element: Element,
}
// Marker impls letting HTMLElement participate in the DOM casting hierarchy.
impl Castable for HTMLElement {}
impl DerivedFrom<Node> for HTMLElement {}
impl DerivedFrom<Element> for HTMLElement {}
impl HTMLElement {
    /// Creates a plain HTML element in `document`, using the generic
    /// `Element` node type.
    pub fn new(local_name: LocalName, prefix: Option<Prefix>, document: Rc<Document>) -> Self {
        let node_type_id = NodeTypeId::Element(ElementTypeId::Element);
        HTMLElement::new_inherited(node_type_id, local_name, prefix, document)
    }

    /// Shared constructor for HTML element subtypes: builds the inner
    /// `Element` in the HTML namespace with the requested node type.
    pub fn new_inherited(
        node_type_id: NodeTypeId,
        local_name: LocalName,
        prefix: Option<Prefix>,
        document: Rc<Document>,
    ) -> Self {
        let element = Element::new_inherited(node_type_id, local_name, ns!(html), prefix, document);
        HTMLElement { element }
    }
}
impl VirtualMethods for HTMLElement {
    /// Delegates virtual dispatch to the inner `Element`.
    fn super_type(&self) -> Option<&dyn VirtualMethods> {
        Some(self.upcast::<Element>() as &dyn VirtualMethods)
    }

    /// Parses attributes with HTML-specific value semantics; everything else
    /// is forwarded to the parent type.
    fn parse_plain_attribute(&self, name: &LocalName, value: String) -> AttrValue {
        match name {
            // The two arms were identical; fold them into one pattern.
            &local_name!("itemprop") | &local_name!("itemtype") => {
                AttrValue::from_serialized_tokenlist(value.into())
            }
            _ => self.super_type().unwrap().parse_plain_attribute(name, value),
        }
    }
}
| 26.431034 | 92 | 0.722114 |
e650a9c14ca2134dc5f52374fd5cc78c6c75f52a | 11,216 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// A Resource Graph query: the scope (subscriptions and/or a management
/// group), the query text, paging/format options, and requested facets.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryRequest {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub subscriptions: Vec<String>,
    #[serde(rename = "managementGroupId", default, skip_serializing_if = "Option::is_none")]
    pub management_group_id: Option<String>,
    pub query: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub options: Option<QueryRequestOptions>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub facets: Vec<FacetRequest>,
}
/// Paging and result-format options for a query.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryRequestOptions {
    #[serde(rename = "$skipToken", default, skip_serializing_if = "Option::is_none")]
    pub skip_token: Option<String>,
    #[serde(rename = "$top", default, skip_serializing_if = "Option::is_none")]
    pub top: Option<i32>,
    #[serde(rename = "$skip", default, skip_serializing_if = "Option::is_none")]
    pub skip: Option<i32>,
    #[serde(rename = "resultFormat", default, skip_serializing_if = "Option::is_none")]
    pub result_format: Option<query_request_options::ResultFormat>,
}
pub mod query_request_options {
    use super::*;
    /// Requested wire format: a columnar table or an array of objects.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ResultFormat {
        #[serde(rename = "table")]
        Table,
        #[serde(rename = "objectArray")]
        ObjectArray,
    }
}
/// A facet to compute over query results, identified by its expression.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FacetRequest {
    pub expression: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub options: Option<FacetRequestOptions>,
}
/// Sorting, filtering and paging options for a facet computation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FacetRequestOptions {
    #[serde(rename = "sortBy", default, skip_serializing_if = "Option::is_none")]
    pub sort_by: Option<String>,
    #[serde(rename = "sortOrder", default, skip_serializing_if = "Option::is_none")]
    pub sort_order: Option<facet_request_options::SortOrder>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub filter: Option<String>,
    #[serde(rename = "$top", default, skip_serializing_if = "Option::is_none")]
    pub top: Option<i32>,
}
pub mod facet_request_options {
    use super::*;
    /// Ascending or descending facet sort order.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum SortOrder {
        #[serde(rename = "asc")]
        Asc,
        #[serde(rename = "desc")]
        Desc,
    }
}
/// Response to a query: record counts, truncation flag, an optional
/// continuation token, the result data, and any computed facets.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryResponse {
    #[serde(rename = "totalRecords")]
    pub total_records: i64,
    pub count: i64,
    #[serde(rename = "resultTruncated")]
    pub result_truncated: query_response::ResultTruncated,
    #[serde(rename = "$skipToken", default, skip_serializing_if = "Option::is_none")]
    pub skip_token: Option<String>,
    pub data: serde_json::Value,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub facets: Vec<Facet>,
}
pub mod query_response {
    use super::*;
    /// Whether the result set was truncated (serialized as the strings
    /// "true"/"false", hence an enum rather than a bool).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ResultTruncated {
        #[serde(rename = "true")]
        True,
        #[serde(rename = "false")]
        False,
    }
}
/// Columnar result data: column descriptors plus rows of values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Table {
    pub columns: Vec<Column>,
    pub rows: Vec<Row>,
}
/// A single result column: its name and data type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Column {
    pub name: String,
    #[serde(rename = "type")]
    pub type_: ColumnDataType,
}
/// The data types a result column can carry.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ColumnDataType {
    #[serde(rename = "string")]
    String,
    #[serde(rename = "integer")]
    Integer,
    #[serde(rename = "number")]
    Number,
    #[serde(rename = "boolean")]
    Boolean,
    #[serde(rename = "object")]
    Object,
}
/// One row of a columnar result.
pub type Row = Vec<serde_json::Value>;
/// A facet over query results, identified by its expression; concrete
/// payloads are `FacetResult` / `FacetError` (flattened below).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Facet {
    pub expression: String,
    #[serde(rename = "resultType")]
    pub result_type: String,
}
/// A successfully computed facet, with its result data and counts.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FacetResult {
    #[serde(flatten)]
    pub facet: Facet,
    #[serde(rename = "totalRecords")]
    pub total_records: i64,
    pub count: i32,
    pub data: serde_json::Value,
}
/// A facet whose computation failed, with the reported errors.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FacetError {
    #[serde(flatten)]
    pub facet: Facet,
    pub errors: Vec<ErrorDetails>,
}
/// Top-level error envelope returned by the service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
    pub error: Error,
}
/// An error with a code, message, and optional nested details.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Error {
    pub code: String,
    pub message: String,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub details: Vec<ErrorDetails>,
}
/// A single error detail entry.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetails {
    pub code: String,
    pub message: String,
}
/// List of operations supported by the provider.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Operation>,
}
/// A single REST operation, with optional display metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<operation::Display>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub origin: Option<String>,
}
pub mod operation {
    use super::*;
    /// Human-readable description of an operation for display purposes.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Display {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub provider: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub resource: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub operation: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub description: Option<String>,
    }
}
/// Parameters for listing the changes of one resource over a time interval.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceChangesRequestParameters {
    #[serde(rename = "resourceId")]
    pub resource_id: String,
    pub interval: serde_json::Value,
    #[serde(rename = "$skipToken", default, skip_serializing_if = "Option::is_none")]
    pub skip_token: Option<String>,
    #[serde(rename = "$top", default, skip_serializing_if = "Option::is_none")]
    pub top: Option<i32>,
    #[serde(rename = "fetchPropertyChanges", default, skip_serializing_if = "Option::is_none")]
    pub fetch_property_changes: Option<bool>,
}
/// A page of resource changes plus an optional continuation token.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceChangeList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub changes: Vec<ResourceChangeData>,
    #[serde(rename = "$skipToken", default, skip_serializing_if = "Option::is_none")]
    pub skip_token: Option<serde_json::Value>,
}
/// One detected change: before/after snapshots plus per-property diffs.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceChangeData {
    #[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
    pub resource_id: Option<String>,
    #[serde(rename = "changeId")]
    pub change_id: String,
    #[serde(rename = "beforeSnapshot")]
    pub before_snapshot: serde_json::Value,
    #[serde(rename = "afterSnapshot")]
    pub after_snapshot: serde_json::Value,
    #[serde(rename = "changeType", default, skip_serializing_if = "Option::is_none")]
    pub change_type: Option<resource_change_data::ChangeType>,
    #[serde(rename = "propertyChanges", default, skip_serializing_if = "Vec::is_empty")]
    pub property_changes: Vec<ResourcePropertyChange>,
}
pub mod resource_change_data {
    use super::*;
    /// Kind of change that happened to the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ChangeType {
        Create,
        Update,
        Delete,
    }
}
/// A single property-level difference between two snapshots.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourcePropertyChange {
    #[serde(rename = "propertyName")]
    pub property_name: String,
    #[serde(rename = "beforeValue", default, skip_serializing_if = "Option::is_none")]
    pub before_value: Option<String>,
    #[serde(rename = "afterValue", default, skip_serializing_if = "Option::is_none")]
    pub after_value: Option<String>,
    #[serde(rename = "changeCategory")]
    pub change_category: resource_property_change::ChangeCategory,
    #[serde(rename = "propertyChangeType")]
    pub property_change_type: resource_property_change::PropertyChangeType,
}
pub mod resource_property_change {
    use super::*;
    /// Whether the property change was user- or system-initiated.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ChangeCategory {
        User,
        System,
    }
    /// Kind of modification applied to the property.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum PropertyChangeType {
        Insert,
        Update,
        Remove,
    }
}
/// A point-in-time snapshot of a resource's content.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSnapshotData {
    #[serde(rename = "snapshotId", default, skip_serializing_if = "Option::is_none")]
    pub snapshot_id: Option<String>,
    pub timestamp: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub content: Option<serde_json::Value>,
}
/// Parameters identifying one specific change of one resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceChangeDetailsRequestParameters {
    #[serde(rename = "resourceId")]
    pub resource_id: String,
    #[serde(rename = "changeId")]
    pub change_id: String,
}
/// A start/end time interval (string-typed timestamps).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DateTimeInterval {
    pub start: String,
    pub end: String,
}
/// Request for the history of resources matching a query, over a scope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourcesHistoryRequest {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub subscriptions: Vec<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub options: Option<ResourcesHistoryRequestOptions>,
    #[serde(rename = "managementGroupId", default, skip_serializing_if = "Option::is_none")]
    pub management_group_id: Option<String>,
}
/// Time interval, paging, and result-format options for a history request.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourcesHistoryRequestOptions {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub interval: Option<DateTimeInterval>,
    #[serde(rename = "$top", default, skip_serializing_if = "Option::is_none")]
    pub top: Option<i32>,
    #[serde(rename = "$skip", default, skip_serializing_if = "Option::is_none")]
    pub skip: Option<i32>,
    #[serde(rename = "$skipToken", default, skip_serializing_if = "Option::is_none")]
    pub skip_token: Option<String>,
    #[serde(rename = "resultFormat", default, skip_serializing_if = "Option::is_none")]
    pub result_format: Option<resources_history_request_options::ResultFormat>,
}
pub mod resources_history_request_options {
    use super::*;
    /// Requested wire format: a columnar table or an array of objects.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ResultFormat {
        #[serde(rename = "table")]
        Table,
        #[serde(rename = "objectArray")]
        ObjectArray,
    }
}
| 37.139073 | 95 | 0.685717 |
fe0a6cf288024724dff293408e62d697d4117b4d | 555 | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
const ARR: [usize; 5] = [5, 4, 3, 2, 1];
fn main() {
assert_eq!(3, ARR[ARR[3]]);
}
| 34.6875 | 68 | 0.702703 |
21244876d339f2bcdb171e39f4686668ced716ed | 5,574 | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use crate::EventController;
use crate::IMContext;
use crate::PropagationLimit;
use crate::PropagationPhase;
use crate::Widget;
use glib::object::Cast;
use glib::object::IsA;
use glib::object::ObjectType as ObjectType_;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use glib::StaticType;
use glib::ToValue;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
glib::wrapper! {
    /// Key-event controller: a gir-generated wrapper around GTK's
    /// `GtkEventControllerKey`, subclassing [`EventController`].
    pub struct EventControllerKey(Object<ffi::GtkEventControllerKey, ffi::GtkEventControllerKeyClass>) @extends EventController;

    match fn {
        get_type => || ffi::gtk_event_controller_key_get_type(),
    }
}
impl EventControllerKey {
    /// Creates a new key event controller. Asserts that GTK has been
    /// initialized on the main thread.
    #[doc(alias = "gtk_event_controller_key_new")]
    pub fn new() -> EventControllerKey {
        assert_initialized_main_thread!();
        unsafe {
            EventController::from_glib_full(ffi::gtk_event_controller_key_new()).unsafe_cast()
        }
    }
    /// Forwards the current key event to `widget`. Thin wrapper over
    /// `gtk_event_controller_key_forward`; the boolean is GTK's return value.
    #[doc(alias = "gtk_event_controller_key_forward")]
    pub fn forward<P: IsA<Widget>>(&self, widget: &P) -> bool {
        unsafe {
            from_glib(ffi::gtk_event_controller_key_forward(
                self.to_glib_none().0,
                widget.as_ref().to_glib_none().0,
            ))
        }
    }
    /// Thin wrapper over `gtk_event_controller_key_get_group`.
    #[doc(alias = "gtk_event_controller_key_get_group")]
    pub fn get_group(&self) -> u32 {
        unsafe { ffi::gtk_event_controller_key_get_group(self.to_glib_none().0) }
    }
    /// Returns the input-method context attached to this controller, if any.
    #[doc(alias = "gtk_event_controller_key_get_im_context")]
    pub fn get_im_context(&self) -> Option<IMContext> {
        unsafe {
            from_glib_none(ffi::gtk_event_controller_key_get_im_context(
                self.to_glib_none().0,
            ))
        }
    }
    /// Attaches an input-method context to this controller.
    #[doc(alias = "gtk_event_controller_key_set_im_context")]
    pub fn set_im_context<P: IsA<IMContext>>(&self, im_context: &P) {
        unsafe {
            ffi::gtk_event_controller_key_set_im_context(
                self.to_glib_none().0,
                im_context.as_ref().to_glib_none().0,
            );
        }
    }
    /// Connects `f` to the `im-update` signal.
    pub fn connect_im_update<F: Fn(&EventControllerKey) + 'static>(&self, f: F) -> SignalHandlerId {
        // C-callable shim: recovers the boxed Rust closure from the user-data
        // pointer and invokes it with a borrowed wrapper.
        unsafe extern "C" fn im_update_trampoline<F: Fn(&EventControllerKey) + 'static>(
            this: *mut ffi::GtkEventControllerKey,
            f: glib::ffi::gpointer,
        ) {
            let f: &F = &*(f as *const F);
            f(&from_glib_borrow(this))
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"im-update\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    im_update_trampoline::<F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
    /// Connects `f` to the `modifiers` signal; the closure's return value
    /// controls whether the event is inhibited.
    pub fn connect_modifiers<
        F: Fn(&EventControllerKey, gdk::ModifierType) -> glib::signal::Inhibit + 'static,
    >(
        &self,
        f: F,
    ) -> SignalHandlerId {
        // C-callable shim for the modifiers signal.
        // NOTE(review): the parameter is named `keyval` but carries a
        // GdkModifierType — a gir naming artifact, behavior is correct.
        unsafe extern "C" fn modifiers_trampoline<
            F: Fn(&EventControllerKey, gdk::ModifierType) -> glib::signal::Inhibit + 'static,
        >(
            this: *mut ffi::GtkEventControllerKey,
            keyval: gdk::ffi::GdkModifierType,
            f: glib::ffi::gpointer,
        ) -> glib::ffi::gboolean {
            let f: &F = &*(f as *const F);
            f(&from_glib_borrow(this), from_glib(keyval)).to_glib()
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"modifiers\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    modifiers_trampoline::<F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
}
impl Default for EventControllerKey {
    // Delegates to `new()`, which asserts GTK is initialized on the main
    // thread.
    fn default() -> Self {
        Self::new()
    }
}
/// Builder for [`EventControllerKey`]; only explicitly set properties are
/// passed to object construction.
#[derive(Clone, Default)]
pub struct EventControllerKeyBuilder {
    name: Option<String>,
    propagation_limit: Option<PropagationLimit>,
    propagation_phase: Option<PropagationPhase>,
}
impl EventControllerKeyBuilder {
    /// Creates a builder with all properties unset.
    pub fn new() -> Self {
        Self::default()
    }

    /// Constructs the [`EventControllerKey`], applying only the properties
    /// that were explicitly set. Panics if object construction fails.
    pub fn build(self) -> EventControllerKey {
        let mut properties: Vec<(&str, &dyn ToValue)> = vec![];
        if let Some(ref name) = self.name {
            properties.push(("name", name));
        }
        if let Some(ref propagation_limit) = self.propagation_limit {
            properties.push(("propagation-limit", propagation_limit));
        }
        if let Some(ref propagation_phase) = self.propagation_phase {
            properties.push(("propagation-phase", propagation_phase));
        }
        // Return the constructed object directly instead of binding it to a
        // temporary first (the original's `let ret = ...; ret`).
        glib::Object::new::<EventControllerKey>(&properties).expect("object new")
    }

    /// Sets the controller's `name` property.
    pub fn name(mut self, name: &str) -> Self {
        self.name = Some(name.to_string());
        self
    }

    /// Sets the `propagation-limit` property.
    pub fn propagation_limit(mut self, propagation_limit: PropagationLimit) -> Self {
        self.propagation_limit = Some(propagation_limit);
        self
    }

    /// Sets the `propagation-phase` property.
    pub fn propagation_phase(mut self, propagation_phase: PropagationPhase) -> Self {
        self.propagation_phase = Some(propagation_phase);
        self
    }
}
impl fmt::Display for EventControllerKey {
    /// Writes the fixed type name, matching the generated convention.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "EventControllerKey")
    }
}
| 31.314607 | 128 | 0.585038 |
1ea11924565bf0d90e15c89203265aa4af3ed9d6 | 14,699 | use crate::common::CommonState;
use crate::game::{State, Transition, WizardState};
use crate::helpers::ID;
use crate::render::DrawOptions;
use crate::ui::{ShowEverything, UI};
use abstutil::Timer;
use ezgui::{hotkey, EventCtx, GfxCtx, Key, ModalMenu, Wizard};
use geom::{Duration, PolyLine};
use map_model::{
BuildingID, IntersectionID, IntersectionType, LaneType, PathRequest, Position, LANE_THICKNESS,
};
use rand::seq::SliceRandom;
use rand::Rng;
use sim::{DrivingGoal, Scenario, SidewalkSpot, TripSpec};
// Small (0.1s) simulation time step; its consumers are outside this excerpt.
const SMALL_DT: Duration = Duration::const_seconds(0.1);
// Interactive mode for spawning a single agent: the source is chosen before
// this state is created; the destination is previewed as the user hovers.
pub struct AgentSpawner {
    menu: ModalMenu,
    from: Source,
    // Destination under the cursor plus the previewed route polyline
    // (the polyline is None when no trace could be produced).
    maybe_goal: Option<(Goal, Option<PolyLine>)>,
}
#[derive(Clone)]
enum Source {
    // Trip starts on foot at this building.
    Walking(BuildingID),
    // Trip starts driving from this position on a lane.
    Driving(Position),
}
#[derive(PartialEq)]
enum Goal {
    Building(BuildingID),
    Border(IntersectionID),
}
impl AgentSpawner {
    /// Examines the current selection and offers spawn-related contextual
    /// actions. Returns a state to push: an `AgentSpawner` when the user
    /// picked a trip source, or a scenario wizard; `None` when nothing was
    /// chosen (bulk-spawning around an intersection happens immediately and
    /// also returns `None`).
    pub fn new(
        ctx: &mut EventCtx,
        ui: &mut UI,
        sandbox_menu: &mut ModalMenu,
    ) -> Option<Box<dyn State>> {
        let menu = ModalMenu::new(
            "Agent Spawner",
            vec![vec![(hotkey(Key::Escape), "quit")]],
            ctx,
        );
        let map = &ui.primary.map;
        match ui.primary.current_selection {
            Some(ID::Building(id)) => {
                if ctx
                    .input
                    .contextual_action(Key::F3, "spawn a pedestrian starting here")
                {
                    return Some(Box::new(AgentSpawner {
                        menu,
                        from: Source::Walking(id),
                        maybe_goal: None,
                    }));
                }
                // Only offer the car action when the building has a
                // driving-lane position (bldg_via_driving may return None).
                if let Some(pos) = Position::bldg_via_driving(id, map) {
                    if ctx
                        .input
                        .contextual_action(Key::F4, "spawn a car starting here")
                    {
                        return Some(Box::new(AgentSpawner {
                            menu,
                            from: Source::Driving(pos),
                            maybe_goal: None,
                        }));
                    }
                }
            }
            Some(ID::Lane(id)) => {
                // Only driving lanes can source a spawned agent; start
                // halfway down the lane.
                if map.get_l(id).is_driving()
                    && ctx
                        .input
                        .contextual_action(Key::F3, "spawn an agent starting here")
                {
                    return Some(Box::new(AgentSpawner {
                        menu,
                        from: Source::Driving(Position::new(id, map.get_l(id).length() / 2.0)),
                        maybe_goal: None,
                    }));
                }
            }
            Some(ID::Intersection(i)) => {
                // Bulk-spawn happens immediately; no new state is pushed.
                if ctx
                    .input
                    .contextual_action(Key::Z, "spawn agents around this intersection")
                {
                    spawn_agents_around(i, ui, ctx);
                }
            }
            None => {
                // With nothing selected and an empty sim, offer to
                // instantiate a full scenario via a wizard.
                if ui.primary.sim.is_empty() && sandbox_menu.action("start a scenario") {
                    return Some(WizardState::new(Box::new(instantiate_scenario)));
                }
            }
            _ => {}
        }
        None
    }
}
impl State for AgentSpawner {
    /// Per-frame update: track the hovered goal, keep a preview path cached,
    /// and spawn the trip when the player confirms.
    fn event(&mut self, ctx: &mut EventCtx, ui: &mut UI) -> Transition {
        // TODO Instructions to select target building/lane
        self.menu.handle_event(ctx, None);
        if self.menu.action("quit") {
            return Transition::Pop;
        }
        ctx.canvas.handle_event(ctx.input);
        if ctx.redo_mouseover() {
            ui.recalculate_current_selection(ctx);
        }
        let map = &ui.primary.map;
        // A valid goal is a building, or a border intersection.
        let new_goal = match ui.primary.current_selection {
            Some(ID::Building(b)) => Goal::Building(b),
            Some(ID::Intersection(i))
                if map.get_i(i).intersection_type == IntersectionType::Border =>
            {
                Goal::Border(i)
            }
            _ => {
                self.maybe_goal = None;
                return Transition::Keep;
            }
        };
        // Only recompute the preview path when the hovered goal actually
        // changed. BUGFIX: this previously used `*g == new_goal`, which
        // recomputed every frame for an unchanged goal and — worse — skipped
        // recomputation exactly when a *different* goal was hovered, leaving
        // a stale goal/trace that the player could then confirm.
        let recalculate = match self.maybe_goal {
            Some((ref g, _)) => *g != new_goal,
            None => true,
        };
        if recalculate {
            let start = match self.from {
                Source::Walking(b) => Position::bldg_via_walking(b, map),
                Source::Driving(pos) => pos,
            };
            let end = match new_goal {
                Goal::Building(to) => match self.from {
                    Source::Walking(_) => Position::bldg_via_walking(to, map),
                    Source::Driving(_) => {
                        let end = map.find_driving_lane_near_building(to);
                        Position::new(end, map.get_l(end).length())
                    }
                },
                Goal::Border(to) => {
                    // End at the last point of some incoming lane of the
                    // right type; bail out if the border has none.
                    let lanes = map.get_i(to).get_incoming_lanes(
                        map,
                        match self.from {
                            Source::Walking(_) => LaneType::Sidewalk,
                            Source::Driving(_) => LaneType::Driving,
                        },
                    );
                    if lanes.is_empty() {
                        self.maybe_goal = None;
                        return Transition::Keep;
                    }
                    Position::new(lanes[0], map.get_l(lanes[0]).length())
                }
            };
            if start == end {
                self.maybe_goal = None;
            } else {
                if let Some(path) = map.pathfind(PathRequest {
                    start,
                    end,
                    can_use_bike_lanes: false,
                    can_use_bus_lanes: false,
                }) {
                    self.maybe_goal = Some((new_goal, path.trace(map, start.dist_along(), None)));
                } else {
                    self.maybe_goal = None;
                }
            }
        }
        // Confirmation: schedule the appropriate trip kind for the
        // (source, goal) combination.
        if self.maybe_goal.is_some() && ctx.input.contextual_action(Key::F3, "end the agent here") {
            let mut rng = ui.primary.current_flags.sim_flags.make_rng();
            let sim = &mut ui.primary.sim;
            match (self.from.clone(), self.maybe_goal.take().unwrap().0) {
                (Source::Walking(from), Goal::Building(to)) => {
                    let start = SidewalkSpot::building(from, map);
                    let goal = SidewalkSpot::building(to, map);
                    let ped_speed = Scenario::rand_ped_speed(&mut rng);
                    // Prefer transit when the map says the walk warrants it.
                    if let Some((stop1, stop2, route)) =
                        map.should_use_transit(start.sidewalk_pos, goal.sidewalk_pos)
                    {
                        sim.schedule_trip(
                            sim.time(),
                            TripSpec::UsingTransit {
                                start,
                                goal,
                                route,
                                stop1,
                                stop2,
                                ped_speed,
                            },
                            map,
                        );
                    } else {
                        sim.schedule_trip(
                            sim.time(),
                            TripSpec::JustWalking {
                                start,
                                goal,
                                ped_speed,
                            },
                            map,
                        );
                    }
                }
                (Source::Walking(from), Goal::Border(to)) => {
                    if let Some(goal) = SidewalkSpot::end_at_border(to, map) {
                        sim.schedule_trip(
                            sim.time(),
                            TripSpec::JustWalking {
                                start: SidewalkSpot::building(from, map),
                                goal,
                                ped_speed: Scenario::rand_ped_speed(&mut rng),
                            },
                            map,
                        );
                    } else {
                        println!("Can't end a walking trip at {}; no sidewalks", to);
                    }
                }
                (Source::Driving(from), Goal::Building(to)) => {
                    if let Some(start_pos) = TripSpec::spawn_car_at(from, map) {
                        sim.schedule_trip(
                            sim.time(),
                            TripSpec::CarAppearing {
                                start_pos,
                                vehicle_spec: Scenario::rand_car(&mut rng),
                                goal: DrivingGoal::ParkNear(to),
                                ped_speed: Scenario::rand_ped_speed(&mut rng),
                            },
                            map,
                        );
                    } else {
                        println!("Can't make a car appear at {:?}", from);
                    }
                }
                (Source::Driving(from), Goal::Border(to)) => {
                    if let Some(goal) = DrivingGoal::end_at_border(to, vec![LaneType::Driving], map)
                    {
                        sim.schedule_trip(
                            sim.time(),
                            TripSpec::CarAppearing {
                                start_pos: from,
                                vehicle_spec: Scenario::rand_car(&mut rng),
                                goal,
                                ped_speed: Scenario::rand_ped_speed(&mut rng),
                            },
                            map,
                        );
                    } else {
                        println!("Can't end a car trip at {}; no driving lanes", to);
                    }
                }
            };
            // Materialize the scheduled trip and nudge the sim so the agent
            // appears right away.
            sim.spawn_all_trips(map, &mut Timer::new("spawn trip"), false);
            sim.step(map, SMALL_DT);
            ui.recalculate_current_selection(ctx);
            return Transition::Pop;
        }
        Transition::Keep
    }
    fn draw_default_ui(&self) -> bool {
        false
    }
    /// Highlights the source and draws the cached preview trace, if any.
    fn draw(&self, g: &mut GfxCtx, ui: &UI) {
        let src = match self.from {
            Source::Walking(b1) => ID::Building(b1),
            Source::Driving(pos1) => ID::Lane(pos1.lane()),
        };
        let mut opts = DrawOptions::new();
        opts.override_colors.insert(src, ui.cs.get("selected"));
        ui.draw(g, opts, &ui.primary.sim, &ShowEverything::new());
        if let Some((_, Some(ref trace))) = self.maybe_goal {
            g.draw_polygon(ui.cs.get("route"), &trace.make_polygons(LANE_THICKNESS));
        }
        self.menu.draw(g);
        CommonState::draw_osd(g, ui, &ui.primary.current_selection);
    }
}
/// Floods an intersection with test traffic: up to 10 vehicles per incoming
/// driving lane (70% cars, 30% bikes) and 5 pedestrians per incoming
/// sidewalk, each headed to a random building.
fn spawn_agents_around(i: IntersectionID, ui: &mut UI, ctx: &EventCtx) {
    let map = &ui.primary.map;
    let sim = &mut ui.primary.sim;
    let mut rng = ui.primary.current_flags.sim_flags.make_rng();
    for l in &map.get_i(i).incoming_lanes {
        let lane = map.get_l(*l);
        if lane.is_driving() {
            for _ in 0..10 {
                let vehicle_spec = if rng.gen_bool(0.7) {
                    Scenario::rand_car(&mut rng)
                } else {
                    Scenario::rand_bike(&mut rng)
                };
                // Skip vehicles that physically don't fit on this lane.
                if vehicle_spec.length > lane.length() {
                    continue;
                }
                sim.schedule_trip(
                    sim.time(),
                    TripSpec::CarAppearing {
                        start_pos: Position::new(
                            lane.id,
                            Scenario::rand_dist(&mut rng, vehicle_spec.length, lane.length()),
                        ),
                        vehicle_spec,
                        goal: DrivingGoal::ParkNear(
                            map.all_buildings().choose(&mut rng).unwrap().id,
                        ),
                        ped_speed: Scenario::rand_ped_speed(&mut rng),
                    },
                    map,
                );
            }
        } else if lane.is_sidewalk() {
            for _ in 0..5 {
                sim.schedule_trip(
                    sim.time(),
                    TripSpec::JustWalking {
                        // Keep pedestrians away from the very ends of the lane.
                        start: SidewalkSpot::suddenly_appear(
                            lane.id,
                            Scenario::rand_dist(&mut rng, 0.1 * lane.length(), 0.9 * lane.length()),
                            map,
                        ),
                        goal: SidewalkSpot::building(
                            map.all_buildings().choose(&mut rng).unwrap().id,
                            map,
                        ),
                        ped_speed: Scenario::rand_ped_speed(&mut rng),
                    },
                    map,
                );
            }
        }
    }
    // Materialize everything and nudge the sim so the agents show up.
    sim.spawn_all_trips(map, &mut Timer::throwaway(), false);
    sim.step(map, SMALL_DT);
    ui.recalculate_current_selection(ctx);
}
/// Wizard callback: lets the player pick a built-in random scenario, the
/// bus-only scenario, or any saved scenario for this map, then instantiates it.
fn instantiate_scenario(wiz: &mut Wizard, ctx: &mut EventCtx, ui: &mut UI) -> Option<Transition> {
    let num_agents = ui.primary.current_flags.num_agents;
    // Label for the built-in random option, reflecting the agent-count flag.
    let builtin = if let Some(n) = num_agents {
        format!("random scenario with {} agents", n)
    } else {
        "random scenario with some agents".to_string()
    };
    let map = &ui.primary.map;
    let scenario_name = wiz
        .wrap(ctx)
        .choose_string("Instantiate which scenario?", || {
            let mut list = vec![builtin.clone(), "just buses".to_string()];
            list.extend(abstutil::list_all_objects(
                abstutil::SCENARIOS,
                map.get_name(),
            ));
            list
        })?;
    let scenario = if scenario_name == builtin {
        if let Some(n) = num_agents {
            Scenario::scaled_run(map, n)
        } else {
            Scenario::small_run(map)
        }
    } else if scenario_name == "just buses" {
        Scenario::empty(map)
    } else {
        // Anything else is the name of a saved scenario on disk.
        abstutil::read_binary(
            &abstutil::path1_bin(map.get_name(), abstutil::SCENARIOS, &scenario_name),
            &mut Timer::throwaway(),
        )
        .unwrap()
    };
    ctx.loading_screen("instantiate scenario", |_, timer| {
        scenario.instantiate(
            &mut ui.primary.sim,
            &ui.primary.map,
            &mut ui.primary.current_flags.sim_flags.make_rng(),
            timer,
        );
        // Step a tiny bit so the new agents are visible immediately.
        ui.primary.sim.step(&ui.primary.map, SMALL_DT);
    });
    Some(Transition::Pop)
}
| 36.383663 | 100 | 0.429621 |
48907ff4016cd5c6f1c22bb35797621f991a0e50 | 754 | use Entity;
/// `Node` is used to store and manipulate the position, rotation and scale
/// of the object. Every `Node` can have a parent, which allows you to apply
/// position, rotation and scale hierarchically.
///
/// `Entity` are used to record the tree relationships. Every access requires going
/// through the arena, which can be cumbersome and comes with some runtime overhead.
///
/// But it not only keeps code clean and simple, but also makes `Node` could be
/// send or shared across threads safely. This enables e.g. parallel tree traversals.
#[derive(Default, Debug, Clone, Copy)]
pub struct Node {
    // Intrusive doubly-linked sibling list plus parent/first-child links,
    // stored as arena handles rather than pointers.
    pub parent: Option<Entity>,
    pub next_sib: Option<Entity>,
    pub prev_sib: Option<Entity>,
    pub first_child: Option<Entity>,
}
| 39.684211 | 85 | 0.725464 |
9c1d7abb7c9f66171e48b3960ba024b1c55243ad | 3,675 | mod utils;
extern crate js_sys;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[wasm_bindgen]
/// Initial population pattern for a newly created universe.
pub enum UniverseType {
    Random,
    Empty,
}
impl Default for UniverseType {
fn default() -> Self {
UniverseType::Random
}
}
#[wasm_bindgen]
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// One grid cell. `repr(u8)` keeps the buffer compact and lets JS read the
/// WASM memory directly; the numeric values double as live-neighbour counts.
pub enum Cell {
    Dead = 0,
    Alive = 1,
}
impl Cell {
    /// Flip this cell between `Alive` and `Dead` in place.
    fn toggle(&mut self) {
        *self = if *self == Cell::Dead {
            Cell::Alive
        } else {
            Cell::Dead
        };
    }
}
#[wasm_bindgen]
/// A Game-of-Life grid stored as a flat, row-major vector of cells.
pub struct Universe {
    width: u32,
    height: u32,
    cells: Vec<Cell>,
}
impl Universe {
    /// Maps a (row, column) pair to an index in the flat `cells` vector
    /// (row-major layout).
    fn get_index(&self, row: u32, column: u32) -> usize {
        (row * self.width + column) as usize
    }
    /// Counts live cells among the eight neighbours of (row, column),
    /// treating the grid as a torus (edges wrap around).
    fn live_neighbour_count(&self, row: u32, column: u32) -> u8 {
        let mut count = 0;
        // `height - 1` / `width - 1` play the role of a delta of -1 modulo
        // the grid size, avoiding unsigned underflow at the edges.
        // NOTE(review): assumes width and height are >= 1 — confirm at construction.
        for delta_row in [self.height - 1, 0, 1].iter().cloned() {
            for delta_col in [self.width - 1, 0, 1].iter().cloned() {
                // Skip the cell itself.
                if delta_row == 0 && delta_col == 0 {
                    continue;
                }
                let neighbour_row = (row + delta_row) % self.height;
                let neighbour_col = (column + delta_col) % self.width;
                let idx = self.get_index(neighbour_row, neighbour_col);
                // Works because Alive = 1 and Dead = 0.
                count += self.cells[idx] as u8
            }
        }
        count
    }
}
/// Builds a width*height buffer where each cell is alive with probability
/// 0.5, using the browser's `Math.random` via `js_sys`.
fn random_cells(width: u32, height: u32) -> Vec<Cell> {
    (0..width * height)
        .map(|_| {
            if js_sys::Math::random() < 0.5 {
                Cell::Alive
            } else {
                Cell::Dead
            }
        }).collect()
}
/// Builds a width*height buffer in which every cell starts out dead.
fn empty_cells(width: u32, height: u32) -> Vec<Cell> {
    let total = (width * height) as usize;
    std::iter::repeat(Cell::Dead).take(total).collect()
}
#[wasm_bindgen]
impl Universe {
    /// Grid width in cells.
    pub fn width(&self) -> u32 {
        self.width
    }
    /// Grid height in cells.
    pub fn height(&self) -> u32 {
        self.height
    }
    /// Raw pointer to the cell buffer so JS can view it in WASM memory.
    pub fn cells(&self) -> *const Cell {
        self.cells.as_ptr()
    }
    /// Flips the cell at (row, column); out-of-range indices are ignored.
    pub fn toggle_cell(&mut self, row: u32, column: u32) {
        let idx = self.get_index(row, column);
        if idx < self.cells.len() {
            self.cells[idx].toggle();
        }
    }
    /// Advances the simulation by one generation.
    pub fn tick(&mut self) {
        // Write the next generation into a copy so every rule reads the
        // unmodified current generation.
        let mut next = self.cells.clone();
        for row in 0..self.height {
            for col in 0..self.width {
                let idx = self.get_index(row, col);
                let cell = self.cells[idx];
                let live_neighbours = self.live_neighbour_count(row, col);
                let next_cell = match (cell, live_neighbours) {
                    // Fewer than 2 neighbours: dies.
                    (Cell::Alive, x) if x < 2 => Cell::Dead,
                    // 2 or 3 neighbours: survives.
                    (Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive,
                    // More than 3 neighbours: dies.
                    (Cell::Alive, x) if x > 3 => Cell::Dead,
                    // Exactly 3 neighbours: a dead cell comes alive.
                    (Cell::Dead, 3) => Cell::Alive,
                    (otherwise, _) => otherwise,
                };
                next[idx] = next_cell;
            }
        }
        self.cells = next;
    }
    /// Creates a square `size` x `size` universe; `kind` defaults to
    /// `UniverseType::Random` when `None`.
    pub fn new(size: u32, kind: Option<UniverseType>) -> Universe {
        utils::set_panic_hook();
        let width = size;
        let height = size;
        let cells = match kind.unwrap_or_default() {
            UniverseType::Empty => empty_cells(width, height),
            UniverseType::Random => random_cells(width, height),
        };
        Universe {
            width,
            height,
            cells,
        }
    }
}
5da9d9fa6eb801fe5cb3a5d39fa26b8cce8fceac | 10,785 | //! Send HTTP requests and responses asynchronously.
//!
//! This module has both an `HttpClientCodec` for an async HTTP client and an
//! `HttpServerCodec` for an async HTTP server.
use std::io::{self, Write};
use std::error::Error;
use std::fmt::{self, Display, Formatter};
use hyper;
use hyper::http::h1::Incoming;
use hyper::http::h1::parse_response;
use hyper::http::h1::parse_request;
use hyper::http::RawStatus;
use hyper::status::StatusCode;
use hyper::method::Method;
use hyper::uri::RequestUri;
use hyper::buffer::BufReader;
use tokio_io::codec::{Decoder, Encoder};
use bytes::BytesMut;
use bytes::BufMut;
#[derive(Copy, Clone, Debug)]
///A codec to be used with `tokio` codecs that can serialize HTTP requests and
///deserialize HTTP responses. One can use this on its own without websockets to
///make a very bare async HTTP client.
///
///# Example
///```rust,no_run
///# extern crate tokio_core;
///# extern crate tokio_io;
///# extern crate websocket_tokio_reform as websocket;
///# extern crate hyper;
///use websocket::async::HttpClientCodec;
///# use websocket::async::futures::{Future, Sink, Stream};
///# use tokio_core::net::TcpStream;
///# use tokio_core::reactor::Core;
///# use tokio_io::AsyncRead;
///# use hyper::http::h1::Incoming;
///# use hyper::version::HttpVersion;
///# use hyper::header::Headers;
///# use hyper::method::Method;
///# use hyper::uri::RequestUri;
///
///# fn main() {
///let mut core = Core::new().unwrap();
///let addr = "crouton.net".parse().unwrap();
///
///let f = TcpStream::connect(&addr, &core.handle())
///    .and_then(|s| {
///        Ok(s.framed(HttpClientCodec))
///    })
///    .and_then(|s| {
///        s.send(Incoming {
///            version: HttpVersion::Http11,
///            subject: (Method::Get, RequestUri::AbsolutePath("/".to_string())),
///            headers: Headers::new(),
///        })
///    })
///    .map_err(|e| e.into())
///    .and_then(|s| s.into_future().map_err(|(e, _)| e))
///    .map(|(m, _)| println!("You got a crouton: {:?}", m));
///
///core.run(f).unwrap();
///# }
///```
pub struct HttpClientCodec;
/// Splits off and returns everything up to and including the first
/// `\r\n\r\n` header terminator, or `None` when no complete HTTP head has
/// been buffered yet. The returned bytes are removed from `src`.
fn split_off_http(src: &mut BytesMut) -> Option<BytesMut> {
    src.windows(4)
        .position(|window| window == b"\r\n\r\n")
        // +4 so the terminator itself is included in the split-off chunk.
        .map(|p| src.split_to(p + 4))
}
impl Encoder for HttpClientCodec {
    type Item = Incoming<(Method, RequestUri)>;
    type Error = io::Error;
    /// Serializes the request line plus headers into `dst`.
    fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> {
        // TODO: optimize this! (formatting via an intermediate String allocates)
        let request = format!(
            "{} {} {}\r\n{}\r\n",
            item.subject.0, item.subject.1, item.version, item.headers
        );
        let byte_len = request.as_bytes().len();
        // Make sure the buffer can hold the whole serialized request.
        if byte_len > dst.remaining_mut() {
            dst.reserve(byte_len);
        }
        // `write_all` (rather than `write(..).map(|_| ())`) guarantees that
        // either every byte is buffered or an error is returned; a plain
        // `write` could report a short write and silently drop bytes.
        dst.writer().write_all(request.as_bytes())
    }
}
impl Decoder for HttpClientCodec {
    type Item = Incoming<RawStatus>;
    type Error = HttpCodecError;
    /// Tries to parse one complete HTTP response head out of `src`.
    /// Returns `Ok(None)` until a full `\r\n\r\n`-terminated head arrives.
    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        // check if we get a request from hyper
        // TODO: this is inefficient, but hyper does not give us a better way to parse
        match split_off_http(src) {
            Some(buf) => {
                let mut reader = BufReader::with_capacity(&*buf as &[u8], buf.len());
                let res = match parse_response(&mut reader) {
                    // An incomplete message is not a protocol error; wait for
                    // more bytes.
                    Err(hyper::Error::Io(ref e)) if e.kind() == io::ErrorKind::UnexpectedEof => {
                        return Ok(None)
                    }
                    Err(hyper::Error::TooLarge) => return Ok(None),
                    Err(e) => return Err(e.into()),
                    Ok(r) => r,
                };
                Ok(Some(res))
            }
            None => Ok(None),
        }
    }
}
///A codec that can be used with streams implementing `AsyncRead + AsyncWrite`
///that can serialize HTTP responses and deserialize HTTP requests. Using this
///with an async `TcpStream` will give you a very bare async HTTP server.
///
///This crate sends out one HTTP request / response in order to perform the websocket
///handshake then never talks HTTP again. Because of this an async HTTP implementation
///is needed.
///
///# Example
///
///```rust,no_run
///# extern crate tokio_core;
///# extern crate tokio_io;
///# extern crate websocket_tokio_reform as websocket;
///# extern crate hyper;
///# use std::io;
///use websocket::async::HttpServerCodec;
///# use websocket::async::futures::{Future, Sink, Stream};
///# use tokio_core::net::TcpStream;
///# use tokio_core::reactor::Core;
///# use tokio_io::AsyncRead;
///# use hyper::http::h1::Incoming;
///# use hyper::version::HttpVersion;
///# use hyper::header::Headers;
///# use hyper::method::Method;
///# use hyper::uri::RequestUri;
///# use hyper::status::StatusCode;
///# fn main() {
///
///let mut core = Core::new().unwrap();
///let addr = "nothing-to-see-here.com".parse().unwrap();
///
///let f = TcpStream::connect(&addr, &core.handle())
///    .map(|s| s.framed(HttpServerCodec))
///    .map_err(|e| e.into())
///    .and_then(|s| s.into_future().map_err(|(e, _)| e))
///    .and_then(|(m, s)| match m {
///        Some(ref m) if m.subject.0 == Method::Get => Ok(s),
///        _ => panic!(),
///    })
///    .and_then(|stream| {
///        stream
///            .send(Incoming {
///                version: HttpVersion::Http11,
///                subject: StatusCode::NotFound,
///                headers: Headers::new(),
///            })
///            .map_err(|e| e.into())
///    });
///
///core.run(f).unwrap();
///# }
///```
// Zero-sized codec: all state lives in the buffers handed to encode/decode.
#[derive(Copy, Clone, Debug)]
pub struct HttpServerCodec;
impl Encoder for HttpServerCodec {
    type Item = Incoming<StatusCode>;
    type Error = io::Error;
    /// Serializes the status line plus headers into `dst`.
    fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> {
        // TODO: optimize this! (formatting via an intermediate String allocates)
        let response = format!("{} {}\r\n{}\r\n", item.version, item.subject, item.headers);
        let byte_len = response.as_bytes().len();
        // Make sure the buffer can hold the whole serialized response.
        if byte_len > dst.remaining_mut() {
            dst.reserve(byte_len);
        }
        // `write_all` guarantees the whole response is buffered or an error
        // is returned; a plain `write` could silently accept a short write.
        dst.writer().write_all(response.as_bytes())
    }
}
impl Decoder for HttpServerCodec {
    type Item = Incoming<(Method, RequestUri)>;
    type Error = HttpCodecError;
    /// Tries to parse one complete HTTP request head out of `src`.
    /// Returns `Ok(None)` until a full `\r\n\r\n`-terminated head arrives.
    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        // check if we get a request from hyper
        // TODO: this is inefficient, but hyper does not give us a better way to parse
        match split_off_http(src) {
            Some(buf) => {
                let mut reader = BufReader::with_capacity(&*buf as &[u8], buf.len());
                let res = match parse_request(&mut reader) {
                    // An incomplete message is not a protocol error; wait for
                    // more bytes.
                    Err(hyper::Error::Io(ref e)) if e.kind() == io::ErrorKind::UnexpectedEof => {
                        return Ok(None);
                    }
                    Err(hyper::Error::TooLarge) => return Ok(None),
                    Err(e) => return Err(e.into()),
                    Ok(r) => r,
                };
                Ok(Some(res))
            }
            None => Ok(None),
        }
    }
}
/// Any error that can happen during the writing or parsing of HTTP requests
/// and responses. This consists of HTTP parsing errors (the `Http` variant) and
/// errors that can occur when writing to IO (the `Io` variant).
/// Produced by the `Encoder`/`Decoder` impls in this module.
#[derive(Debug)]
pub enum HttpCodecError {
    /// An error that occurs during the writing or reading of HTTP data
    /// from a socket.
    Io(io::Error),
    /// An error that occurs during the parsing of an HTTP request or response.
    Http(hyper::Error),
}
impl Display for HttpCodecError {
    /// User-facing text simply mirrors the underlying error's description.
    fn fmt(&self, fmt: &mut Formatter) -> Result<(), fmt::Error> {
        write!(fmt, "{}", self.description())
    }
}
impl Error for HttpCodecError {
    // Uses the older `description`/`cause` Error API; kept as-is for
    // compatibility with the std era this crate targets.
    fn description(&self) -> &str {
        match *self {
            HttpCodecError::Io(ref e) => e.description(),
            HttpCodecError::Http(ref e) => e.description(),
        }
    }
    fn cause(&self) -> Option<&Error> {
        match *self {
            HttpCodecError::Io(ref error) => Some(error),
            HttpCodecError::Http(ref error) => Some(error),
        }
    }
}
// These conversions let `?` lift raw IO and hyper parse errors into
// `HttpCodecError` inside the codec implementations above.
impl From<io::Error> for HttpCodecError {
    fn from(err: io::Error) -> HttpCodecError {
        HttpCodecError::Io(err)
    }
}
impl From<hyper::Error> for HttpCodecError {
    fn from(err: hyper::Error) -> HttpCodecError {
        HttpCodecError::Http(err)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Cursor;
    use stream::ReadWritePair;
    use tokio_core::reactor::Core;
    use futures::{Future, Sink, Stream};
    use tokio_io::AsyncRead;
    use hyper::version::HttpVersion;
    use hyper::header::Headers;
    // Round-trip through the client codec against an in-memory stream: send
    // a GET, then decode a canned 404 (trailing bytes must be left alone).
    #[test]
    fn test_client_http_codec() {
        let mut core = Core::new().unwrap();
        let response = "HTTP/1.1 404 Not Found\r\n\r\npssst extra data here";
        let input = Cursor::new(response.as_bytes());
        let output = Cursor::new(Vec::new());
        let f = ReadWritePair(input, output)
            .framed(HttpClientCodec)
            .send(Incoming {
                version: HttpVersion::Http11,
                subject: (Method::Get, RequestUri::AbsolutePath("/".to_string())),
                headers: Headers::new(),
            })
            .map_err(|e| e.into())
            .and_then(|s| s.into_future().map_err(|(e, _)| e))
            .and_then(|(m, _)| match m {
                Some(ref m) if StatusCode::from_u16(m.subject.0) == StatusCode::NotFound => Ok(()),
                _ => Err(io::Error::new(io::ErrorKind::Other, "test failed").into()),
            });
        core.run(f).unwrap();
    }
    // Mirror image for the server codec: decode a canned GET request, then
    // encode a 404 response back onto the in-memory stream.
    #[test]
    fn test_server_http_codec() {
        let mut core = Core::new().unwrap();
        let request = "\
        GET / HTTP/1.0\r\n\
        Host: www.rust-lang.org\r\n\
        \r\n\
        "
            .as_bytes();
        let input = Cursor::new(request);
        let output = Cursor::new(Vec::new());
        let f = ReadWritePair(input, output)
            .framed(HttpServerCodec)
            .into_future()
            .map_err(|(e, _)| e)
            .and_then(|(m, s)| match m {
                Some(ref m) if m.subject.0 == Method::Get => Ok(s),
                _ => Err(io::Error::new(io::ErrorKind::Other, "test failed").into()),
            })
            .and_then(|s| {
                s.send(Incoming {
                    version: HttpVersion::Http11,
                    subject: StatusCode::NotFound,
                    headers: Headers::new(),
                }).map_err(|e| e.into())
            });
        core.run(f).unwrap();
    }
}
| 33.390093 | 99 | 0.559573 |
d5ac4346879f90e230a9ccf3b5185357d708eeda | 136 | #![cfg(feature = "ctr")]
#![feature(test)]
#[macro_use] extern crate stream_cipher;
extern crate aesni;
// Registers a synchronous stream-cipher benchmark for AES-128 in CTR mode,
// using the harness macro exported by the `stream_cipher` crate.
bench_sync!(aesni::Aes128Ctr);
| 19.428571 | 40 | 0.713235 |
4af037cb3e5f38b52015edc29d825a5bf8b0697f | 223 | fn main() {
    println!("Hello, world!");
    // `3u8` pins the Option's payload type for the matches below.
    let someval = Some(3u8);
    // A match can test for one specific payload value...
    match someval {
        Some(3) => println!("three"),
        _ => (),
    }
    // ...while `if let` binds whatever payload is present.
    if let Some(i) = someval {
        println!("num={}", i);
    }
}
| 18.583333 | 37 | 0.439462 |
1c97ba2eb0d1a9e8d3c9aab991e9cea0334c7e6f | 465 | use crate::utils;
use clap::Command;
#[test]
fn very_large_display_order() {
    // Regression test: `display_order(usize::MAX)` must not overflow or
    // panic while sorting help output; the subcommand simply sorts last.
    let cmd = Command::new("test").subcommand(Command::new("sub").display_order(usize::MAX));
    assert!(utils::compare_output(
        cmd,
        "test --help",
        "test
USAGE:
    test [SUBCOMMAND]
OPTIONS:
    -h, --help    Print help information
SUBCOMMANDS:
    help    Print this message or the help of the given subcommand(s)
    sub
",
        false
    ));
}
| 17.222222 | 93 | 0.604301 |
9bd485fd5894377c9dc4bd1ecc931826f1cb4c3f | 2,303 | // structs3.rs
// Structs contain data, but can also have logic. In this exercise we have
// defined the Package struct and we want to test some logic attached to it.
// Make the code compile and the tests pass!
// If you have issues execute `rustlings hint structs3`
/// A parcel with an origin, a destination and a strictly positive weight.
#[derive(Debug)]
struct Package {
    sender_country: String,
    recipient_country: String,
    weight_in_grams: i32,
}
impl Package {
    /// Creates a package. Panics when `weight_in_grams` is zero or negative.
    fn new(sender_country: String, recipient_country: String, weight_in_grams: i32) -> Package {
        // The guard rejects 0 as well, so the message says "positive"
        // (the previous text claimed "non-negative", which would wrongly
        // suggest that a zero-gram package is accepted).
        if weight_in_grams <= 0 {
            panic!("weight_in_grams must be positive");
        }
        Package {
            sender_country,
            recipient_country,
            weight_in_grams,
        }
    }
    /// True when sender and recipient are in different countries.
    fn is_international(&self) -> bool {
        self.sender_country != self.recipient_country
    }
    /// Transport fee in cents: weight multiplied by the per-gram rate.
    fn get_fees(&self, cents_per_gram: i32) -> i32 {
        self.weight_in_grams * cents_per_gram
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Non-positive weights must be rejected by the constructor.
    #[test]
    #[should_panic]
    fn fail_creating_weightless_package() {
        let sender_country = String::from("Spain");
        let recipient_country = String::from("Austria");
        Package::new(sender_country, recipient_country, -2210);
    }
    // Different countries => international.
    #[test]
    fn create_international_package() {
        let sender_country = String::from("Spain");
        let recipient_country = String::from("Russia");
        let package = Package::new(sender_country, recipient_country, 1200);
        assert!(package.is_international());
    }
    // Same country => domestic.
    #[test]
    fn create_local_package() {
        let sender_country = String::from("Canada");
        let recipient_country = sender_country.clone();
        let package = Package::new(sender_country, recipient_country, 1200);
        assert!(!package.is_international());
    }
    // Fee = weight (grams) * rate (cents per gram).
    #[test]
    fn calculate_transport_fees() {
        let sender_country = String::from("Spain");
        let recipient_country = String::from("Spain");
        let cents_per_gram = 3;
        let package = Package::new(sender_country, recipient_country, 1500);
        assert_eq!(package.get_fees(cents_per_gram), 4500);
    }
}
bff2d4b5efc2703e812f5b9d144e35053d56d5c9 | 15,548 | #[doc = r" Value read from the register"]
// svd2rust-generated accessors for the RTCCON register: `R` snapshots the
// register value for field reads, `W` accumulates bits for a write.
pub struct R {
    bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
    bits: u32,
}
impl super::RTCCON {
    #[doc = r" Modifies the contents of the register"]
    #[inline]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        // Read-modify-write: seed W with the current value so fields the
        // closure doesn't touch are preserved.
        let bits = self.register.get();
        let r = R { bits: bits };
        let mut w = W { bits: bits };
        f(&r, &mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Reads the contents of the register"]
    #[inline]
    pub fn read(&self) -> R {
        R {
            bits: self.register.get(),
        }
    }
    #[doc = r" Writes to the register"]
    #[inline]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Writes start from the reset value, not the current contents.
        let mut w = W::reset_value();
        f(&mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Writes the reset value to the register"]
    #[inline]
    pub fn reset(&self) {
        self.write(|w| w)
    }
}
// Generated one-bit field readers for RTCCON (RTCOE, HALFSEC, RTCSYNC,
// RTCWREN, RTCCLKON, RTSECSEL, SIDL, FRZ, ON) plus the 10-bit CAL reader.
// Each wraps the extracted field value and offers bit/bit_is_* accessors.
#[doc = r" Value of the field"]
pub struct RTCOER {
    bits: bool,
}
impl RTCOER {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r" Value of the field"]
pub struct HALFSECR {
    bits: bool,
}
impl HALFSECR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r" Value of the field"]
pub struct RTCSYNCR {
    bits: bool,
}
impl RTCSYNCR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r" Value of the field"]
pub struct RTCWRENR {
    bits: bool,
}
impl RTCWRENR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r" Value of the field"]
pub struct RTCCLKONR {
    bits: bool,
}
impl RTCCLKONR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r" Value of the field"]
pub struct RTSECSELR {
    bits: bool,
}
impl RTSECSELR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r" Value of the field"]
pub struct SIDLR {
    bits: bool,
}
impl SIDLR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r" Value of the field"]
pub struct FRZR {
    bits: bool,
}
impl FRZR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r" Value of the field"]
pub struct ONR {
    bits: bool,
}
impl ONR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
// CAL is a 10-bit field, so its reader exposes the raw `u16` value.
#[doc = r" Value of the field"]
pub struct CALR {
    bits: u16,
}
impl CALR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u16 {
        self.bits
    }
}
// Generated write proxies for RTCCON. Each borrows the in-progress `W`,
// masks out its field at the given OFFSET and ORs in the new value, then
// returns `&mut W` so field writes can be chained.
#[doc = r" Proxy"]
pub struct _RTCOEW<'a> {
    w: &'a mut W,
}
impl<'a> _RTCOEW<'a> {
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const MASK: bool = true;
        const OFFSET: u8 = 0;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
#[doc = r" Proxy"]
pub struct _HALFSECW<'a> {
    w: &'a mut W,
}
impl<'a> _HALFSECW<'a> {
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const MASK: bool = true;
        const OFFSET: u8 = 1;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
#[doc = r" Proxy"]
pub struct _RTCSYNCW<'a> {
    w: &'a mut W,
}
impl<'a> _RTCSYNCW<'a> {
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const MASK: bool = true;
        const OFFSET: u8 = 2;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
#[doc = r" Proxy"]
pub struct _RTCWRENW<'a> {
    w: &'a mut W,
}
impl<'a> _RTCWRENW<'a> {
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const MASK: bool = true;
        const OFFSET: u8 = 3;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
#[doc = r" Proxy"]
pub struct _RTCCLKONW<'a> {
    w: &'a mut W,
}
impl<'a> _RTCCLKONW<'a> {
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const MASK: bool = true;
        const OFFSET: u8 = 6;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
#[doc = r" Proxy"]
pub struct _RTSECSELW<'a> {
    w: &'a mut W,
}
impl<'a> _RTSECSELW<'a> {
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const MASK: bool = true;
        const OFFSET: u8 = 7;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
#[doc = r" Proxy"]
pub struct _SIDLW<'a> {
    w: &'a mut W,
}
impl<'a> _SIDLW<'a> {
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const MASK: bool = true;
        const OFFSET: u8 = 13;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
#[doc = r" Proxy"]
pub struct _FRZW<'a> {
    w: &'a mut W,
}
impl<'a> _FRZW<'a> {
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const MASK: bool = true;
        const OFFSET: u8 = 14;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
#[doc = r" Proxy"]
pub struct _ONW<'a> {
    w: &'a mut W,
}
impl<'a> _ONW<'a> {
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const MASK: bool = true;
        const OFFSET: u8 = 15;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
// CAL is a 10-bit field (mask 0x3FF at offset 16); raw multi-bit writes are
// `unsafe` because svd2rust cannot prove every value is valid for the field.
#[doc = r" Proxy"]
pub struct _CALW<'a> {
    w: &'a mut W,
}
impl<'a> _CALW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        const MASK: u16 = 1023;
        const OFFSET: u8 = 16;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
impl R {
    /// Value of the register as raw bits
    #[inline]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    /// Bit 0
    #[inline]
    pub fn rtcoe(&self) -> RTCOER {
        RTCOER { bits: (self.bits & (1 << 0)) != 0 }
    }
    /// Bit 1
    #[inline]
    pub fn halfsec(&self) -> HALFSECR {
        HALFSECR { bits: (self.bits & (1 << 1)) != 0 }
    }
    /// Bit 2
    #[inline]
    pub fn rtcsync(&self) -> RTCSYNCR {
        RTCSYNCR { bits: (self.bits & (1 << 2)) != 0 }
    }
    /// Bit 3
    #[inline]
    pub fn rtcwren(&self) -> RTCWRENR {
        RTCWRENR { bits: (self.bits & (1 << 3)) != 0 }
    }
    /// Bit 6
    #[inline]
    pub fn rtcclkon(&self) -> RTCCLKONR {
        RTCCLKONR { bits: (self.bits & (1 << 6)) != 0 }
    }
    /// Bit 7
    #[inline]
    pub fn rtsecsel(&self) -> RTSECSELR {
        RTSECSELR { bits: (self.bits & (1 << 7)) != 0 }
    }
    /// Bit 13
    #[inline]
    pub fn sidl(&self) -> SIDLR {
        SIDLR { bits: (self.bits & (1 << 13)) != 0 }
    }
    /// Bit 14
    #[inline]
    pub fn frz(&self) -> FRZR {
        FRZR { bits: (self.bits & (1 << 14)) != 0 }
    }
    /// Bit 15
    #[inline]
    pub fn on(&self) -> ONR {
        ONR { bits: (self.bits & (1 << 15)) != 0 }
    }
    /// Bits 16:25
    #[inline]
    pub fn cal(&self) -> CALR {
        // 10-bit field: shift down, mask to 0x3FF, narrow to u16.
        CALR { bits: ((self.bits >> 16) & 0x03ff) as u16 }
    }
}
impl W {
    /// Reset value of the register
    #[inline]
    pub fn reset_value() -> W {
        W { bits: 0 }
    }
    /// Writes raw bits to the register
    #[inline]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
    /// Bit 0
    #[inline]
    pub fn rtcoe(&mut self) -> _RTCOEW {
        _RTCOEW { w: self }
    }
    /// Bit 1
    #[inline]
    pub fn halfsec(&mut self) -> _HALFSECW {
        _HALFSECW { w: self }
    }
    /// Bit 2
    #[inline]
    pub fn rtcsync(&mut self) -> _RTCSYNCW {
        _RTCSYNCW { w: self }
    }
    /// Bit 3
    #[inline]
    pub fn rtcwren(&mut self) -> _RTCWRENW {
        _RTCWRENW { w: self }
    }
    /// Bit 6
    #[inline]
    pub fn rtcclkon(&mut self) -> _RTCCLKONW {
        _RTCCLKONW { w: self }
    }
    /// Bit 7
    #[inline]
    pub fn rtsecsel(&mut self) -> _RTSECSELW {
        _RTSECSELW { w: self }
    }
    /// Bit 13
    #[inline]
    pub fn sidl(&mut self) -> _SIDLW {
        _SIDLW { w: self }
    }
    /// Bit 14
    #[inline]
    pub fn frz(&mut self) -> _FRZW {
        _FRZW { w: self }
    }
    /// Bit 15
    #[inline]
    pub fn on(&mut self) -> _ONW {
        _ONW { w: self }
    }
    /// Bits 16:25
    #[inline]
    pub fn cal(&mut self) -> _CALW {
        _CALW { w: self }
    }
}
| 24.408163 | 59 | 0.485143 |
f4d4fdcf0506eb203fcec1f2329a76464a3d333d | 942 | use crate::{
proto::{self, ToProto, Transaction::TransactionBody_oneof_data},
transaction::Transaction,
Client, FileId,
};
use failure::Error;
use query_interface::{interfaces, vtable_for};
use std::any::Any;
// Delete the given file. After deletion, it will be marked as deleted and will have no contents.
pub struct TransactionFileDelete {
    id: FileId,
}
interfaces!(
    TransactionFileDelete: Any,
    ToProto<TransactionBody_oneof_data>
);
impl TransactionFileDelete {
    /// Builds a new file-deletion transaction for `id`, bound to `client`.
    pub fn new(client: &Client, id: FileId) -> Transaction<Self> {
        let params = Self { id };
        Transaction::new(client, params)
    }
}
impl ToProto<TransactionBody_oneof_data> for TransactionFileDelete {
    /// Converts this operation into the protobuf transaction-body variant.
    fn to_proto(&self) -> Result<TransactionBody_oneof_data, Error> {
        let mut body = proto::FileDelete::FileDeleteTransactionBody::new();
        body.set_fileID(self.id.to_proto()?);
        Ok(TransactionBody_oneof_data::fileDelete(body))
    }
}
| 26.914286 | 97 | 0.710191 |
7224c7c9333ce56fc3889ddcbf9c8f75de483558 | 5,139 | // Colour difference computation implementations.
// Copyright (c) 2021 Michał Nazarewicz <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//! # Empfindung
//!
//! Empfindung is a library providing implementations of colour difference
//! algorithms. Specifically, distances based on L\*a\*b\* colour space often
//! referred to as ΔE*. (This is also where the package gets its name. The ‘E’
//! stands for German ‘Empfindung’).
//!
//! The crate provides CIEDE2000 (in [`cie00`] module), CIE94 (in [`cie94`]),
//! CIE76 (in [`cie76`] module) and CMC l:c (in [`cmc`] module) implementations.
//!
//! ## Example
//!
//! ```
//! use empfindung::cie00;
//! use empfindung::cie76;
//!
//! fn main() {
//! let color_1 = lab::Lab {
//! l: 38.972,
//! a: 58.991,
//! b: 37.138,
//! };
//!
//! let color_2 = lab::Lab {
//! l: 54.528,
//! a: 42.416,
//! b: 54.497,
//! };
//!
//! let delta_e = cie00::diff(color_1, color_2);
//! println!("The CIEDE2000 colour difference is: {}", delta_e);
//! assert_eq!(20.553642, delta_e);
//!
//! let color_1 = (
//! 38.972,
//! 58.991,
//! 37.138,
//! );
//!
//! let color_2 = (
//! 54.528,
//! 42.416,
//! 54.497,
//! );
//!
//! let delta_e = cie76::diff(color_1, color_2);
//! println!("The Euclidean distance is: {}", delta_e);
//! assert_eq!(28.601656, delta_e);
//! }
//! ```
//!
//! ## Crate Features
//!
//! The crate defines `lab` and `rgb` features which are enabled by default.
//!
//! With both of them enabled, create provides [`ToLab`] implementation for
//! `rgb::RGB<u8>` type which means that `diff` functions can be used with
//! `rgb::RGB<u8>` arguments.
//!
//! Furthermore, if `lab` enabled the `diff` functions can accept `lab::Lab`
//! argument and `diff_rgb` functions as well as `DE2000` is provided. Note
//! that the latter two are a deprecated features.
#[cfg(all(feature = "lab", feature = "rgb"))]
use std::convert::TryInto;
pub mod cie00;
pub mod cie76;
pub mod cie94;
pub mod cmc;
#[doc(hidden)]
pub use cie00 as de2000;
#[allow(deprecated)]
pub use cie00::DE2000;
/// Object which can be converted to L\*a\*\b* colour representation.
pub trait ToLab {
    /// Returns L\*, a\* and b\* coordinates of a colour.
    fn to_lab(&self) -> (f32, f32, f32);
}

impl ToLab for (f32, f32, f32) {
    // A plain tuple already is an (L*, a*, b*) triple.
    fn to_lab(&self) -> (f32, f32, f32) {
        *self
    }
}

impl ToLab for &(f32, f32, f32) {
    fn to_lab(&self) -> (f32, f32, f32) {
        **self
    }
}

impl ToLab for [f32; 3] {
    fn to_lab(&self) -> (f32, f32, f32) {
        let [l, a, b] = *self;
        (l, a, b)
    }
}

impl ToLab for &[f32; 3] {
    fn to_lab(&self) -> (f32, f32, f32) {
        let [l, a, b] = **self;
        (l, a, b)
    }
}

#[cfg(feature = "lab")]
impl ToLab for lab::Lab {
    fn to_lab(&self) -> (f32, f32, f32) {
        (self.l, self.a, self.b)
    }
}

#[cfg(feature = "lab")]
impl ToLab for &lab::Lab {
    fn to_lab(&self) -> (f32, f32, f32) {
        (self.l, self.a, self.b)
    }
}

#[cfg(all(feature = "lab", feature = "rgb"))]
impl ToLab for rgb::RGB<u8> {
    fn to_lab(&self) -> (f32, f32, f32) {
        let rgb: &[u8; 3] = self.as_ref().try_into().unwrap();
        lab::Lab::from_rgb(rgb).to_lab()
    }
}

#[cfg(all(feature = "lab", feature = "rgb"))]
impl ToLab for &rgb::RGB<u8> {
    fn to_lab(&self) -> (f32, f32, f32) {
        let rgb: &[u8; 3] = self.as_ref().try_into().unwrap();
        lab::Lab::from_rgb(rgb).to_lab()
    }
}

#[cfg(all(feature = "lab", feature = "rgb"))]
impl ToLab for rgb::RGBA<u8> {
    /// Assumes an sRGB colour and converts it into L\*a\*\b\*.
    fn to_lab(&self) -> (f32, f32, f32) {
        let rgb: &[u8; 3] = self.as_ref()[0..3].try_into().unwrap();
        lab::Lab::from_rgb(rgb).to_lab()
    }
}

#[cfg(all(feature = "lab", feature = "rgb"))]
impl ToLab for &rgb::RGBA<u8> {
    /// Assumes an sRGB colour and converts it into L\*a\*\b\* ignoring its
    /// alpha.
    fn to_lab(&self) -> (f32, f32, f32) {
        let rgb: &[u8; 3] = self.as_ref()[0..3].try_into().unwrap();
        lab::Lab::from_rgb(rgb).to_lab()
    }
}
pub(crate) mod math {
    /// Euclidean length of the 2-D vector `(x, y)`.
    ///
    /// Computed as plain `sqrt(x² + y²)` (not `f32::hypot`) so results stay
    /// bit-identical to the original formulation.
    pub fn hypot(x: f32, y: f32) -> f32 {
        let sum_of_squares = x * x + y * y;
        sum_of_squares.sqrt()
    }
}
#[cfg(test)]
pub(crate) mod testutil;
| 30.408284 | 80 | 0.610235 |
e67bf13cb5bf45b49f822619e23a03a76a4a7c2f | 93 | use crate::{
domain,
};
/// Command/DTO describing a planet to be created.
pub struct CreatePlanet{
    /// Initial resources assigned to the new planet.
    /// NOTE(review): semantics of `domain::Resources` are defined elsewhere — confirm.
    pub resources : domain::Resources
}
d9bb47ab276753b15ada778e050aa65e7197328c | 4,930 | use crate::{id::Id, modules::Modules};
use ahash::RandomState;
use std::collections::HashMap;
use swc_common::{SyntaxContext, DUMMY_SP};
use swc_ecma_ast::*;
use swc_ecma_visit::{
noop_visit_mut_type, noop_visit_type, Node, Visit, VisitMut, VisitMutWith, VisitWith,
};
/// Mapping collected during the read-only pass: each recorded identifier
/// points at the identifier its uses should be rewritten to.
#[derive(Debug, Default)]
pub(crate) struct InlineData {
    // Keyed and valued by `Id`; uses `ahash::RandomState` as the hasher.
    ids: HashMap<Id, Id, RandomState>,
}
/// Inline **injected** variables.
///
/// Runs two passes over `module`: a read-only pass collecting `const`
/// aliases created in `injected_ctxt`, then a mutable pass rewriting uses
/// and removing the now-redundant declarations. Statements that became
/// empty are dropped afterwards.
pub(crate) fn inline(injected_ctxt: SyntaxContext, module: &mut Modules) {
    log::debug!("Inlining injected variables");

    let mut v = Inliner {
        injected_ctxt,
        data: Default::default(),
    };
    module.visit_with(&mut v);
    module.visit_mut_with(&mut v);
    // Idiom: `!matches!` replaces the verbose `match … => false, _ => true`.
    module.retain_mut(|_, s| !matches!(s, ModuleItem::Stmt(Stmt::Empty(..))));
}
/// Visitor that performs the inlining described in [`inline`].
#[derive(Debug)]
struct Inliner {
    // Syntax context that marks the injected bindings eligible for inlining.
    injected_ctxt: SyntaxContext,
    data: InlineData,
}
impl Inliner {
    /// Records that identifier `from` should be rewritten to `to`.
    ///
    /// Each identifier is expected to be recorded at most once; a duplicate
    /// insertion indicates a hygiene bug upstream, so it aborts loudly.
    fn store(&mut self, from: Id, to: Id) {
        let previous = self.data.ids.insert(from.clone(), to.clone());
        if let Some(prev) = previous {
            unreachable!(
                "Multiple identifiers equivalent up to span hygiene found: {:#?}\nFirst = \
                 {:#?}\nSecond = {:#?}",
                from, prev, to
            )
        }
    }
}
// Read-only pass: finds injected `const x = y;` aliases without mutating.
impl Visit for Inliner {
    noop_visit_type!();
    /// Noop
    fn visit_module_decl(&mut self, _: &ModuleDecl, _: &dyn Node) {}
    /// Noop. We don't inline variables declared in subscopes.
    fn visit_function(&mut self, _: &Function, _: &dyn Node) {}
    /// Noop. We don't inline variables declared in subscopes.
    fn visit_block_stmt(&mut self, _: &BlockStmt, _: &dyn Node) {}
    // Only descend into `const` declarations carrying the injected syntax
    // context; all other declarations are irrelevant for inlining.
    fn visit_var_decl(&mut self, n: &VarDecl, _: &dyn Node) {
        if n.span.ctxt != self.injected_ctxt || n.kind != VarDeclKind::Const {
            return;
        }
        n.visit_children_with(self);
    }
    // Record `const from = to;` as an alias when the initializer is a bare
    // identifier (other initializer shapes are ignored).
    fn visit_var_declarator(&mut self, n: &VarDeclarator, _: &dyn Node) {
        n.visit_children_with(self);
        match (&n.name, n.init.as_deref()) {
            (Pat::Ident(from), Some(Expr::Ident(to))) => {
                self.store(from.id.clone().into(), to.into());
            }
            _ => {}
        }
    }
}
// Mutable pass: rewrites recorded identifiers and removes the inlined
// declarations collected by the `Visit` pass above.
impl VisitMut for Inliner {
    noop_visit_mut_type!();
    /// Don't modify exported ident.
    fn visit_mut_export_named_specifier(&mut self, n: &mut ExportNamedSpecifier) {
        if n.exported.is_none() {
            n.exported = Some(n.orig.clone());
        }
        n.orig.visit_mut_with(self);
    }
    // Replace an identifier with its recorded target; re-visits the
    // replacement so chains of aliases collapse fully.
    fn visit_mut_ident(&mut self, n: &mut Ident) {
        if let Some(mapped) = self.data.ids.get(&n.clone().into()).cloned() {
            *n = mapped.into();
            n.visit_mut_with(self);
        }
    }
    /// General logic for member expressions.
    fn visit_mut_member_expr(&mut self, n: &mut MemberExpr) {
        n.obj.visit_mut_with(self);
        // Only a computed access (`obj[prop]`) has an expression property;
        // `obj.prop` must keep its literal property name.
        if n.computed {
            n.prop.visit_mut_with(self);
        }
    }
    fn visit_mut_module_items(&mut self, n: &mut Vec<ModuleItem>) {
        n.visit_mut_children_with(self);
        // Drop statements emptied by `visit_mut_stmt` below.
        n.retain(|v| match v {
            ModuleItem::Stmt(Stmt::Empty(..)) => false,
            _ => true,
        });
    }
    fn visit_mut_prop(&mut self, n: &mut Prop) {
        match n {
            Prop::Shorthand(i) => {
                let orig = i.clone();
                i.visit_mut_with(self);
                if i.span.ctxt == orig.span.ctxt {
                    return;
                }
                // The shorthand was renamed: expand `{ orig }` into
                // `{ orig: renamed }` to preserve the object's key.
                if i.sym != orig.sym {
                    *n = Prop::KeyValue(KeyValueProp {
                        key: PropName::Ident(orig),
                        value: Box::new(Expr::Ident(i.clone())),
                    });
                    return;
                }
            }
            _ => {
                n.visit_mut_children_with(self);
            }
        }
    }
    fn visit_mut_prop_name(&mut self, n: &mut PropName) {
        // Only computed keys contain expressions; literal keys stay as-is.
        match n {
            PropName::Ident(_) => {}
            PropName::Str(_) => {}
            PropName::Num(_) => {}
            PropName::Computed(e) => {
                e.expr.visit_mut_with(self);
            }
            PropName::BigInt(_) => {}
        }
    }
    fn visit_mut_stmt(&mut self, n: &mut Stmt) {
        n.visit_mut_children_with(self);
        // A `var`/`const` statement whose declarators were all removed
        // becomes an empty statement (pruned in `visit_mut_module_items`).
        match n {
            Stmt::Decl(Decl::Var(var)) if var.decls.is_empty() => {
                *n = Stmt::Empty(EmptyStmt { span: DUMMY_SP });
            }
            _ => {}
        }
    }
    fn visit_mut_var_declarators(&mut self, n: &mut Vec<VarDeclarator>) {
        // Remove declarators whose binding was recorded as an alias; their
        // uses are rewritten to the target identifier instead.
        n.retain(|d| {
            match &d.name {
                Pat::Ident(name) => {
                    if self.data.ids.contains_key(&name.id.clone().into()) {
                        return false;
                    }
                }
                _ => {}
            }
            true
        });
        n.visit_mut_children_with(self);
    }
}
| 27.388889 | 91 | 0.510548 |
8775d8b9068028fd6cc122cda0b630f42693bf18 | 26,035 | use bytes::Bytes;
use futures::{
future::{err, result, Either},
Future,
};
use hex::FromHex;
#[doc(hidden)]
pub use interledger::api::AccountDetails;
pub use interledger::service_util::ExchangeRateProvider;
use std::sync::Arc;
use crate::metrics::{incoming_metrics, outgoing_metrics};
use crate::trace::{trace_forwarding, trace_incoming, trace_outgoing};
use interledger::{
api::{NodeApi, NodeStore},
btp::{connect_client, create_btp_service_and_filter, BtpStore},
ccp::CcpRouteManagerBuilder,
http::{error::*, HttpClientService, HttpServer as IlpOverHttpServer},
ildcp::IldcpService,
packet::Address,
packet::{ErrorCode, RejectBuilder},
router::Router,
service::{
outgoing_service_fn, Account as AccountTrait, IncomingService, OutgoingRequest,
OutgoingService, Username,
},
service_util::{
BalanceService, EchoService, ExchangeRateFetcher, ExchangeRateService,
ExpiryShortenerService, MaxPacketAmountService, RateLimitService, ValidatorService,
},
settlement::{create_settlements_filter, SettlementMessageService},
store_redis::{Account, AccountId, ConnectionInfo, IntoConnectionInfo, RedisStoreBuilder},
stream::StreamReceiverService,
};
use lazy_static::lazy_static;
use metrics_core::{Builder, Drain, Observe};
use metrics_runtime;
use ring::hmac;
use serde::{de::Error as DeserializeError, Deserialize, Deserializer};
use std::{convert::TryFrom, net::SocketAddr, str, str::FromStr, time::Duration};
use tokio::spawn;
use tracing::{debug, debug_span, error, info};
use tracing_futures::Instrument;
use url::Url;
use warp::{
self,
http::{Response, StatusCode},
Filter,
};
// Fixed message signed with the node's secret seed to derive the Redis
// encryption secret (see `generate_redis_secret`).
static REDIS_SECRET_GENERATION_STRING: &str = "ilp_redis_secret";
// Fallback Redis URL used when none is configured (see `default_redis_url`).
static DEFAULT_REDIS_URL: &str = "redis://127.0.0.1:6379";
lazy_static! {
    // Fallback ILP address used when `InterledgerNode.ilp_address` is absent.
    static ref DEFAULT_ILP_ADDRESS: Address = Address::from_str("local.host").unwrap();
}
/// Default listen address for the Settlement Engine API: `127.0.0.1:7771`.
fn default_settlement_api_bind_address() -> SocketAddr {
    ([127, 0, 0, 1], 7771).into()
}
/// Default listen address for the HTTP API / ILP-over-HTTP: `127.0.0.1:7770`.
fn default_http_bind_address() -> SocketAddr {
    ([127, 0, 0, 1], 7770).into()
}
/// Parses [`DEFAULT_REDIS_URL`] into Redis connection info.
/// The `unwrap` is safe: the URL is a valid compile-time constant.
fn default_redis_url() -> ConnectionInfo {
    DEFAULT_REDIS_URL.into_connection_info().unwrap()
}
/// Default exchange-rate polling interval: 60 seconds, in milliseconds.
fn default_exchange_rate_poll_interval() -> u64 {
    60 * 1000
}
/// Default number of consecutive exchange-rate poll failures tolerated
/// before the cached rates are invalidated.
fn default_exchange_rate_poll_failure_tolerance() -> u32 {
    5
}
/// Deserializes an optional ILP [`Address`] from raw bytes.
///
/// NOTE(review): a value that fails to deserialize as bytes is silently
/// treated as absent (`Ok(None)`) rather than surfaced as an error.
fn deserialize_optional_address<'de, D>(deserializer: D) -> Result<Option<Address>, D::Error>
where
    D: Deserializer<'de>,
{
    if let Ok(address) = Bytes::deserialize(deserializer) {
        // Bytes deserialized fine; now validate them as an ILP address.
        Address::try_from(address)
            .map(Some)
            .map_err(|err| DeserializeError::custom(format!("Invalid address: {:?}", err)))
    } else {
        Ok(None)
    }
}
/// Deserializes a 32-byte value from a hex-encoded string
/// (used for the node's `secret_seed`).
fn deserialize_32_bytes_hex<'de, D>(deserializer: D) -> Result<[u8; 32], D::Error>
where
    D: Deserializer<'de>,
{
    <[u8; 32]>::from_hex(String::deserialize(deserializer)?).map_err(|err| {
        DeserializeError::custom(format!(
            "Invalid hex value (must be 32 hex-encoded bytes): {:?}",
            err
        ))
    })
}
/// Deserializes an optional [`Username`] from a string.
///
/// NOTE(review): like `deserialize_optional_address`, a value that fails to
/// deserialize as a string yields `Ok(None)` instead of an error.
fn deserialize_optional_username<'de, D>(deserializer: D) -> Result<Option<Username>, D::Error>
where
    D: Deserializer<'de>,
{
    if let Ok(username) = String::deserialize(deserializer) {
        Username::from_str(&username)
            .map(Some)
            .map_err(|err| DeserializeError::custom(format!("Invalid username: {:?}", err)))
    } else {
        Ok(None)
    }
}
/// Deserializes a Redis URL string into [`ConnectionInfo`], rejecting both
/// malformed URLs and URLs that are not valid Redis connection targets.
fn deserialize_redis_connection<'de, D>(deserializer: D) -> Result<ConnectionInfo, D::Error>
where
    D: Deserializer<'de>,
{
    Url::parse(&String::deserialize(deserializer)?)
        .map_err(|err| DeserializeError::custom(format!("Invalid URL: {:?}", err)))?
        .into_connection_info()
        .map_err(|err| {
            DeserializeError::custom(format!(
                "Error converting into Redis connection info: {:?}",
                err
            ))
        })
}
/// Configuration for [Prometheus](https://prometheus.io) metrics collection.
/// Provided (optionally) through [`InterledgerNode::prometheus`]; when absent,
/// no metrics are collected.
#[derive(Deserialize, Clone)]
pub struct PrometheusConfig {
    /// IP address and port to host the Prometheus endpoint on.
    pub bind_address: SocketAddr,
    /// Amount of time, in milliseconds, that the node will collect data points for the
    /// Prometheus histograms. Defaults to 300000ms (5 minutes).
    #[serde(default = "PrometheusConfig::default_histogram_window")]
    pub histogram_window: u64,
    /// Granularity, in milliseconds, that the node will use to roll off old data.
    /// For example, a value of 1000ms (1 second) would mean that the node forgets the oldest
    /// 1 second of histogram data points every second. Defaults to 10000ms (10 seconds).
    #[serde(default = "PrometheusConfig::default_histogram_granularity")]
    pub histogram_granularity: u64,
}
impl PrometheusConfig {
    /// Default histogram window: 5 minutes, expressed in milliseconds.
    fn default_histogram_window() -> u64 {
        5 * 60 * 1000
    }
    /// Default histogram granularity: 10 seconds, expressed in milliseconds.
    fn default_histogram_granularity() -> u64 {
        10 * 1000
    }
}
/// An all-in-one Interledger node that includes sender and receiver functionality,
/// a connector, and a management API. The node uses Redis for persistence.
#[derive(Deserialize, Clone)]
pub struct InterledgerNode {
    /// ILP address of the node
    // Falls back to `local.host` (DEFAULT_ILP_ADDRESS) when not provided.
    #[serde(deserialize_with = "deserialize_optional_address")]
    #[serde(default)]
    pub ilp_address: Option<Address>,
    /// Root secret used to derive encryption keys
    // Supplied as a 64-character hex string; see `deserialize_32_bytes_hex`.
    #[serde(deserialize_with = "deserialize_32_bytes_hex")]
    pub secret_seed: [u8; 32],
    /// HTTP Authorization token for the node admin (sent as a Bearer token)
    pub admin_auth_token: String,
    /// Redis URI (for example, "redis://127.0.0.1:6379" or "unix:/tmp/redis.sock")
    // Accepts the legacy key name `redis_url` via the serde alias.
    #[serde(
        deserialize_with = "deserialize_redis_connection",
        default = "default_redis_url",
        alias = "redis_url"
    )]
    pub redis_connection: ConnectionInfo,
    /// IP address and port to listen for HTTP connections
    /// This is used for both the API and ILP over HTTP packets
    #[serde(default = "default_http_bind_address")]
    pub http_bind_address: SocketAddr,
    /// IP address and port to listen for the Settlement Engine API
    #[serde(default = "default_settlement_api_bind_address")]
    pub settlement_api_bind_address: SocketAddr,
    /// When SPSP payments are sent to the root domain, the payment pointer is resolved
    /// to <domain>/.well-known/pay. This value determines which account those payments
    /// will be sent to.
    #[serde(default, deserialize_with = "deserialize_optional_username")]
    pub default_spsp_account: Option<Username>,
    /// Interval, defined in milliseconds, on which the node will broadcast routing
    /// information to other nodes using CCP. Defaults to 30000ms (30 seconds).
    pub route_broadcast_interval: Option<u64>,
    /// Interval, defined in milliseconds, on which the node will poll the exchange rate provider.
    /// Defaults to 60000ms (60 seconds).
    #[serde(default = "default_exchange_rate_poll_interval")]
    pub exchange_rate_poll_interval: u64,
    /// The number of consecutive failed polls to the exchange rate provider
    /// that the connector will tolerate before invalidating the exchange rate cache.
    #[serde(default = "default_exchange_rate_poll_failure_tolerance")]
    pub exchange_rate_poll_failure_tolerance: u32,
    /// API to poll for exchange rates. Currently the supported options are:
    /// - [CoinCap](https://docs.coincap.io)
    /// - [CryptoCompare](https://cryptocompare.com) (note this requires an API key)
    /// If this value is not set, the node will not poll for exchange rates and will
    /// instead use the rates configured via the HTTP API.
    #[serde(default)]
    pub exchange_rate_provider: Option<ExchangeRateProvider>,
    /// Spread, as a fraction, to add on top of the exchange rate.
    /// This amount is kept as the node operator's profit, or may cover
    /// fluctuations in exchange rates.
    /// For example, take an incoming packet with an amount of 100. If the
    /// exchange rate is 1:2 and the spread is 0.01, the amount on the
    /// outgoing packet would be 198 (instead of 200 without the spread).
    #[serde(default)]
    pub exchange_rate_spread: f64,
    /// Configuration for [Prometheus](https://prometheus.io) metrics collection.
    /// If this configuration is not provided, the node will not collect metrics.
    #[serde(default)]
    pub prometheus: Option<PrometheusConfig>,
}
impl InterledgerNode {
/// Returns a future that runs the Interledger.rs Node.
///
/// If the Prometheus configuration was provided, it will
/// also run the Prometheus metrics server on the given address.
// TODO when a BTP connection is made, insert a outgoing HTTP entry into the Store to tell other
// connector instances to forward packets for that account to us
pub fn serve(&self) -> impl Future<Item = (), Error = ()> {
if self.prometheus.is_some() {
Either::A(
self.serve_prometheus()
.join(self.serve_node())
.and_then(|_| Ok(())),
)
} else {
Either::B(self.serve_node())
}
}
    // Wires together the whole service stack (BTP, HTTP, CCP routing,
    // balances, exchange rates, settlement) around a Redis store and spawns
    // the HTTP and Settlement API servers.
    #[allow(clippy::cognitive_complexity)]
    fn serve_node(&self) -> impl Future<Item = (), Error = ()> {
        let redis_secret = generate_redis_secret(&self.secret_seed);
        let secret_seed = Bytes::from(&self.secret_seed[..]);
        // Clone everything needed by the `move` closures below up front.
        let http_bind_address = self.http_bind_address;
        let settlement_api_bind_address = self.settlement_api_bind_address;
        let ilp_address = if let Some(address) = &self.ilp_address {
            address.clone()
        } else {
            DEFAULT_ILP_ADDRESS.clone()
        };
        let ilp_address_clone = ilp_address.clone();
        let ilp_address_clone2 = ilp_address.clone();
        let admin_auth_token = self.admin_auth_token.clone();
        let default_spsp_account = self.default_spsp_account.clone();
        let redis_addr = self.redis_connection.addr.clone();
        let route_broadcast_interval = self.route_broadcast_interval;
        let exchange_rate_provider = self.exchange_rate_provider.clone();
        let exchange_rate_poll_interval = self.exchange_rate_poll_interval;
        let exchange_rate_poll_failure_tolerance = self.exchange_rate_poll_failure_tolerance;
        let exchange_rate_spread = self.exchange_rate_spread;
        debug!(target: "interledger-node",
            "Starting Interledger node with ILP address: {}",
            ilp_address
        );
        Box::new(RedisStoreBuilder::new(self.redis_connection.clone(), redis_secret)
        .node_ilp_address(ilp_address.clone())
        .connect()
        .map_err(move |err| error!(target: "interledger-node", "Error connecting to Redis: {:?} {:?}", redis_addr, err))
        .and_then(move |store| {
            store.clone().get_btp_outgoing_accounts()
                .map_err(|_| error!(target: "interledger-node", "Error getting accounts"))
                .and_then(move |btp_accounts| {
                    // Terminal outgoing handler: anything that falls through the
                    // whole service chain is rejected as unreachable.
                    let outgoing_service =
                        outgoing_service_fn(move |request: OutgoingRequest<Account>| {
                            error!(target: "interledger-node", "No route found for outgoing account ");
                            Err(RejectBuilder {
                                code: ErrorCode::F02_UNREACHABLE,
                                message: &format!(
                                    "No outgoing route for account: {} (ILP address of the Prepare packet: {:?})",
                                    request.to.id(),
                                    request.prepare.destination(),
                                )
                                .as_bytes(),
                                triggered_by: Some(&ilp_address_clone),
                                data: &[],
                            }
                            .build())
                        });
                    // Connect to all of the accounts that have outgoing ilp_over_btp_urls configured
                    // but don't fail if we are unable to connect
                    // TODO try reconnecting to those accounts later
                    connect_client(ilp_address_clone2.clone(), btp_accounts, false, outgoing_service).and_then(
                        move |btp_client_service| {
                            let (btp_server_service, btp_filter) = create_btp_service_and_filter(ilp_address_clone2, store.clone(), btp_client_service.clone());
                            let btp = btp_client_service.clone();
                            // The BTP service is both an Incoming and Outgoing one so we pass it first as the Outgoing
                            // service to others like the router and then call handle_incoming on it to set up the incoming handler
                            let outgoing_service = btp_server_service.clone();
                            let outgoing_service = HttpClientService::new(
                                store.clone(),
                                outgoing_service,
                            );
                            let outgoing_service = outgoing_service.wrap(outgoing_metrics);
                            // Note: the expiry shortener must come after the Validator so that the expiry duration
                            // is shortened before we check whether there is enough time left
                            let outgoing_service = ValidatorService::outgoing(
                                store.clone(),
                                outgoing_service
                            );
                            let outgoing_service =
                                ExpiryShortenerService::new(outgoing_service);
                            let outgoing_service = StreamReceiverService::new(
                                secret_seed.clone(),
                                store.clone(),
                                outgoing_service,
                            );
                            let outgoing_service = BalanceService::new(
                                store.clone(),
                                outgoing_service,
                            );
                            let outgoing_service = ExchangeRateService::new(
                                exchange_rate_spread,
                                store.clone(),
                                outgoing_service,
                            );
                            // Set up the Router and Routing Manager
                            let incoming_service = Router::new(
                                store.clone(),
                                // Add tracing to add the outgoing request details to the incoming span
                                outgoing_service.clone().wrap(trace_forwarding),
                            );
                            // Add tracing to track the outgoing request details
                            let outgoing_service = outgoing_service.wrap(trace_outgoing).in_current_span();
                            let mut ccp_builder = CcpRouteManagerBuilder::new(
                                ilp_address.clone(),
                                store.clone(),
                                outgoing_service.clone(),
                                incoming_service,
                            );
                            ccp_builder.ilp_address(ilp_address.clone());
                            if let Some(ms) = route_broadcast_interval {
                                ccp_builder.broadcast_interval(ms);
                            }
                            let incoming_service = ccp_builder.to_service();
                            let incoming_service = EchoService::new(store.clone(), incoming_service);
                            let incoming_service = SettlementMessageService::new(incoming_service);
                            let incoming_service = IldcpService::new(incoming_service);
                            let incoming_service =
                                MaxPacketAmountService::new(
                                    store.clone(),
                                    incoming_service
                                );
                            let incoming_service =
                                ValidatorService::incoming(store.clone(), incoming_service);
                            let incoming_service = RateLimitService::new(
                                store.clone(),
                                incoming_service,
                            );
                            // Add tracing to track the incoming request details
                            let incoming_service = incoming_service.wrap(trace_incoming).in_current_span();
                            let incoming_service = incoming_service.wrap(incoming_metrics);
                            // Handle incoming packets sent via BTP
                            btp_server_service.handle_incoming(incoming_service.clone().wrap(|request, mut next| {
                                let btp = debug_span!(target: "interledger-node", "btp");
                                let _btp_scope = btp.enter();
                                next.handle_request(request).in_current_span()
                            }).in_current_span());
                            btp_client_service.handle_incoming(incoming_service.clone().wrap(|request, mut next| {
                                let btp = debug_span!(target: "interledger-node", "btp");
                                let _btp_scope = btp.enter();
                                next.handle_request(request).in_current_span()
                            }).in_current_span());
                            // Node HTTP API
                            let mut api = NodeApi::new(
                                secret_seed,
                                admin_auth_token,
                                store.clone(),
                                incoming_service.clone().wrap(|request, mut next| {
                                    let api = debug_span!(target: "interledger-node", "api");
                                    let _api_scope = api.enter();
                                    next.handle_request(request).in_current_span()
                                }).in_current_span(),
                                outgoing_service.clone(),
                                btp.clone(),
                            );
                            if let Some(username) = default_spsp_account {
                                api.default_spsp_account(username);
                            }
                            // add an API of ILP over HTTP and add rejection handler
                            let api = api.into_warp_filter()
                                .or(IlpOverHttpServer::new(incoming_service.clone().wrap(|request, mut next| {
                                    let http = debug_span!(target: "interledger-node", "http");
                                    let _http_scope = http.enter();
                                    next.handle_request(request).in_current_span()
                                }).in_current_span(), store.clone()).as_filter())
                                .recover(default_rejection_handler);
                            // Mount the BTP endpoint at /ilp/btp
                            let btp_endpoint = warp::path("ilp")
                                .and(warp::path("btp"))
                                .and(warp::path::end())
                                .and(btp_filter);
                            // Note that other endpoints added to the API must come first
                            // because the API includes error handling and consumes the request.
                            // TODO should we just make BTP part of the API?
                            let api = btp_endpoint.or(api).with(warp::log("interledger-api")).boxed();
                            info!(target: "interledger-node", "Interledger.rs node HTTP API listening on: {}", http_bind_address);
                            spawn(warp::serve(api).bind(http_bind_address));
                            // Settlement API
                            let settlement_api = create_settlements_filter(
                                store.clone(),
                                outgoing_service.clone(),
                            );
                            info!(target: "interledger-node", "Settlement API listening on: {}", settlement_api_bind_address);
                            spawn(warp::serve(settlement_api).bind(settlement_api_bind_address));
                            // Exchange Rate Polling
                            if let Some(provider) = exchange_rate_provider {
                                let exchange_rate_fetcher = ExchangeRateFetcher::new(provider, exchange_rate_poll_failure_tolerance, store.clone());
                                exchange_rate_fetcher.spawn_interval(Duration::from_millis(exchange_rate_poll_interval));
                            } else {
                                debug!(target: "interledger-node", "Not using exchange rate provider. Rates must be set via the HTTP API");
                            }
                            Ok(())
                        },
                    )
                })
        })
        .in_current_span())
    }
    /// Starts a Prometheus metrics server that will listen on the configured address.
    ///
    /// # Errors
    /// This will fail if another Prometheus server is already running in this
    /// process or on the configured port.
    #[allow(clippy::cognitive_complexity)]
    fn serve_prometheus(&self) -> impl Future<Item = (), Error = ()> {
        Box::new(if let Some(ref prometheus) = self.prometheus {
            // Set up the metrics collector
            let receiver = metrics_runtime::Builder::default()
                .histogram(
                    Duration::from_millis(prometheus.histogram_window),
                    Duration::from_millis(prometheus.histogram_granularity),
                )
                .build()
                .expect("Failed to create metrics Receiver");
            let controller = receiver.controller();
            // Try installing the global recorder
            match metrics::set_boxed_recorder(Box::new(receiver)) {
                Ok(_) => {
                    let observer =
                        Arc::new(metrics_runtime::observers::PrometheusBuilder::default());
                    // Serve the Prometheus text format on GET /
                    let filter = warp::get2().and(warp::path::end()).map(move || {
                        let mut observer = observer.build();
                        controller.observe(&mut observer);
                        let prometheus_response = observer.drain();
                        Response::builder()
                            .status(StatusCode::OK)
                            .header("Content-Type", "text/plain; version=0.0.4")
                            .body(prometheus_response)
                    });
                    info!(target: "interledger-node",
                        "Prometheus metrics server listening on: {}",
                        prometheus.bind_address
                    );
                    Either::A(
                        warp::serve(filter)
                            .bind(prometheus.bind_address)
                            .map_err(|_| {
                                error!(target: "interledger-node", "Error binding Prometheus server to the configured address")
                            }),
                    )
                }
                Err(e) => {
                    // The global recorder can only be installed once per process.
                    error!(target: "interledger-node", "Error installing global metrics recorder (this is likely caused by trying to run two nodes with Prometheus metrics in the same process): {:?}", e);
                    Either::B(err(()))
                }
            }
        } else {
            error!(target: "interledger-node", "No prometheus configuration provided");
            Either::B(err(()))
        })
    }
    /// Run the node on the default Tokio runtime.
    /// Blocks the current thread until the node's future completes
    /// (see `tokio_run`, which waits for runtime idle).
    pub fn run(&self) {
        tokio_run(self.serve());
    }
    #[doc(hidden)]
    #[allow(dead_code)]
    /// Hidden helper: connects to Redis (re-deriving the encryption secret)
    /// and inserts a single account, returning its id. All failures are
    /// logged and surfaced as `Err(())`.
    pub fn insert_account(
        &self,
        account: AccountDetails,
    ) -> impl Future<Item = AccountId, Error = ()> {
        let redis_secret = generate_redis_secret(&self.secret_seed);
        result(self.redis_connection.clone().into_connection_info())
            .map_err(|err| error!(target: "interledger-node", "Invalid Redis connection details: {:?}", err))
            .and_then(move |redis_url| RedisStoreBuilder::new(redis_url, redis_secret).connect())
            .map_err(|err| error!(target: "interledger-node", "Error connecting to Redis: {:?}", err))
            .and_then(move |store| {
                store
                    .insert_account(account)
                    .map_err(|_| error!(target: "interledger-node", "Unable to create account"))
                    .and_then(|account| {
                        debug!(target: "interledger-node", "Created account: {}", account.id());
                        Ok(account.id())
                    })
            })
    }
}
/// Derives the 32-byte Redis encryption secret from the node's secret seed
/// via HMAC-SHA256 over a fixed info string.
fn generate_redis_secret(secret_seed: &[u8; 32]) -> [u8; 32] {
    let key = hmac::Key::new(hmac::HMAC_SHA256, secret_seed);
    let signature = hmac::sign(&key, REDIS_SECRET_GENERATION_STRING.as_bytes());
    let mut secret = [0u8; 32];
    secret.copy_from_slice(signature.as_ref());
    secret
}
#[doc(hidden)]
/// Spawns `fut` on a dedicated Tokio runtime and blocks until the runtime
/// becomes idle (i.e. all spawned work has finished).
pub fn tokio_run(fut: impl Future<Item = (), Error = ()> + Send + 'static) {
    let mut runtime = tokio::runtime::Builder::new()
        // Don't swallow panics
        .panic_handler(|err| std::panic::resume_unwind(err))
        .name_prefix("interledger-rs-worker-")
        .build()
        .expect("failed to start new runtime");
    runtime.spawn(fut);
    runtime.shutdown_on_idle().wait().unwrap();
}
| 47.509124 | 203 | 0.556866 |
f91ec493c831ba3edb75a5380453f6d44ff70467 | 410 | use crate::optim;
/// A compilation context: supplies the ordered list of optimizer passes to
/// apply.
pub trait Context: std::fmt::Debug + Send + Sync {
    fn optimizer_passes(&self) -> Vec<Box<optim::OptimizerPass>>;
}
/// Default context: runs the normalization passes followed by the
/// code-generation passes, in that order.
#[derive(Debug)]
pub struct DefaultContext;
impl Context for DefaultContext {
    fn optimizer_passes(&self) -> Vec<Box<optim::OptimizerPass>> {
        let mut passes = optim::normalization();
        // `extend` accepts any `IntoIterator`, so the explicit `.into_iter()`
        // call on the codegen pass list was redundant.
        passes.extend(optim::codegen());
        passes
    }
}
| 24.117647 | 66 | 0.658537 |
878cae4e387f4fd01c998b9898d7a7e452c57c07 | 964 | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags:--test -O
// Regression test: with optimizations enabled (see the `-O` compile flag
// above), constructing a value of an uninhabited type must still reach the
// panic in `SystemFont::new` rather than being optimized away.
#[test]
#[should_panic(expected = "creating inhabited type")]
fn test() {
    FontLanguageOverride::system_font(SystemFont::new());
}
pub enum FontLanguageOverride {
    Normal,
    Override(&'static str),
    // Wraps an uninhabited type, so this variant can never actually exist.
    System(SystemFont)
}
// Uninhabited enum (no variants): values of this type cannot be constructed.
pub enum SystemFont {}
impl FontLanguageOverride {
    fn system_font(f: SystemFont) -> Self {
        FontLanguageOverride::System(f)
    }
}
impl SystemFont {
    // Always panics; exists so the test above can *attempt* to create a
    // value of an uninhabited type.
    fn new() -> Self {
        panic!("creating inhabited type")
    }
}
| 25.368421 | 68 | 0.696058 |
0a946e3c12cb3e1e3cb995e205adb54605abd2be | 11,458 | // Copyright 2020 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! # `IoSourceExt`
//!
//! User functions to asynchronously access files.
//! Using `IoSource` directly is inconvenient and requires dealing with state
//! machines for the backing uring, future libraries, etc. `IoSourceExt` instead
//! provides users with a future that can be `await`ed from async context.
//!
//! Each member of `IoSourceExt` returns a future for the supported operation. One or more
//! operation can be pending at a time.
//!
//! Operations can only access memory in a `Vec` or an implementor of `BackingMemory`. See the
//! `URingExecutor` documentation for an explaination of why.
use crate::poll_source::PollSource;
use crate::UringSource;
use async_trait::async_trait;
use std::os::unix::io::AsRawFd;
use std::rc::Rc;
use thiserror::Error as ThisError;
use crate::uring_mem::{BackingMemory, MemRegion};
// Unified error type covering both async backends.
#[derive(ThisError, Debug)]
pub enum Error {
    /// An error with a polled(FD) source.
    #[error("An error with a poll source: {0}")]
    Poll(crate::poll_source::Error),
    /// An error with a uring source.
    #[error("An error with a uring source: {0}")]
    Uring(crate::uring_executor::Error),
}
/// Result alias for fallible operations on async IO sources.
pub type Result<T> = std::result::Result<T, Error>;
/// Ergonomic methods for async reads.
#[async_trait(?Send)]
pub trait ReadAsync {
/// Reads from the iosource at `file_offset` and fill the given `vec`.
async fn read_to_vec<'a>(&'a self, file_offset: u64, vec: Vec<u8>) -> Result<(usize, Vec<u8>)>;
/// Reads to the given `mem` at the given offsets from the file starting at `file_offset`.
async fn read_to_mem<'a>(
&'a self,
file_offset: u64,
mem: Rc<dyn BackingMemory>,
mem_offsets: &'a [MemRegion],
) -> Result<usize>;
/// Wait for the FD of `self` to be readable.
async fn wait_readable(&self) -> Result<()>;
/// Reads a single u64 from the current offset.
async fn read_u64(&self) -> Result<u64>;
}
/// Ergonomic methods for async writes.
#[async_trait(?Send)]
pub trait WriteAsync {
/// Writes from the given `vec` to the file starting at `file_offset`.
async fn write_from_vec<'a>(
&'a self,
file_offset: u64,
vec: Vec<u8>,
) -> Result<(usize, Vec<u8>)>;
/// Writes from the given `mem` from the given offsets to the file starting at `file_offset`.
async fn write_from_mem<'a>(
&'a self,
file_offset: u64,
mem: Rc<dyn BackingMemory>,
mem_offsets: &'a [MemRegion],
) -> Result<usize>;
/// See `fallocate(2)`. Note this op is synchronous when using the Polled backend.
async fn fallocate(&self, file_offset: u64, len: u64, mode: u32) -> Result<()>;
/// Sync all completed write operations to the backing storage.
async fn fsync(&self) -> Result<()>;
}
/// Subtrait for general async IO.
#[async_trait(?Send)]
pub trait IoSourceExt<F>: ReadAsync + WriteAsync {
/// Yields the underlying IO source.
fn into_source(self: Box<Self>) -> F;
/// Provides a mutable ref to the underlying IO source.
fn as_source_mut(&mut self) -> &mut F;
/// Provides a ref to the underlying IO source.
fn as_source(&self) -> &F;
}
/// Creates a concrete `IoSourceExt`, preferring the io_uring backend when the
/// running kernel supports it and falling back to the fd_executor otherwise.
///
/// Note that on older kernels (pre 5.6) FDs such as event or timer FDs are
/// unreliable when having readv/writev performed through io_uring. To deal
/// with EventFd or TimerFd, use `IoSourceExt::read_u64`.
pub fn new<'a, F: AsRawFd + 'a>(f: F) -> Result<Box<dyn IoSourceExt<F> + 'a>> {
    if !crate::uring_executor::use_uring() {
        return new_poll(f);
    }
    new_uring(f)
}
/// Creates a concrete `IoSourceExt` using Uring.
pub(crate) fn new_uring<'a, F: AsRawFd + 'a>(f: F) -> Result<Box<dyn IoSourceExt<F> + 'a>> {
UringSource::new(f)
.map(|u| Box::new(u) as Box<dyn IoSourceExt<F>>)
.map_err(Error::Uring)
}
/// Creates a concrete `IoSourceExt` using the fd_executor.
pub(crate) fn new_poll<'a, F: AsRawFd + 'a>(f: F) -> Result<Box<dyn IoSourceExt<F> + 'a>> {
PollSource::new(f)
.map(|u| Box::new(u) as Box<dyn IoSourceExt<F>>)
.map_err(Error::Poll)
}
#[cfg(test)]
mod tests {
    use std::fs::{File, OpenOptions};
    use futures::pin_mut;
    use super::*;
    use crate::uring_mem::{MemRegion, VecIoWrapper};
    use crate::Executor;
    use std::os::unix::io::AsRawFd;
    use std::rc::Rc;
    // Each test defines one async body and runs it twice: once on the
    // io_uring executor and once on the epoll (fd_executor) backend, so both
    // implementations are exercised with identical expectations.
    #[test]
    fn readvec() {
        async fn go<F: AsRawFd + Unpin>(async_source: Box<dyn IoSourceExt<F>>) {
            let v = vec![0x55u8; 32];
            let v_ptr = v.as_ptr();
            let ret = async_source.read_to_vec(0, v).await.unwrap();
            assert_eq!(ret.0, 32);
            let ret_v = ret.1;
            // The same allocation must be handed back to the caller.
            assert_eq!(v_ptr, ret_v.as_ptr());
            // Reading /dev/zero must overwrite the 0x55 fill with zeros.
            assert!(ret_v.iter().all(|&b| b == 0));
        }
        let f = File::open("/dev/zero").unwrap();
        let uring_source = new_uring(f).unwrap();
        let fut = go(uring_source);
        pin_mut!(fut);
        crate::uring_executor::URingExecutor::new(crate::RunOne::new(fut))
            .unwrap()
            .run()
            .unwrap();
        let f = File::open("/dev/zero").unwrap();
        let poll_source = new_poll(f).unwrap();
        let fut = go(poll_source);
        pin_mut!(fut);
        crate::fd_executor::FdExecutor::new(crate::RunOne::new(fut))
            .unwrap()
            .run()
            .unwrap();
    }
    #[test]
    fn writevec() {
        async fn go<F: AsRawFd + Unpin>(async_source: Box<dyn IoSourceExt<F>>) {
            let v = vec![0x55u8; 32];
            let v_ptr = v.as_ptr();
            let ret = async_source.write_from_vec(0, v).await.unwrap();
            assert_eq!(ret.0, 32);
            let ret_v = ret.1;
            assert_eq!(v_ptr, ret_v.as_ptr());
        }
        let f = OpenOptions::new().write(true).open("/dev/null").unwrap();
        let uring_source = new_uring(f).unwrap();
        let fut = go(uring_source);
        pin_mut!(fut);
        crate::uring_executor::URingExecutor::new(crate::RunOne::new(fut))
            .unwrap()
            .run()
            .unwrap();
        let f = OpenOptions::new().write(true).open("/dev/null").unwrap();
        let poll_source = new_poll(f).unwrap();
        let fut = go(poll_source);
        pin_mut!(fut);
        crate::fd_executor::FdExecutor::new(crate::RunOne::new(fut))
            .unwrap()
            .run()
            .unwrap();
    }
    #[test]
    fn readmem() {
        async fn go<F: AsRawFd + Unpin>(async_source: Box<dyn IoSourceExt<F>>) {
            let mem = Rc::new(VecIoWrapper::from(vec![0x55u8; 8192]));
            // Scatter-read into two disjoint regions of the backing memory.
            let ret = async_source
                .read_to_mem(
                    0,
                    Rc::<VecIoWrapper>::clone(&mem),
                    &[
                        MemRegion { offset: 0, len: 32 },
                        MemRegion {
                            offset: 200,
                            len: 56,
                        },
                    ],
                )
                .await
                .unwrap();
            assert_eq!(ret, 32 + 56);
            let vec: Vec<u8> = match Rc::try_unwrap(mem) {
                Ok(v) => v.into(),
                Err(_) => panic!("Too many vec refs"),
            };
            // Only the two requested regions are zeroed; the gap and the tail
            // keep the original 0x55 fill.
            assert!(vec.iter().take(32).all(|&b| b == 0));
            assert!(vec.iter().skip(32).take(168).all(|&b| b == 0x55));
            assert!(vec.iter().skip(200).take(56).all(|&b| b == 0));
            assert!(vec.iter().skip(256).all(|&b| b == 0x55));
        }
        let f = File::open("/dev/zero").unwrap();
        let uring_source = new_uring(f).unwrap();
        let fut = go(uring_source);
        pin_mut!(fut);
        crate::uring_executor::URingExecutor::new(crate::RunOne::new(fut))
            .unwrap()
            .run()
            .unwrap();
        let f = File::open("/dev/zero").unwrap();
        let poll_source = new_poll(f).unwrap();
        let fut = go(poll_source);
        pin_mut!(fut);
        crate::fd_executor::FdExecutor::new(crate::RunOne::new(fut))
            .unwrap()
            .run()
            .unwrap();
    }
    #[test]
    fn writemem() {
        async fn go<F: AsRawFd + Unpin>(async_source: Box<dyn IoSourceExt<F>>) {
            let mem = Rc::new(VecIoWrapper::from(vec![0x55u8; 8192]));
            let ret = async_source
                .write_from_mem(
                    0,
                    Rc::<VecIoWrapper>::clone(&mem),
                    &[MemRegion { offset: 0, len: 32 }],
                )
                .await
                .unwrap();
            assert_eq!(ret, 32);
        }
        let f = OpenOptions::new().write(true).open("/dev/null").unwrap();
        let uring_source = new_uring(f).unwrap();
        let fut = go(uring_source);
        pin_mut!(fut);
        crate::uring_executor::URingExecutor::new(crate::RunOne::new(fut))
            .unwrap()
            .run()
            .unwrap();
        let f = OpenOptions::new().write(true).open("/dev/null").unwrap();
        let poll_source = new_poll(f).unwrap();
        let fut = go(poll_source);
        pin_mut!(fut);
        crate::fd_executor::FdExecutor::new(crate::RunOne::new(fut))
            .unwrap()
            .run()
            .unwrap();
    }
    #[test]
    fn read_u64s() {
        async fn go<F: AsRawFd + Unpin>(async_source: F) -> u64 {
            let source = new(async_source).unwrap();
            source.read_u64().await.unwrap()
        }
        let f = File::open("/dev/zero").unwrap();
        let fut = go(f);
        pin_mut!(fut);
        let val = crate::uring_executor::URingExecutor::new(crate::RunOne::new(fut))
            .unwrap()
            .run()
            .unwrap();
        assert_eq!(val, 0);
    }
    #[test]
    fn read_eventfds() {
        use base::EventFd;
        async fn go<F: AsRawFd + Unpin>(source: Box<dyn IoSourceExt<F>>) -> u64 {
            source.read_u64().await.unwrap()
        }
        // An eventfd read returns the accumulated counter value.
        let eventfd = EventFd::new().unwrap();
        eventfd.write(0x55).unwrap();
        let fut = go(new(eventfd).unwrap());
        pin_mut!(fut);
        let val = crate::uring_executor::URingExecutor::new(crate::RunOne::new(fut))
            .unwrap()
            .run()
            .unwrap();
        assert_eq!(val, 0x55);
        let eventfd = EventFd::new().unwrap();
        eventfd.write(0xaa).unwrap();
        let fut = go(new_poll(eventfd).unwrap());
        pin_mut!(fut);
        let val = crate::fd_executor::FdExecutor::new(crate::RunOne::new(fut))
            .unwrap()
            .run()
            .unwrap();
        assert_eq!(val, 0xaa);
    }
    #[test]
    fn fsync() {
        async fn go<F: AsRawFd + Unpin>(source: Box<dyn IoSourceExt<F>>) {
            let v = vec![0x55u8; 32];
            let v_ptr = v.as_ptr();
            let ret = source.write_from_vec(0, v).await.unwrap();
            assert_eq!(ret.0, 32);
            let ret_v = ret.1;
            assert_eq!(v_ptr, ret_v.as_ptr());
            // fsync after a completed write must succeed on a regular file.
            source.fsync().await.unwrap();
        }
        let f = tempfile::tempfile().unwrap();
        let source = new(f).unwrap();
        let fut = go(source);
        pin_mut!(fut);
        crate::run_one(fut).unwrap();
    }
}
| 33.30814 | 106 | 0.54739 |
efeac7810d7984422e878a939751370c4ff2f929 | 1,092 | //! # Elektra
//! Safe bindings for libelektra.
//!
//! See the [project's readme](https://master.libelektra.org/src/bindings/rust) for an introduction and examples.
extern crate bitflags;
extern crate elektra_sys;
/// `StringKey` and `BinaryKey` are the essential structs that encapsulate name, value and metainfo.
pub mod key;
/// `KeyBuilder` can easily build keys with many meta values.
pub mod keybuilder;
/// Trait to read values from a key.
pub mod readable;
/// A wrapper Trait to make keys readonly.
pub mod readonly;
/// Trait to write values to a key.
pub mod writable;
/// `KeySet` is a set of keys.
pub mod keyset;
/// General methods to access the Key database.
pub mod kdb;
pub use self::key::{BinaryKey, StringKey, MetaIter, NameIter, KeyNameInvalidError, KeyNameReadOnlyError, KeyNotFoundError};
pub use self::keybuilder::KeyBuilder;
pub use self::readable::ReadableKey;
pub use self::readonly::ReadOnly;
pub use self::writable::WriteableKey;
pub use self::keyset::{KeySet, ReadOnlyStringKeyIter, StringKeyIter, Cursor, LookupOption};
pub use self::kdb::{KDB, KDBError};
| 35.225806 | 123 | 0.751832 |
9b52a47d6c2830b3e90af1e85b94c7e716c7be6e | 1,052 | use std::fs::File;
use std::io::Read;
use std::path::Path;
use encoding::{DecoderTrap, Encoding};
use crate::error::LinderaErrorKind;
use crate::LinderaResult;
/// Reads the entire contents of `filename` into a byte vector.
///
/// # Errors
/// Returns a `LinderaErrorKind::Io` error if the file cannot be opened or
/// read.
pub fn read_file(filename: &Path) -> LinderaResult<Vec<u8>> {
    // `fs::read` opens the file and reads it to the end in one call,
    // pre-sizing the buffer from the file metadata instead of growing it.
    std::fs::read(filename).map_err(|err| LinderaErrorKind::Io.with_error(anyhow::anyhow!(err)))
}
/// Reads `filename` and decodes its contents from EUC-JP into a `String`.
///
/// Decoding is strict (`DecoderTrap::Strict`): any byte sequence that is not
/// valid EUC-JP yields a `LinderaErrorKind::Decode` error.
pub fn read_euc_file(filename: &Path) -> LinderaResult<String> {
    let buffer = read_file(filename)?;
    encoding::all::EUC_JP
        .decode(&buffer, DecoderTrap::Strict)
        .map_err(|err| LinderaErrorKind::Decode.with_error(anyhow::anyhow!(err)))
}
/// Reads `filename` and decodes its contents from UTF-8 into a `String`.
///
/// Decoding is strict (`DecoderTrap::Strict`): invalid UTF-8 yields a
/// `LinderaErrorKind::Decode` error.
pub fn read_utf8_file(filename: &Path) -> LinderaResult<String> {
    let buffer = read_file(filename)?;
    encoding::all::UTF_8
        .decode(&buffer, DecoderTrap::Strict)
        .map_err(|err| LinderaErrorKind::Decode.with_error(anyhow::anyhow!(err)))
}
| 31.878788 | 81 | 0.676806 |
db20efc47566aec3e05e4e9e34b74627f8b30ae4 | 3,828 | // Copyright 2018-2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Test utilities
#![cfg(test)]
use primitives::BuildStorage;
use primitives::{traits::{IdentityLookup}, testing::{Digest, DigestItem, Header}};
use substrate_primitives::{H256, Blake2Hasher};
use runtime_io;
use srml_support::impl_outer_origin;
use crate::{GenesisConfig, Module, Trait};
impl_outer_origin!{
    pub enum Origin for Runtime {}
}
// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Runtime;
// Concrete associated types for the system module, chosen as simple
// primitives suitable for tests (u64 account ids, identity lookup).
impl system::Trait for Runtime {
    type Origin = Origin;
    type Index = u64;
    type BlockNumber = u64;
    type Hash = H256;
    type Hashing = ::primitives::traits::BlakeTwo256;
    type Digest = Digest;
    type AccountId = u64;
    type Lookup = IdentityLookup<Self::AccountId>;
    type Header = Header;
    type Event = ();
    type Log = DigestItem;
}
// Balances module configuration: all hooks and payment sinks are no-ops.
impl Trait for Runtime {
    type Balance = u64;
    type OnFreeBalanceZero = ();
    type OnNewAccount = ();
    type Event = ();
    type TransactionPayment = ();
    type DustRemoval = ();
    type TransferPayment = ();
}
// Builder for test externalities: configures fees, the existential deposit,
// pre-funded balances ("monied") and vesting before `build()` materializes
// the genesis storage.
pub struct ExtBuilder {
    transaction_base_fee: u64,
    transaction_byte_fee: u64,
    existential_deposit: u64,
    transfer_fee: u64,
    creation_fee: u64,
    // When true, accounts 1-4 are pre-funded in `build()`.
    monied: bool,
    vesting: bool,
}
// All fees zero, no pre-funded accounts, no vesting.
impl Default for ExtBuilder {
    fn default() -> Self {
        Self {
            transaction_base_fee: 0,
            transaction_byte_fee: 0,
            existential_deposit: 0,
            transfer_fee: 0,
            creation_fee: 0,
            monied: false,
            vesting: false,
        }
    }
}
impl ExtBuilder {
    // Chainable setters; each consumes and returns the builder.
    pub fn existential_deposit(mut self, existential_deposit: u64) -> Self {
        self.existential_deposit = existential_deposit;
        self
    }
    #[allow(dead_code)]
    pub fn transfer_fee(mut self, transfer_fee: u64) -> Self {
        self.transfer_fee = transfer_fee;
        self
    }
    pub fn creation_fee(mut self, creation_fee: u64) -> Self {
        self.creation_fee = creation_fee;
        self
    }
    pub fn transaction_fees(mut self, base_fee: u64, byte_fee: u64) -> Self {
        self.transaction_base_fee = base_fee;
        self.transaction_byte_fee = byte_fee;
        self
    }
    pub fn monied(mut self, monied: bool) -> Self {
        self.monied = monied;
        // Funded accounts only make sense with a non-zero existential
        // deposit, so force a minimum of 1.
        if self.existential_deposit == 0 {
            self.existential_deposit = 1;
        }
        self
    }
    pub fn vesting(mut self, vesting: bool) -> Self {
        self.vesting = vesting;
        self
    }
    // Builds the genesis storage: system defaults extended with this
    // module's configuration, then converted into test externalities.
    pub fn build(self) -> runtime_io::TestExternalities<Blake2Hasher> {
        let mut t = system::GenesisConfig::<Runtime>::default().build_storage().unwrap().0;
        t.extend(GenesisConfig::<Runtime> {
            transaction_base_fee: self.transaction_base_fee,
            transaction_byte_fee: self.transaction_byte_fee,
            // Accounts 1-4 receive 10x..40x the existential deposit.
            balances: if self.monied {
                vec![(1, 10 * self.existential_deposit), (2, 20 * self.existential_deposit), (3, 30 * self.existential_deposit), (4, 40 * self.existential_deposit)]
            } else {
                vec![]
            },
            existential_deposit: self.existential_deposit,
            transfer_fee: self.transfer_fee,
            creation_fee: self.creation_fee,
            // Vesting schedules only apply when balances exist.
            vesting: if self.vesting && self.monied {
                vec![(1, 0, 10), (2, 10, 20)]
            } else {
                vec![]
            },
        }.build_storage().unwrap().0);
        t.into()
    }
}
// Convenience aliases for the modules instantiated with this test runtime.
pub type System = system::Module<Runtime>;
pub type Balances = Module<Runtime>;
| 28.355556 | 152 | 0.712905 |
50fd68f291a6649e750b2a773cdef891b575bd62 | 1,950 | //! VIR (Verifier Intermediate Representation) is an abstract syntax that represents
//! the elements of Rust code relevant to verification.
//! Compared to the original Rust code, VIR focuses on values,
//! rather than on how values are stored in memory.
//! For example, Box, Rc, and Arc are irrelevant to VIR and are not present in VIR.
//! We rely on Rust for type checking and lifetime checking -- VIR does not
//! attempt to replicate these (although it does do "mode" checking to check correct
//! usage of the 3 modes: exec, proof, and spec).
//!
//! The vir crate defines both the abstract syntax and the transformations from
//! the abstract syntax into the AIR verification format, which is then
//! verified by the Z3 SMT solver:
//!
//! Rust-AST --> Rust-HIR --> VIR --> AIR --> Z3-SMT
//!
//! VIR actually consists of two distinct abstract syntax trees, VIR-AST and VIR-SST:
//!
//! Rust-AST --> Rust-HIR --> VIR-AST --> VIR-SST --> AIR --> Z3-SMT
//!
//! VIR-AST keeps the original tree structure of mutually nested Rust HIR expressions and statements.
//! (Note: we chose to translate Rust-HIR --> VIR-AST rather than Rust-MIR --> VIR-AST to preserve
//! this tree structure as much as possible.)
//! VIR-SST, on the other hand, disallows statements inside expressions and disallows side
//! effects inside expressions (though it otherwise allows arbitrarily complex nested expressions).
//! The generated AIR code closely follows the structure of the VIR-SST code.
//!
//! To ensure that VIR stays simple and easy to use, the vir crate does not depend on rustc.
#![feature(or_patterns)]
pub mod ast;
mod ast_to_sst;
pub mod ast_util;
mod ast_visitor;
pub mod context;
pub mod datatype_to_air;
pub mod def;
pub mod func_to_air;
pub mod headers;
pub mod model;
pub mod modes;
mod prelude;
mod recursion;
mod scc;
mod sst;
mod sst_to_air;
mod sst_vars;
mod sst_visitor;
mod triggers;
mod triggers_auto;
mod util;
pub mod well_formed;
| 36.792453 | 101 | 0.733846 |
d9c177a4ef5d79cc2586c7e6251c294cf36d5a3d | 453 | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
//
// A test that uses built-in contracts for the Vec struct.
#[macro_use]
extern crate mirai_annotations;
// Exercises MIRAI's built-in contracts for `Vec`: the `verify!` macro (from
// `mirai_annotations`, imported above) states properties for the analyzer to
// check.
pub fn main() {
    let mut v: Vec<i32> = Vec::new();
    verify!(v.len() == 0);
    let old_len = v.len();
    v.push(0);
    // Pushing one element grows the length by exactly one.
    verify!(v.len() == old_len + 1);
}
| 23.842105 | 66 | 0.644592 |
64356862d2addf1e6e18673acb94425556a42a90 | 2,486 | use crate::{print, println, serial_print, serial_println};
use core::fmt;
// Per-sink log thresholds: a message is emitted when its level is <= the
// sink's threshold (derived `Ord` makes `Error` the smallest level). Spin
// RwLocks allow runtime reconfiguration via `set_level`.
static CONSOLE_LOG_LEVEL: spin::RwLock<Level> = spin::RwLock::new(Level::Warn);
static SERIAL_LOG_LEVEL: spin::RwLock<Level> = spin::RwLock::new(Level::Info);
// Severity levels, most severe first; the derived `Ord` therefore makes
// `Error` the smallest value, which the `level <= threshold` checks in
// `_log` rely on.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub(crate) enum Level {
    Error,
    Warn,
    Info,
    Debug,
    Trace,
}
impl fmt::Display for Level {
    /// Formats the level as its upper-case name, e.g. "ERROR".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let s = match self {
            Level::Error => "ERROR",
            Level::Warn => "WARN",
            Level::Info => "INFO",
            Level::Debug => "DEBUG",
            Level::Trace => "TRACE",
        };
        // `write_str` writes the static string directly, avoiding the
        // `write!`/`format_args!` machinery for a plain literal.
        f.write_str(s)
    }
}
// Sets the log thresholds for the console and serial sinks; takes effect for
// all subsequent `_log` calls.
pub(crate) fn set_level(console_level: Level, serial_level: Level) {
    *CONSOLE_LOG_LEVEL.write() = console_level;
    *SERIAL_LOG_LEVEL.write() = serial_level;
}
#[doc(hidden)]
// Shared implementation behind the logging macros. `level` is checked
// independently against each sink's threshold. `cont_line` omits the
// `[LEVEL]` prefix (continuing a previous message); `newline` selects
// between the println!- and print!-style macros.
pub(crate) fn _log(
    level: Level,
    args: fmt::Arguments,
    file: &str,
    line: u32,
    cont_line: bool,
    newline: bool,
) {
    // Serial output includes the file:line call site; console output does not.
    if level <= *SERIAL_LOG_LEVEL.read() {
        match (cont_line, newline) {
            (true, true) => serial_println!("{}", args),
            (true, false) => serial_print!("{}", args),
            (false, true) => serial_println!("[{}] {}:{} {}", level, file, line, args),
            (false, false) => serial_print!("[{}] {}:{} {}", level, file, line, args),
        }
    }
    if level <= *CONSOLE_LOG_LEVEL.read() {
        match (cont_line, newline) {
            (true, true) => println!("{}", args),
            (true, false) => print!("{}", args),
            (false, true) => println!("[{}] {}", level, args),
            (false, false) => print!("[{}] {}", level, args),
        }
    }
}
// Core logging macro: routes through `_log`, capturing the call site's file
// and line via `file!()`/`line!()`.
#[macro_export]
macro_rules! log {
    ($level:expr, $($arg:tt)*) => {
        $crate::log::_log($level, format_args!($($arg)*), file!(), line!(), false, true);
    }
}
// Convenience wrappers, one per severity level.
#[macro_export]
macro_rules! error {
    ($($arg:tt)*) => ($crate::log!($crate::log::Level::Error, $($arg)*));
}
#[macro_export]
macro_rules! warn {
    ($($arg:tt)*) => ($crate::log!($crate::log::Level::Warn, $($arg)*));
}
#[macro_export]
macro_rules! info {
    ($($arg:tt)*) => ($crate::log!($crate::log::Level::Info, $($arg)*));
}
#[macro_export]
macro_rules! debug {
    ($($arg:tt)*) => ($crate::log!($crate::log::Level::Debug, $($arg)*));
}
#[macro_export]
macro_rules! trace {
    ($($arg:tt)*) => ($crate::log!($crate::log::Level::Trace, $($arg)*));
}
| 28.25 | 89 | 0.53218 |
4bfa4ec25a6b8ec9cfb18d353182082899a7a047 | 613 | /*
* Python SDK for Opsgenie REST API
*
* Python SDK for Opsgenie REST API
*
* The version of the OpenAPI document: 2.0.0
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateAlertDescriptionPayload {
    /// Description of the alert
    #[serde(rename = "description")]
    pub description: String,
}
impl UpdateAlertDescriptionPayload {
    /// Builds a payload carrying the new alert description.
    pub fn new(description: String) -> Self {
        Self { description }
    }
}
| 20.433333 | 70 | 0.686786 |
75453d97c930600035bd11d2d053d00f1adbd42d | 387 | // primitive_types3.rs
// Create an array with at least 100 elements in it where the ??? is.
// Execute `rustlings hint primitive_types3` for hints!
fn main() {
    // The exercise asks for an array with at least 100 elements; 99 made the
    // check below fall into the "Meh" branch.
    let a: [i32; 100] = [0; 100];
    if a.len() >= 100 {
        println!("Wow, that's a big array!");
    } else {
        println!("Meh, I eat arrays like that for breakfast.");
    }
}
| 25.8 | 69 | 0.568475 |
9cadd483d5ad078429aa1c0f615273b26bd64cdc | 7,290 | //
// WASM-4: https://wasm4.org/docs
#![allow(unused)]
// ┌───────────────────────────────────────────────────────────────────────────┐
// │ │
// │ Platform Constants │
// │ │
// └───────────────────────────────────────────────────────────────────────────┘
/// Screen width/height in pixels.
pub const SCREEN_SIZE: i32 = 160;
// ── Memory Addresses ─────────────────────────────────────────────────────────
// Raw pointers into WASM-4's fixed memory map.
pub static mut PALETTE: *mut [u32; 4] = 0x04 as *mut [u32; 4];
pub const DRAW_COLORS: *mut u16 = 0x14 as *mut u16;
pub const GAMEPAD1: *const u8 = 0x16 as *const u8;
pub const GAMEPAD2: *const u8 = 0x17 as *const u8;
pub const GAMEPAD3: *const u8 = 0x18 as *const u8;
pub const GAMEPAD4: *const u8 = 0x19 as *const u8;
pub const MOUSE_X: *const i16 = 0x1a as *const i16;
pub const MOUSE_Y: *const i16 = 0x1c as *const i16;
pub const MOUSE_BUTTONS: *const u8 = 0x1e as *const u8;
pub const SYSTEM_FLAGS: *mut u8 = 0x1f as *mut u8;
pub static mut FRAMEBUFFER: *mut [u8; 6400] = 0xa0 as *mut [u8; 6400];
// Gamepad button bit masks (for the GAMEPAD* bytes).
pub const BUTTON_1: u8 = 1;
pub const BUTTON_2: u8 = 2;
pub const BUTTON_LEFT: u8 = 16;
pub const BUTTON_RIGHT: u8 = 32;
pub const BUTTON_UP: u8 = 64;
pub const BUTTON_DOWN: u8 = 128;
// Mouse button bit masks (for MOUSE_BUTTONS).
pub const MOUSE_LEFT: u8 = 1;
pub const MOUSE_RIGHT: u8 = 2;
pub const MOUSE_MIDDLE: u8 = 4;
// Bit flags for SYSTEM_FLAGS.
pub const SYSTEM_PRESERVE_FRAMEBUFFER: u8 = 1;
pub const SYSTEM_HIDE_GAMEPAD_OVERLAY: u8 = 2;
// ┌───────────────────────────────────────────────────────────────────────────┐
// │ │
// │ Drawing Functions │
// │ │
// └───────────────────────────────────────────────────────────────────────────┘
/// Copies pixels to the framebuffer.
///
/// `flags` selects the sprite pixel format and transforms (see the `BLIT_*`
/// constants below).
pub fn blit(sprite: &[u8], x: i32, y: i32, width: u32, height: u32, flags: u32) {
    unsafe { extern_blit(sprite.as_ptr(), x, y, width, height, flags) }
}
// Host-provided import; the safe wrapper above passes a raw pointer into the
// sprite slice.
extern "C" {
    #[link_name = "blit"]
    fn extern_blit(sprite: *const u8, x: i32, y: i32, width: u32, height: u32, flags: u32);
}
/// Copies a subregion within a larger sprite atlas to the framebuffer.
#[allow(clippy::too_many_arguments)]
/// Copies a subregion within a larger sprite atlas to the framebuffer.
///
/// `(src_x, src_y)` is the top-left corner of the subregion inside the atlas
/// and `stride` is the atlas width.
pub fn blit_sub(
    sprite: &[u8],
    x: i32,
    y: i32,
    width: u32,
    height: u32,
    src_x: u32,
    src_y: u32,
    stride: u32,
    flags: u32,
) {
    unsafe {
        extern_blit_sub(
            sprite.as_ptr(),
            x,
            y,
            width,
            height,
            src_x,
            src_y,
            stride,
            flags,
        )
    }
}
extern "C" {
    #[link_name = "blitSub"]
    fn extern_blit_sub(
        sprite: *const u8,
        x: i32,
        y: i32,
        width: u32,
        height: u32,
        src_x: u32,
        src_y: u32,
        stride: u32,
        flags: u32,
    );
}
// Flags for `blit`/`blit_sub`: pixel format plus flip/rotate transforms.
pub const BLIT_2BPP: u32 = 1;
pub const BLIT_1BPP: u32 = 0;
pub const BLIT_FLIP_X: u32 = 2;
pub const BLIT_FLIP_Y: u32 = 4;
pub const BLIT_ROTATE: u32 = 8;
/// Draws a line between two points.
pub fn line(x1: i32, y1: i32, x2: i32, y2: i32) {
    unsafe { extern_line(x1, y1, x2, y2) }
}
// Host-provided import behind the safe wrapper above.
extern "C" {
    #[link_name = "line"]
    fn extern_line(x1: i32, y1: i32, x2: i32, y2: i32);
}
/// Draws an oval (or circle).
///
/// `(x, y)` is the top-left corner of the bounding box.
pub fn oval(x: i32, y: i32, width: u32, height: u32) {
    unsafe { extern_oval(x, y, width, height) }
}
extern "C" {
    #[link_name = "oval"]
    fn extern_oval(x: i32, y: i32, width: u32, height: u32);
}
/// Draws a rectangle.
pub fn rect(x: i32, y: i32, width: u32, height: u32) {
    unsafe { extern_rect(x, y, width, height) }
}
extern "C" {
    #[link_name = "rect"]
    fn extern_rect(x: i32, y: i32, width: u32, height: u32);
}
/// Draws text using the built-in system font.
///
/// Uses the `textUtf8` host import, passing an explicit pointer/length pair
/// (Rust strings are not NUL-terminated).
pub fn text<T: AsRef<str>>(text: T, x: i32, y: i32) {
    let text_ref = text.as_ref();
    unsafe { extern_text(text_ref.as_ptr(), text_ref.len(), x, y) }
}
extern "C" {
    #[link_name = "textUtf8"]
    fn extern_text(text: *const u8, length: usize, x: i32, y: i32);
}
/// Draws a vertical line
pub fn vline(x: i32, y: i32, len: u32) {
    unsafe {
        extern_vline(x, y, len);
    }
}
extern "C" {
    #[link_name = "vline"]
    fn extern_vline(x: i32, y: i32, len: u32);
}
/// Draws a horizontal line
pub fn hline(x: i32, y: i32, len: u32) {
    unsafe {
        extern_hline(x, y, len);
    }
}
extern "C" {
    #[link_name = "hline"]
    fn extern_hline(x: i32, y: i32, len: u32);
}
// ┌───────────────────────────────────────────────────────────────────────────┐
// │ │
// │ Sound Functions │
// │ │
// └───────────────────────────────────────────────────────────────────────────┘
/// Plays a sound tone.
///
/// `flags` selects the channel and mode (see the `TONE_*` constants below).
pub fn tone(frequency: u32, duration: u32, volume: u32, flags: u32) {
    unsafe { extern_tone(frequency, duration, volume, flags) }
}
extern "C" {
    #[link_name = "tone"]
    fn extern_tone(frequency: u32, duration: u32, volume: u32, flags: u32);
}
// Channel selectors for the `flags` argument.
pub const TONE_PULSE1: u32 = 0;
pub const TONE_PULSE2: u32 = 1;
pub const TONE_TRIANGLE: u32 = 2;
pub const TONE_NOISE: u32 = 3;
// Mode bits for the `flags` argument.
pub const TONE_MODE1: u32 = 0;
pub const TONE_MODE2: u32 = 4;
pub const TONE_MODE3: u32 = 8;
pub const TONE_MODE4: u32 = 12;
// ┌───────────────────────────────────────────────────────────────────────────┐
// │ │
// │ Storage Functions │
// │ │
// └───────────────────────────────────────────────────────────────────────────┘
// Persistent-storage imports; both return the number of bytes transferred.
extern "C" {
    /// Reads up to `size` bytes from persistent storage into the pointer `dest`.
    pub fn diskr(dest: *mut u8, size: u32) -> u32;
    /// Writes up to `size` bytes from the pointer `src` into persistent storage.
    pub fn diskw(src: *const u8, size: u32) -> u32;
}
// ┌───────────────────────────────────────────────────────────────────────────┐
// │ │
// │ Other Functions │
// │ │
// └───────────────────────────────────────────────────────────────────────────┘
/// Prints a message to the debug console.
///
/// Uses the `traceUtf8` host import with an explicit pointer/length pair.
pub fn trace<T: AsRef<str>>(text: T) {
    let text_ref = text.as_ref();
    unsafe { extern_trace(text_ref.as_ptr(), text_ref.len()) }
}
extern "C" {
    #[link_name = "traceUtf8"]
    fn extern_trace(trace: *const u8, length: usize);
}
| 32.690583 | 91 | 0.419753 |
48e6cae706e845f28b92bce3b5ae0233a158ba41 | 1,404 | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[deriving(PartialEq, Eq, PartialOrd, Ord)]
enum E<T> {
E0,
E1(T),
E2(T,T)
}
pub fn main() {
let e0 = E0;
let e11 = E1(1);
let e12 = E1(2);
let e21 = E2(1, 1);
let e22 = E2(1, 2);
// in order for both PartialOrd and Ord
let es = [e0, e11, e12, e21, e22];
for (i, e1) in es.iter().enumerate() {
for (j, e2) in es.iter().enumerate() {
let ord = i.cmp(&j);
let eq = i == j;
let lt = i < j;
let le = i <= j;
let gt = i > j;
let ge = i >= j;
// PartialEq
assert_eq!(*e1 == *e2, eq);
assert_eq!(*e1 != *e2, !eq);
// PartialOrd
assert_eq!(*e1 < *e2, lt);
assert_eq!(*e1 > *e2, gt);
assert_eq!(*e1 <= *e2, le);
assert_eq!(*e1 >= *e2, ge);
// Ord
assert_eq!(e1.cmp(e2), ord);
}
}
}
| 26 | 68 | 0.513533 |
dbbcc66c48720e7c4dc7ae80e45540f857556314 | 17,754 | use llvm_sys::core::{LLVMIsAInstruction, LLVMTypeOf, LLVMGetTypeKind};
use llvm_sys::LLVMTypeKind;
use llvm_sys::prelude::LLVMValueRef;
use crate::types::{AnyTypeEnum, BasicTypeEnum};
use crate::values::traits::AsValueRef;
use crate::values::{IntValue, FunctionValue, PointerValue, VectorValue, ArrayValue, StructValue, FloatValue, PhiValue, InstructionValue, MetadataValue};
use std::convert::TryFrom;
// Generates an enum over a fixed set of value wrapper types, plus:
// - an `AsValueRef` impl forwarding to the wrapped value,
// - `From<T>` conversions from each wrapped type into the enum,
// - symmetric `PartialEq` impls between the enum and each wrapped type
//   (comparing the underlying `LLVMValueRef` pointers),
// - fallible `TryFrom<Enum>` back to each wrapped type.
macro_rules! enum_value_set {
    ($enum_name:ident: $($args:ident),*) => (
        #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
        pub enum $enum_name<'ctx> {
            $(
                $args($args<'ctx>),
            )*
        }
        impl AsValueRef for $enum_name<'_> {
            fn as_value_ref(&self) -> LLVMValueRef {
                match *self {
                    $(
                        $enum_name::$args(ref t) => t.as_value_ref(),
                    )*
                }
            }
        }
        $(
            impl<'ctx> From<$args<'ctx>> for $enum_name<'ctx> {
                fn from(value: $args) -> $enum_name {
                    $enum_name::$args(value)
                }
            }
            impl<'ctx> PartialEq<$args<'ctx>> for $enum_name<'ctx> {
                fn eq(&self, other: &$args<'ctx>) -> bool {
                    self.as_value_ref() == other.as_value_ref()
                }
            }
            impl<'ctx> PartialEq<$enum_name<'ctx>> for $args<'ctx> {
                fn eq(&self, other: &$enum_name<'ctx>) -> bool {
                    self.as_value_ref() == other.as_value_ref()
                }
            }
            impl<'ctx> TryFrom<$enum_name<'ctx>> for $args<'ctx> {
                type Error = ();
                fn try_from(value: $enum_name<'ctx>) -> Result<Self, Self::Error> {
                    match value {
                        $enum_name::$args(ty) => Ok(ty),
                        _ => Err(()),
                    }
                }
            }
        )*
    );
}
// Concrete value-enum families generated by the macro above.
enum_value_set! {AggregateValueEnum: ArrayValue, StructValue}
enum_value_set! {AnyValueEnum: ArrayValue, IntValue, FloatValue, PhiValue, FunctionValue, PointerValue, StructValue, VectorValue, InstructionValue}
enum_value_set! {BasicValueEnum: ArrayValue, IntValue, FloatValue, PointerValue, StructValue, VectorValue}
enum_value_set! {BasicMetadataValueEnum: ArrayValue, IntValue, FloatValue, PointerValue, StructValue, VectorValue, MetadataValue}
impl<'ctx> AnyValueEnum<'ctx> {
    /// Wraps a raw `LLVMValueRef` in the matching `AnyValueEnum` variant,
    /// dispatching on the LLVM type kind of the value.
    ///
    /// # Panics
    /// Panics for metadata values, for void-typed values that are not
    /// instructions, and for any other unsupported type kind.
    pub(crate) fn new(value: LLVMValueRef) -> Self {
        let type_kind = unsafe {
            LLVMGetTypeKind(LLVMTypeOf(value))
        };
        match type_kind {
            // All floating-point widths collapse into one FloatValue variant.
            LLVMTypeKind::LLVMFloatTypeKind |
            LLVMTypeKind::LLVMFP128TypeKind |
            LLVMTypeKind::LLVMDoubleTypeKind |
            LLVMTypeKind::LLVMHalfTypeKind |
            LLVMTypeKind::LLVMX86_FP80TypeKind |
            LLVMTypeKind::LLVMPPC_FP128TypeKind => AnyValueEnum::FloatValue(FloatValue::new(value)),
            LLVMTypeKind::LLVMIntegerTypeKind => AnyValueEnum::IntValue(IntValue::new(value)),
            LLVMTypeKind::LLVMStructTypeKind => AnyValueEnum::StructValue(StructValue::new(value)),
            LLVMTypeKind::LLVMPointerTypeKind => AnyValueEnum::PointerValue(PointerValue::new(value)),
            LLVMTypeKind::LLVMArrayTypeKind => AnyValueEnum::ArrayValue(ArrayValue::new(value)),
            LLVMTypeKind::LLVMVectorTypeKind => AnyValueEnum::VectorValue(VectorValue::new(value)),
            LLVMTypeKind::LLVMFunctionTypeKind => AnyValueEnum::FunctionValue(FunctionValue::new(value).unwrap()),
            // Void-typed values are only representable as instructions.
            LLVMTypeKind::LLVMVoidTypeKind => {
                if unsafe { LLVMIsAInstruction(value) }.is_null() {
                    panic!("Void value isn't an instruction.");
                }
                AnyValueEnum::InstructionValue(InstructionValue::new(value))
            },
            LLVMTypeKind::LLVMMetadataTypeKind => panic!("Metadata values are not supported as AnyValue's."),
            _ => panic!("The given type is not supported.")
        }
    }
pub fn get_type(&self) -> AnyTypeEnum<'ctx> {
let type_ = unsafe {
LLVMTypeOf(self.as_value_ref())
};
AnyTypeEnum::new(type_)
}
pub fn is_array_value(self) -> bool {
if let AnyValueEnum::ArrayValue(_) = self {
true
} else {
false
}
}
pub fn is_int_value(self) -> bool {
if let AnyValueEnum::IntValue(_) = self {
true
} else {
false
}
}
pub fn is_float_value(self) -> bool {
if let AnyValueEnum::FloatValue(_) = self {
true
} else {
false
}
}
pub fn is_phi_value(self) -> bool {
if let AnyValueEnum::PhiValue(_) = self {
true
} else {
false
}
}
pub fn is_function_value(self) -> bool {
if let AnyValueEnum::FunctionValue(_) = self {
true
} else {
false
}
}
pub fn is_pointer_value(self) -> bool {
if let AnyValueEnum::PointerValue(_) = self {
true
} else {
false
}
}
pub fn is_struct_value(self) -> bool {
if let AnyValueEnum::StructValue(_) = self {
true
} else {
false
}
}
pub fn is_vector_value(self) -> bool {
if let AnyValueEnum::VectorValue(_) = self {
true
} else {
false
}
}
pub fn is_instruction_value(self) -> bool {
if let AnyValueEnum::InstructionValue(_) = self {
true
} else {
false
}
}
pub fn into_array_value(self) -> ArrayValue<'ctx> {
if let AnyValueEnum::ArrayValue(v) = self {
v
} else {
panic!("Found {:?} but expected a different variant", self)
}
}
pub fn into_int_value(self) -> IntValue<'ctx> {
if let AnyValueEnum::IntValue(v) = self {
v
} else {
panic!("Found {:?} but expected a different variant", self)
}
}
pub fn into_float_value(self) -> FloatValue<'ctx> {
if let AnyValueEnum::FloatValue(v) = self {
v
} else {
panic!("Found {:?} but expected a different variant", self)
}
}
pub fn into_phi_value(self) -> PhiValue<'ctx> {
if let AnyValueEnum::PhiValue(v) = self {
v
} else {
panic!("Found {:?} but expected a different variant", self)
}
}
pub fn into_function_value(self) -> FunctionValue<'ctx> {
if let AnyValueEnum::FunctionValue(v) = self {
v
} else {
panic!("Found {:?} but expected a different variant", self)
}
}
pub fn into_pointer_value(self) -> PointerValue<'ctx> {
if let AnyValueEnum::PointerValue(v) = self {
v
} else {
panic!("Found {:?} but expected a different variant", self)
}
}
pub fn into_struct_value(self) -> StructValue<'ctx> {
if let AnyValueEnum::StructValue(v) = self {
v
} else {
panic!("Found {:?} but expected a different variant", self)
}
}
pub fn into_vector_value(self) -> VectorValue<'ctx> {
if let AnyValueEnum::VectorValue(v) = self {
v
} else {
panic!("Found {:?} but expected a different variant", self)
}
}
pub fn into_instruction_value(self) -> InstructionValue<'ctx> {
if let AnyValueEnum::InstructionValue(v) = self {
v
} else {
panic!("Found {:?} but expected a different variant", self)
}
}
}
impl<'ctx> BasicValueEnum<'ctx> {
    /// Wraps a raw `LLVMValueRef` in the matching basic-value variant,
    /// dispatching on the LLVM type kind of the value.
    ///
    /// Unreachable for any type kind that is not a basic type.
    pub(crate) fn new(value: LLVMValueRef) -> Self {
        let type_kind = unsafe {
            LLVMGetTypeKind(LLVMTypeOf(value))
        };
        match type_kind {
            // Every floating point width collapses into the one FloatValue variant.
            LLVMTypeKind::LLVMFloatTypeKind |
            LLVMTypeKind::LLVMFP128TypeKind |
            LLVMTypeKind::LLVMDoubleTypeKind |
            LLVMTypeKind::LLVMHalfTypeKind |
            LLVMTypeKind::LLVMX86_FP80TypeKind |
            LLVMTypeKind::LLVMPPC_FP128TypeKind => BasicValueEnum::FloatValue(FloatValue::new(value)),
            LLVMTypeKind::LLVMIntegerTypeKind => BasicValueEnum::IntValue(IntValue::new(value)),
            LLVMTypeKind::LLVMStructTypeKind => BasicValueEnum::StructValue(StructValue::new(value)),
            LLVMTypeKind::LLVMPointerTypeKind => BasicValueEnum::PointerValue(PointerValue::new(value)),
            LLVMTypeKind::LLVMArrayTypeKind => BasicValueEnum::ArrayValue(ArrayValue::new(value)),
            LLVMTypeKind::LLVMVectorTypeKind => BasicValueEnum::VectorValue(VectorValue::new(value)),
            _ => unreachable!("The given type is not a basic type."),
        }
    }
    /// Returns the LLVM type of the wrapped value.
    pub fn get_type(&self) -> BasicTypeEnum<'ctx> {
        let type_ = unsafe {
            LLVMTypeOf(self.as_value_ref())
        };
        BasicTypeEnum::new(type_)
    }
    /// Whether this value is an `ArrayValue`.
    pub fn is_array_value(self) -> bool {
        matches!(self, BasicValueEnum::ArrayValue(_))
    }
    /// Whether this value is an `IntValue`.
    pub fn is_int_value(self) -> bool {
        matches!(self, BasicValueEnum::IntValue(_))
    }
    /// Whether this value is a `FloatValue`.
    pub fn is_float_value(self) -> bool {
        matches!(self, BasicValueEnum::FloatValue(_))
    }
    /// Whether this value is a `PointerValue`.
    pub fn is_pointer_value(self) -> bool {
        matches!(self, BasicValueEnum::PointerValue(_))
    }
    /// Whether this value is a `StructValue`.
    pub fn is_struct_value(self) -> bool {
        matches!(self, BasicValueEnum::StructValue(_))
    }
    /// Whether this value is a `VectorValue`.
    pub fn is_vector_value(self) -> bool {
        matches!(self, BasicValueEnum::VectorValue(_))
    }
    /// Unwraps an `ArrayValue`, panicking on any other variant.
    pub fn into_array_value(self) -> ArrayValue<'ctx> {
        match self {
            BasicValueEnum::ArrayValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
    /// Unwraps an `IntValue`, panicking on any other variant.
    pub fn into_int_value(self) -> IntValue<'ctx> {
        match self {
            BasicValueEnum::IntValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
    /// Unwraps a `FloatValue`, panicking on any other variant.
    pub fn into_float_value(self) -> FloatValue<'ctx> {
        match self {
            BasicValueEnum::FloatValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
    /// Unwraps a `PointerValue`, panicking on any other variant.
    pub fn into_pointer_value(self) -> PointerValue<'ctx> {
        match self {
            BasicValueEnum::PointerValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
    /// Unwraps a `StructValue`, panicking on any other variant.
    pub fn into_struct_value(self) -> StructValue<'ctx> {
        match self {
            BasicValueEnum::StructValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
    /// Unwraps a `VectorValue`, panicking on any other variant.
    pub fn into_vector_value(self) -> VectorValue<'ctx> {
        match self {
            BasicValueEnum::VectorValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
}
impl<'ctx> AggregateValueEnum<'ctx> {
    /// Wraps a raw `LLVMValueRef` in the matching aggregate-value variant.
    ///
    /// Unreachable for any type kind other than array or struct.
    pub(crate) fn new(value: LLVMValueRef) -> Self {
        let type_kind = unsafe {
            LLVMGetTypeKind(LLVMTypeOf(value))
        };
        match type_kind {
            LLVMTypeKind::LLVMArrayTypeKind => AggregateValueEnum::ArrayValue(ArrayValue::new(value)),
            LLVMTypeKind::LLVMStructTypeKind => AggregateValueEnum::StructValue(StructValue::new(value)),
            _ => unreachable!("The given type is not an aggregate type."),
        }
    }
    /// Whether this value is an `ArrayValue`.
    pub fn is_array_value(self) -> bool {
        matches!(self, AggregateValueEnum::ArrayValue(_))
    }
    /// Whether this value is a `StructValue`.
    pub fn is_struct_value(self) -> bool {
        matches!(self, AggregateValueEnum::StructValue(_))
    }
    /// Unwraps an `ArrayValue`, panicking on any other variant.
    pub fn into_array_value(self) -> ArrayValue<'ctx> {
        match self {
            AggregateValueEnum::ArrayValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
    /// Unwraps a `StructValue`, panicking on any other variant.
    pub fn into_struct_value(self) -> StructValue<'ctx> {
        match self {
            AggregateValueEnum::StructValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
}
impl<'ctx> BasicMetadataValueEnum<'ctx> {
    /// Wraps a raw `LLVMValueRef` in the matching variant: any basic value
    /// kind, or metadata.
    ///
    /// Unreachable for type kinds outside that set.
    pub(crate) fn new(value: LLVMValueRef) -> Self {
        let type_kind = unsafe {
            LLVMGetTypeKind(LLVMTypeOf(value))
        };
        match type_kind {
            // Every floating point width collapses into the one FloatValue variant.
            LLVMTypeKind::LLVMFloatTypeKind |
            LLVMTypeKind::LLVMFP128TypeKind |
            LLVMTypeKind::LLVMDoubleTypeKind |
            LLVMTypeKind::LLVMHalfTypeKind |
            LLVMTypeKind::LLVMX86_FP80TypeKind |
            LLVMTypeKind::LLVMPPC_FP128TypeKind => BasicMetadataValueEnum::FloatValue(FloatValue::new(value)),
            LLVMTypeKind::LLVMIntegerTypeKind => BasicMetadataValueEnum::IntValue(IntValue::new(value)),
            LLVMTypeKind::LLVMStructTypeKind => BasicMetadataValueEnum::StructValue(StructValue::new(value)),
            LLVMTypeKind::LLVMPointerTypeKind => BasicMetadataValueEnum::PointerValue(PointerValue::new(value)),
            LLVMTypeKind::LLVMArrayTypeKind => BasicMetadataValueEnum::ArrayValue(ArrayValue::new(value)),
            LLVMTypeKind::LLVMVectorTypeKind => BasicMetadataValueEnum::VectorValue(VectorValue::new(value)),
            LLVMTypeKind::LLVMMetadataTypeKind => BasicMetadataValueEnum::MetadataValue(MetadataValue::new(value)),
            _ => unreachable!("Unsupported type"),
        }
    }
    /// Whether this value is an `ArrayValue`.
    pub fn is_array_value(self) -> bool {
        matches!(self, BasicMetadataValueEnum::ArrayValue(_))
    }
    /// Whether this value is an `IntValue`.
    pub fn is_int_value(self) -> bool {
        matches!(self, BasicMetadataValueEnum::IntValue(_))
    }
    /// Whether this value is a `FloatValue`.
    pub fn is_float_value(self) -> bool {
        matches!(self, BasicMetadataValueEnum::FloatValue(_))
    }
    /// Whether this value is a `PointerValue`.
    pub fn is_pointer_value(self) -> bool {
        matches!(self, BasicMetadataValueEnum::PointerValue(_))
    }
    /// Whether this value is a `StructValue`.
    pub fn is_struct_value(self) -> bool {
        matches!(self, BasicMetadataValueEnum::StructValue(_))
    }
    /// Whether this value is a `VectorValue`.
    pub fn is_vector_value(self) -> bool {
        matches!(self, BasicMetadataValueEnum::VectorValue(_))
    }
    /// Whether this value is a `MetadataValue`.
    pub fn is_metadata_value(self) -> bool {
        matches!(self, BasicMetadataValueEnum::MetadataValue(_))
    }
    /// Unwraps an `ArrayValue`, panicking on any other variant.
    pub fn into_array_value(self) -> ArrayValue<'ctx> {
        match self {
            BasicMetadataValueEnum::ArrayValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
    /// Unwraps an `IntValue`, panicking on any other variant.
    pub fn into_int_value(self) -> IntValue<'ctx> {
        match self {
            BasicMetadataValueEnum::IntValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
    /// Unwraps a `FloatValue`, panicking on any other variant.
    pub fn into_float_value(self) -> FloatValue<'ctx> {
        match self {
            BasicMetadataValueEnum::FloatValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
    /// Unwraps a `PointerValue`, panicking on any other variant.
    pub fn into_pointer_value(self) -> PointerValue<'ctx> {
        match self {
            BasicMetadataValueEnum::PointerValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
    /// Unwraps a `StructValue`, panicking on any other variant.
    pub fn into_struct_value(self) -> StructValue<'ctx> {
        match self {
            BasicMetadataValueEnum::StructValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
    /// Unwraps a `VectorValue`, panicking on any other variant.
    pub fn into_vector_value(self) -> VectorValue<'ctx> {
        match self {
            BasicMetadataValueEnum::VectorValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
    /// Unwraps a `MetadataValue`, panicking on any other variant.
    pub fn into_metadata_value(self) -> MetadataValue<'ctx> {
        match self {
            BasicMetadataValueEnum::MetadataValue(v) => v,
            _ => panic!("Found {:?} but expected a different variant", self),
        }
    }
}
impl<'ctx> From<BasicValueEnum<'ctx>> for AnyValueEnum<'ctx> {
    // Every basic value is also an "any" value, so this conversion is total;
    // it re-classifies the underlying LLVMValueRef via `AnyValueEnum::new`.
    fn from(value: BasicValueEnum<'ctx>) -> Self {
        AnyValueEnum::new(value.as_value_ref())
    }
}
impl<'ctx> TryFrom<AnyValueEnum<'ctx>> for BasicValueEnum<'ctx> {
    type Error = ();
    /// Succeeds only for variants that are also basic values; function, phi
    /// and instruction values are rejected with `Err(())`.
    fn try_from(value: AnyValueEnum<'ctx>) -> Result<Self, Self::Error> {
        match value {
            AnyValueEnum::ArrayValue(av) => Ok(av.into()),
            AnyValueEnum::IntValue(iv) => Ok(iv.into()),
            AnyValueEnum::FloatValue(fv) => Ok(fv.into()),
            AnyValueEnum::PointerValue(pv) => Ok(pv.into()),
            AnyValueEnum::StructValue(sv) => Ok(sv.into()),
            AnyValueEnum::VectorValue(vv) => Ok(vv.into()),
            _ => Err(()),
        }
    }
}
| 30.557659 | 152 | 0.546525 |
336fd98fcdb143f9a1262c95e3c9a9f71985fd47 | 5,083 | mod name_partial {
mod valid {
use bstr::ByteSlice;
macro_rules! mktest {
($name:ident, $input:expr) => {
#[test]
fn $name() {
assert!(git_validate::reference::name_partial($input.as_bstr()).is_ok())
}
};
}
mktest!(refs_path, b"refs/heads/main");
mktest!(refs_path_with_file_extension, b"refs/heads/main.ext");
mktest!(refs_path_underscores_and_dashes, b"refs/heads/main-2nd_ext");
mktest!(relative_path, b"etc/foo");
mktest!(all_uppercase, b"MAIN");
mktest!(all_uppercase_with_underscore, b"NEW_HEAD");
mktest!(partial_name_lowercase, b"main");
mktest!(chinese_utf8, "heads/你好吗".as_bytes());
}
mod invalid {
use bstr::ByteSlice;
use git_validate::{reference::name::Error as RefError, tag::name::Error as TagError};
macro_rules! mktest {
($name:ident, $input:literal, $expected:pat) => {
#[test]
fn $name() {
match git_validate::reference::name_partial($input.as_bstr()) {
Err($expected) => {}
got => panic!("Wanted {}, got {:?}", stringify!($expected), got),
}
}
};
}
mktest!(
refs_path_double_dot,
b"refs/../somewhere",
RefError::Tag(TagError::DoubleDot)
);
mktest!(
refs_path_name_starts_with_dot,
b".refs/somewhere",
RefError::Tag(TagError::StartsWithDot)
);
mktest!(
refs_path_component_is_singular_dot,
b"refs/./still-inside-but-not-cool",
RefError::SingleDot
);
mktest!(any_path_starts_with_slash, b"/etc/foo", RefError::StartsWithSlash);
mktest!(empty_path, b"", RefError::Tag(TagError::Empty));
mktest!(refs_starts_with_slash, b"/refs/heads/main", RefError::StartsWithSlash);
mktest!(
ends_with_slash,
b"refs/heads/main/",
RefError::Tag(TagError::EndsWithSlash)
);
mktest!(
path_with_duplicate_slashes,
b"refs//heads/main",
RefError::RepeatedSlash
);
mktest!(
path_with_spaces,
b"refs//heads/name with spaces",
RefError::Tag(TagError::InvalidByte(_))
);
mktest!(
path_with_backslashes,
b"refs\\heads/name with spaces",
RefError::Tag(TagError::InvalidByte(_))
);
}
}
// Tests for `git_validate::refname` / `reference::name`, which require fully
// qualified reference names (bare lowercase names are rejected).
mod name {
    mod valid {
        use bstr::ByteSlice;
        // Expands to one #[test] per case asserting that the input validates.
        macro_rules! mktest {
            ($name:ident, $input:expr) => {
                #[test]
                fn $name() {
                    assert!(git_validate::refname($input.as_bstr()).is_ok())
                }
            };
        }
        mktest!(refs_path, b"refs/heads/main");
        mktest!(refs_path_with_file_extension, b"refs/heads/main.ext");
        mktest!(refs_path_underscores_and_dashes, b"refs/heads/main-2nd_ext");
        mktest!(relative_path, b"etc/foo");
        mktest!(all_uppercase, b"MAIN");
        mktest!(all_uppercase_with_underscore, b"NEW_HEAD");
        mktest!(chinese_utf8, "refs/heads/你好吗".as_bytes());
    }
    mod invalid {
        use bstr::ByteSlice;
        use git_validate::{reference::name::Error as RefError, tag::name::Error as TagError};
        // Expands to one #[test] per case asserting that validation fails
        // with exactly the expected error pattern.
        macro_rules! mktest {
            ($name:ident, $input:literal, $expected:pat) => {
                #[test]
                fn $name() {
                    match git_validate::reference::name($input.as_bstr()) {
                        Err($expected) => {}
                        got => panic!("Wanted {}, got {:?}", stringify!($expected), got),
                    }
                }
            };
        }
        mktest!(
            refs_path_double_dot,
            b"refs/../somewhere",
            RefError::Tag(TagError::DoubleDot)
        );
        mktest!(
            refs_path_name_starts_with_dot,
            b".refs/somewhere",
            RefError::Tag(TagError::StartsWithDot)
        );
        mktest!(
            refs_path_component_is_singular_dot,
            b"refs/./still-inside-but-not-cool",
            RefError::SingleDot
        );
        mktest!(capitalized_name_without_path, b"Main", RefError::SomeLowercase);
        mktest!(lowercase_name_without_path, b"main", RefError::SomeLowercase);
        mktest!(any_path_starts_with_slash, b"/etc/foo", RefError::StartsWithSlash);
        mktest!(empty_path, b"", RefError::Tag(TagError::Empty));
        mktest!(refs_starts_with_slash, b"/refs/heads/main", RefError::StartsWithSlash);
        mktest!(
            ends_with_slash,
            b"refs/heads/main/",
            RefError::Tag(TagError::EndsWithSlash)
        );
        mktest!(
            a_path_with_duplicate_slashes,
            b"refs//heads/main",
            RefError::RepeatedSlash
        );
    }
}
2978f315aa52bbb5cb6764065ad25b6901546111 | 1,682 | #![allow(unused_imports, dead_code)]
use crate::constancerc::get_runtime_configuration::abstraction::i_file_system::{
IFileSystem, RcFileExtension,
};
use crate::constancerc::get_runtime_configuration::concrete::file_system::FileSystem;
/// Builds a fixture path ending in the given file extension.
fn get_path(extension: &str) -> String {
    let mut path = String::from("my/path/with/filename.");
    path.push_str(extension);
    path
}
/// Builds a fixture path whose extension text corresponds to the given
/// `RcFileExtension` variant.
fn get_path_from_enum(extension: &RcFileExtension) -> String {
    get_path(match extension {
        RcFileExtension::Yaml => "yml",
        RcFileExtension::Json => "json",
    })
}
// An unrecognized extension must panic rather than be silently mapped.
#[test]
#[should_panic]
pub fn get_extension_foreign_extension_given_should_panic() {
    // arrange
    let file_system = FileSystem {};
    let expected = "txt";
    let path = get_path(expected);
    // act
    file_system.get_extension(&path);
}
// A path with an empty extension must panic as well.
#[test]
#[should_panic]
pub fn get_extension_no_extension_given_should_panic() {
    // arrange
    let file_system = FileSystem {};
    let expected = "";
    let path = get_path(expected);
    // act
    file_system.get_extension(&path);
}
// A `.json` path maps to the Json variant.
#[test]
pub fn get_extension_json_extension_given_returns_json() {
    // arrange
    let file_system = FileSystem {};
    let expected = RcFileExtension::Json;
    let path = get_path_from_enum(&expected);
    // act
    let actual = file_system.get_extension(&path);
    // assert
    assert_eq!(actual, expected);
}
// A `.yml` path maps to the Yaml variant.
#[test]
pub fn get_extension_yml_extension_given_returns_yml() {
    // arrange
    let file_system = FileSystem {};
    let expected = RcFileExtension::Yaml;
    let path = get_path_from_enum(&expected);
    // act
    let actual = file_system.get_extension(&path);
    // assert
    assert_eq!(actual, expected);
}
| 22.72973 | 85 | 0.68371 |
67cfeb3daeae13f28ebe910155e85476a13e69bb | 93,171 | //! Generate Rust bindings for C and C++ libraries.
//!
//! Provide a C/C++ header file, receive Rust FFI code to call into C/C++
//! functions and use types defined in the header.
//!
//! See the [`Builder`](./struct.Builder.html) struct for usage.
//!
//! See the [Users Guide](https://rust-lang.github.io/rust-bindgen/) for
//! additional documentation.
#![deny(missing_docs)]
#![deny(unused_extern_crates)]
// To avoid rather annoying warnings when matching with CXCursor_xxx as a
// constant.
#![allow(non_upper_case_globals)]
// `quote!` nests quite deeply.
#![recursion_limit = "128"]
#[macro_use]
extern crate bitflags;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate quote;
#[cfg(feature = "logging")]
#[macro_use]
extern crate log;
#[cfg(not(feature = "logging"))]
#[macro_use]
mod log_stubs;
#[macro_use]
mod extra_assertions;
// A macro to declare an internal module for which we *must* provide
// documentation for. If we are building with the "testing_only_docs" feature,
// then the module is declared public, and our `#![deny(missing_docs)]` pragma
// applies to it. This feature is used in CI, so we won't let anything slip by
// undocumented. Normal builds, however, will leave the module private, so that
// we don't expose internals to library consumers.
macro_rules! doc_mod {
    ($m:ident, $doc_mod_name:ident) => {
        #[cfg(feature = "testing_only_docs")]
        pub mod $doc_mod_name {
            //! Autogenerated documentation module.
            // Re-export the private module's items under a public name so the
            // missing-docs lint covers them in CI documentation builds.
            pub use super::$m::*;
        }
    };
}
mod clang;
mod codegen;
mod deps;
mod features;
mod ir;
mod parse;
mod regex_set;
mod time;
pub mod callbacks;
doc_mod!(clang, clang_docs);
doc_mod!(features, features_docs);
doc_mod!(ir, ir_docs);
doc_mod!(parse, parse_docs);
doc_mod!(regex_set, regex_set_docs);
pub use crate::codegen::{AliasVariation, EnumVariation, MacroTypeVariation};
use crate::features::RustFeatures;
pub use crate::features::{
RustTarget, LATEST_STABLE_RUST, RUST_TARGET_STRINGS,
};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::item::Item;
use crate::parse::{ClangItemParser, ParseError};
use crate::regex_set::RegexSet;
use std::borrow::Cow;
use std::fs::{File, OpenOptions};
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::{env, iter};
// Some convenient typedefs for a fast hash map and hash set.
type HashMap<K, V> = ::rustc_hash::FxHashMap<K, V>;
type HashSet<K> = ::rustc_hash::FxHashSet<K>;
pub(crate) use std::collections::hash_map::Entry;
/// Default prefix for the anon fields.
pub const DEFAULT_ANON_FIELDS_PREFIX: &'static str = "__bindgen_anon_";
/// Returns true if the file name carries a C++ header extension.
fn file_is_cpp(name_file: &str) -> bool {
    const CPP_EXTENSIONS: [&str; 4] = [".hpp", ".hxx", ".hh", ".h++"];
    CPP_EXTENSIONS.iter().any(|ext| name_file.ends_with(ext))
}

/// Returns true if the clang argument list indicates a C++ translation unit,
/// either via an explicit `-x c++`/`-xc++` flag or via a `-include`d C++
/// header. Inspected pairwise, so flags split across two arguments are seen.
fn args_are_cpp(clang_args: &[String]) -> bool {
    clang_args.windows(2).any(|pair| {
        pair[0] == "-xc++" ||
            pair[1] == "-xc++" ||
            (pair[0] == "-x" && pair[1] == "c++") ||
            (pair[0] == "-include" && file_is_cpp(&pair[1]))
    })
}
bitflags! {
    /// A type used to indicate which kind of items we have to generate.
    // Each flag occupies one bit so arbitrary combinations can be requested;
    // `CodegenConfig::all()` is the default (see `Default` impl below).
    pub struct CodegenConfig: u32 {
        /// Whether to generate functions.
        const FUNCTIONS = 1 << 0;
        /// Whether to generate types.
        const TYPES = 1 << 1;
        /// Whether to generate constants.
        const VARS = 1 << 2;
        /// Whether to generate methods.
        const METHODS = 1 << 3;
        /// Whether to generate constructors
        const CONSTRUCTORS = 1 << 4;
        /// Whether to generate destructors.
        const DESTRUCTORS = 1 << 5;
    }
}
impl CodegenConfig {
    /// Returns true if functions should be generated.
    pub fn functions(self) -> bool {
        self.contains(CodegenConfig::FUNCTIONS)
    }
    /// Returns true if types should be generated.
    pub fn types(self) -> bool {
        self.contains(CodegenConfig::TYPES)
    }
    /// Returns true if constants should be generated.
    pub fn vars(self) -> bool {
        self.contains(CodegenConfig::VARS)
    }
    /// Returns true if methods should be generated.
    pub fn methods(self) -> bool {
        self.contains(CodegenConfig::METHODS)
    }
    /// Returns true if constructors should be generated.
    pub fn constructors(self) -> bool {
        self.contains(CodegenConfig::CONSTRUCTORS)
    }
    /// Returns true if destructors should be generated.
    pub fn destructors(self) -> bool {
        self.contains(CodegenConfig::DESTRUCTORS)
    }
}
impl Default for CodegenConfig {
    // By default every item kind is generated.
    fn default() -> Self {
        CodegenConfig::all()
    }
}
/// Configure and generate Rust bindings for a C/C++ header.
///
/// This is the main entry point to the library.
///
/// ```ignore
/// use bindgen::builder;
///
/// // Configure and generate bindings.
/// let bindings = builder().header("path/to/input/header")
/// .allowlist_type("SomeCoolClass")
/// .allowlist_function("do_some_cool_thing")
/// .generate()?;
///
/// // Write the generated bindings to an output file.
/// bindings.write_to_file("path/to/output.rs")?;
/// ```
///
/// # Enums
///
/// Bindgen can map C/C++ enums into Rust in different ways. The way bindgen maps enums depends on
/// the pattern passed to several methods:
///
/// 1. [`constified_enum_module()`](#method.constified_enum_module)
/// 2. [`bitfield_enum()`](#method.bitfield_enum)
/// 3. [`newtype_enum()`](#method.newtype_enum)
/// 4. [`rustified_enum()`](#method.rustified_enum)
///
/// For each C enum, bindgen tries to match the pattern in the following order:
///
/// 1. Constified enum module
/// 2. Bitfield enum
/// 3. Newtype enum
/// 4. Rustified enum
///
/// If none of the above patterns match, then bindgen will generate a set of Rust constants.
///
/// # Clang arguments
///
/// Extra arguments can be passed to with clang:
/// 1. [`clang_arg()`](#method.clang_arg): takes a single argument
/// 2. [`clang_args()`](#method.clang_args): takes an iterator of arguments
/// 3. `BINDGEN_EXTRA_CLANG_ARGS` environment variable: whitespace separate
/// environment variable of arguments
///
/// Clang arguments specific to your crate should be added via the
/// `clang_arg()`/`clang_args()` methods.
///
/// End-users of the crate may need to set the `BINDGEN_EXTRA_CLANG_ARGS` environment variable to
/// add additional arguments. For example, to build against a different sysroot a user could set
/// `BINDGEN_EXTRA_CLANG_ARGS` to `--sysroot=/path/to/sysroot`.
#[derive(Debug, Default)]
pub struct Builder {
    // Accumulated configuration consumed when generating bindings.
    options: BindgenOptions,
    // Paths of headers added via `header()`, in insertion order.
    input_headers: Vec<String>,
    // Tuples of unsaved file contents of the form (name, contents).
    input_header_contents: Vec<(String, String)>,
}
/// Construct a new [`Builder`](./struct.Builder.html).
pub fn builder() -> Builder {
Default::default()
}
impl Builder {
    /// Generates the command line flags used for creating `Builder`.
    ///
    /// The output mirrors the bindgen CLI: the (last) positional header,
    /// followed by option flags, and finally `--` plus raw clang arguments.
    pub fn command_line_flags(&self) -> Vec<String> {
        let mut output_vector: Vec<String> = Vec::new();
        if let Some(header) = self.input_headers.last().cloned() {
            // Positional argument 'header'
            output_vector.push(header);
        }
        output_vector.push("--rust-target".into());
        output_vector.push(self.options.rust_target.into());
        // FIXME(emilio): This is a bit hacky, maybe we should stop re-using the
        // RustFeatures to store the "disable_untagged_union" call, and make it
        // a different flag that we check elsewhere / in generate().
        if !self.options.rust_features.untagged_union &&
            RustFeatures::from(self.options.rust_target).untagged_union
        {
            output_vector.push("--disable-untagged-union".into());
        }
        // Enum/alias/macro-constant representation styles, emitted only when
        // they differ from the default.
        if self.options.default_enum_style != Default::default() {
            output_vector.push("--default-enum-style".into());
            output_vector.push(
                match self.options.default_enum_style {
                    codegen::EnumVariation::Rust {
                        non_exhaustive: false,
                    } => "rust",
                    codegen::EnumVariation::Rust {
                        non_exhaustive: true,
                    } => "rust_non_exhaustive",
                    codegen::EnumVariation::NewType { is_bitfield: true } => {
                        "bitfield"
                    }
                    codegen::EnumVariation::NewType { is_bitfield: false } => {
                        "newtype"
                    }
                    codegen::EnumVariation::Consts => "consts",
                    codegen::EnumVariation::ModuleConsts => "moduleconsts",
                }
                .into(),
            )
        }
        if self.options.default_macro_constant_type != Default::default() {
            output_vector.push("--default-macro-constant-type".into());
            output_vector
                .push(self.options.default_macro_constant_type.as_str().into());
        }
        if self.options.default_alias_style != Default::default() {
            output_vector.push("--default-alias-style".into());
            output_vector
                .push(self.options.default_alias_style.as_str().into());
        }
        // Each regex set expands into one repeated flag per stored pattern.
        let regex_sets = &[
            (&self.options.bitfield_enums, "--bitfield-enum"),
            (&self.options.newtype_enums, "--newtype-enum"),
            (&self.options.rustified_enums, "--rustified-enum"),
            (
                &self.options.rustified_non_exhaustive_enums,
                "--rustified-enum-non-exhaustive",
            ),
            (
                &self.options.constified_enum_modules,
                "--constified-enum-module",
            ),
            (&self.options.constified_enums, "--constified-enum"),
            (&self.options.type_alias, "--type-alias"),
            (&self.options.new_type_alias, "--new-type-alias"),
            (&self.options.new_type_alias_deref, "--new-type-alias-deref"),
            (&self.options.blocklisted_types, "--blocklist-type"),
            (&self.options.blocklisted_functions, "--blocklist-function"),
            (&self.options.blocklisted_items, "--blocklist-item"),
            (&self.options.opaque_types, "--opaque-type"),
            (&self.options.allowlisted_functions, "--allowlist-function"),
            (&self.options.allowlisted_types, "--allowlist-type"),
            (&self.options.allowlisted_vars, "--allowlist-var"),
            (&self.options.no_partialeq_types, "--no-partialeq"),
            (&self.options.no_copy_types, "--no-copy"),
            (&self.options.no_debug_types, "--no-debug"),
            (&self.options.no_default_types, "--no-default"),
            (&self.options.no_hash_types, "--no-hash"),
            (&self.options.must_use_types, "--must-use-type"),
        ];
        for (set, flag) in regex_sets {
            for item in set.get_items() {
                output_vector.push((*flag).to_owned());
                output_vector.push(item.to_owned());
            }
        }
        // Boolean toggles: each flag is emitted only when the option differs
        // from its default.
        if !self.options.layout_tests {
            output_vector.push("--no-layout-tests".into());
        }
        if self.options.impl_debug {
            output_vector.push("--impl-debug".into());
        }
        if self.options.impl_partialeq {
            output_vector.push("--impl-partialeq".into());
        }
        if !self.options.derive_copy {
            output_vector.push("--no-derive-copy".into());
        }
        if !self.options.derive_debug {
            output_vector.push("--no-derive-debug".into());
        }
        if !self.options.derive_default {
            output_vector.push("--no-derive-default".into());
        } else {
            output_vector.push("--with-derive-default".into());
        }
        if self.options.derive_hash {
            output_vector.push("--with-derive-hash".into());
        }
        if self.options.derive_partialord {
            output_vector.push("--with-derive-partialord".into());
        }
        if self.options.derive_ord {
            output_vector.push("--with-derive-ord".into());
        }
        if self.options.derive_partialeq {
            output_vector.push("--with-derive-partialeq".into());
        }
        if self.options.derive_eq {
            output_vector.push("--with-derive-eq".into());
        }
        if self.options.time_phases {
            output_vector.push("--time-phases".into());
        }
        if !self.options.generate_comments {
            output_vector.push("--no-doc-comments".into());
        }
        if !self.options.allowlist_recursively {
            output_vector.push("--no-recursive-allowlist".into());
        }
        if self.options.objc_extern_crate {
            output_vector.push("--objc-extern-crate".into());
        }
        if self.options.generate_block {
            output_vector.push("--generate-block".into());
        }
        if self.options.block_extern_crate {
            output_vector.push("--block-extern-crate".into());
        }
        if self.options.builtins {
            output_vector.push("--builtins".into());
        }
        if let Some(ref prefix) = self.options.ctypes_prefix {
            output_vector.push("--ctypes-prefix".into());
            output_vector.push(prefix.clone());
        }
        if self.options.anon_fields_prefix != DEFAULT_ANON_FIELDS_PREFIX {
            output_vector.push("--anon-fields-prefix".into());
            output_vector.push(self.options.anon_fields_prefix.clone());
        }
        if self.options.emit_ast {
            output_vector.push("--emit-clang-ast".into());
        }
        if self.options.emit_ir {
            output_vector.push("--emit-ir".into());
        }
        if let Some(ref graph) = self.options.emit_ir_graphviz {
            output_vector.push("--emit-ir-graphviz".into());
            output_vector.push(graph.clone())
        }
        if self.options.enable_cxx_namespaces {
            output_vector.push("--enable-cxx-namespaces".into());
        }
        if self.options.enable_function_attribute_detection {
            output_vector.push("--enable-function-attribute-detection".into());
        }
        if self.options.disable_name_namespacing {
            output_vector.push("--disable-name-namespacing".into());
        }
        if self.options.disable_nested_struct_naming {
            output_vector.push("--disable-nested-struct-naming".into());
        }
        if self.options.disable_header_comment {
            output_vector.push("--disable-header-comment".into());
        }
        if !self.options.codegen_config.functions() {
            output_vector.push("--ignore-functions".into());
        }
        // `--generate` takes a comma-joined list of enabled item kinds.
        output_vector.push("--generate".into());
        //Temporary placeholder for below 4 options
        let mut options: Vec<String> = Vec::new();
        if self.options.codegen_config.functions() {
            options.push("functions".into());
        }
        if self.options.codegen_config.types() {
            options.push("types".into());
        }
        if self.options.codegen_config.vars() {
            options.push("vars".into());
        }
        if self.options.codegen_config.methods() {
            options.push("methods".into());
        }
        if self.options.codegen_config.constructors() {
            options.push("constructors".into());
        }
        if self.options.codegen_config.destructors() {
            options.push("destructors".into());
        }
        output_vector.push(options.join(","));
        if !self.options.codegen_config.methods() {
            output_vector.push("--ignore-methods".into());
        }
        if !self.options.convert_floats {
            output_vector.push("--no-convert-floats".into());
        }
        if !self.options.prepend_enum_name {
            output_vector.push("--no-prepend-enum-name".into());
        }
        if self.options.fit_macro_constants {
            output_vector.push("--fit-macro-constant-types".into());
        }
        if self.options.array_pointers_in_arguments {
            output_vector.push("--use-array-pointers-in-arguments".into());
        }
        if let Some(ref wasm_import_module_name) =
            self.options.wasm_import_module_name
        {
            output_vector.push("--wasm-import-module-name".into());
            output_vector.push(wasm_import_module_name.clone());
        }
        for line in &self.options.raw_lines {
            output_vector.push("--raw-line".into());
            output_vector.push(line.clone());
        }
        for (module, lines) in &self.options.module_lines {
            for line in lines.iter() {
                output_vector.push("--module-raw-line".into());
                output_vector.push(module.clone());
                output_vector.push(line.clone());
            }
        }
        if self.options.use_core {
            output_vector.push("--use-core".into());
        }
        if self.options.conservative_inline_namespaces {
            output_vector.push("--conservative-inline-namespaces".into());
        }
        if self.options.generate_inline_functions {
            output_vector.push("--generate-inline-functions".into());
        }
        if !self.options.record_matches {
            output_vector.push("--no-record-matches".into());
        }
        if self.options.size_t_is_usize {
            output_vector.push("--size_t-is-usize".into());
        }
        if !self.options.rustfmt_bindings {
            output_vector.push("--no-rustfmt-bindings".into());
        }
        if let Some(path) = self
            .options
            .rustfmt_configuration_file
            .as_ref()
            .and_then(|f| f.to_str())
        {
            output_vector.push("--rustfmt-configuration-file".into());
            output_vector.push(path.into());
        }
        if let Some(ref name) = self.options.dynamic_library_name {
            output_vector.push("--dynamic-loading".into());
            output_vector.push(name.clone());
        }
        if self.options.dynamic_link_require_all {
            output_vector.push("--dynamic-link-require-all".into());
        }
        if self.options.respect_cxx_access_specs {
            output_vector.push("--respect-cxx-access-specs".into());
        }
        if self.options.translate_enum_integer_types {
            output_vector.push("--translate-enum-integer-types".into());
        }
        if self.options.c_naming {
            output_vector.push("--c-naming".into());
        }
        if self.options.force_explicit_padding {
            output_vector.push("--explicit-padding".into());
        }
        // Add clang arguments
        output_vector.push("--".into());
        if !self.options.clang_args.is_empty() {
            output_vector.extend(self.options.clang_args.iter().cloned());
        }
        if self.input_headers.len() > 1 {
            // To pass more than one header, we need to pass all but the last
            // header via the `-include` clang arg
            for header in &self.input_headers[..self.input_headers.len() - 1] {
                output_vector.push("-include".to_string());
                output_vector.push(header.clone());
            }
        }
        output_vector
    }
/// Add an input C/C++ header to generate bindings for.
///
/// For a single header:
///
/// ```ignore
/// let bindings = bindgen::Builder::default()
///     .header("input.h")
///     .generate()
///     .unwrap();
/// ```
///
/// Call it once per header to generate bindings for several headers:
///
/// ```ignore
/// let bindings = bindgen::Builder::default()
///     .header("first.h")
///     .header("second.h")
///     .header("third.h")
///     .generate()
///     .unwrap();
/// ```
pub fn header<T: Into<String>>(mut self, header: T) -> Builder {
    let path = header.into();
    self.input_headers.push(path);
    self
}
/// Add a depfile output which will be written alongside the generated bindings.
pub fn depfile<H: Into<String>, D: Into<PathBuf>>(
    mut self,
    output_module: H,
    depfile: D,
) -> Builder {
    // Record both the module the depfile describes and where to write it.
    let spec = deps::DepfileSpec {
        output_module: output_module.into(),
        depfile_path: depfile.into(),
    };
    self.options.depfile = Some(spec);
    self
}
/// Add `contents` as an input C/C++ header named `name`.
///
/// The file `name` will be added to the clang arguments.
pub fn header_contents(mut self, name: &str, contents: &str) -> Builder {
    // clang expects its virtual file system to mirror the real one, so the
    // synthetic header must be registered under an absolute path.
    let absolute_path = env::current_dir()
        .expect("Cannot retrieve current directory")
        .join(name);
    let absolute_path = absolute_path
        .to_str()
        .expect("Cannot convert current directory name to string")
        .to_owned();
    self.input_header_contents
        .push((absolute_path, contents.into()));
    self
}
/// Specify the rust target
///
/// Defaults to the latest stable Rust release.
pub fn rust_target(mut self, rust_target: RustTarget) -> Self {
    self.options.set_rust_target(rust_target);
    self
}
/// Disable support for native Rust unions, if supported.
pub fn disable_untagged_union(mut self) -> Self {
    self.options.rust_features.untagged_union = false;
    self
}
/// Suppress insertion of bindgen's version identifier into the generated
/// bindings.
pub fn disable_header_comment(mut self) -> Self {
    self.options.disable_header_comment = true;
    self
}
/// Set the output graphviz file.
pub fn emit_ir_graphviz<T: Into<String>>(mut self, path: T) -> Builder {
    self.options.emit_ir_graphviz = Some(path.into());
    self
}
/// Whether the generated bindings should contain documentation comments
/// (docstrings) or not. Defaults to true.
///
/// Note that clang excludes comments from system headers by default; pass
/// `-fretain-comments-from-system-headers` via
/// [`clang_arg`][Builder::clang_arg] to include them. Clang can also be
/// told to process all comments (not just documentation ones) with the
/// `-fparse-all-comments` flag. See [slides on clang comment parsing](
/// https://llvm.org/devmtg/2012-11/Gribenko_CommentParsing.pdf) for
/// background and examples.
pub fn generate_comments(mut self, doit: bool) -> Self {
    self.options.generate_comments = doit;
    self
}
/// Whether to allowlist recursively or not. Defaults to true.
///
/// Suppose the "initiate_dance_party" function is explicitly allowlisted
/// in this C header:
///
/// ```c
/// typedef struct MoonBoots {
///     int bouncy_level;
/// } MoonBoots;
///
/// void initiate_dance_party(MoonBoots* boots);
/// ```
///
/// Normally bindings are generated for both `initiate_dance_party` and the
/// `MoonBoots` struct it transitively references. With
/// `allowlist_recursively(false)`, `bindgen` emits only the explicitly
/// allowlisted items, so no struct definition for `MoonBoots` is emitted —
/// yet the `initiate_dance_party` function still references `MoonBoots`!
///
/// **Disabling this feature will almost certainly cause `bindgen` to emit
/// bindings that will not compile!** When it is disabled, it becomes *your*
/// responsibility to provide a definition for every type referenced from an
/// explicitly allowlisted item. You can do that with
/// [`Builder::raw_line`](#method.raw_line), or by defining the types in
/// Rust and `include!(...)`-ing the bindings immediately afterwards.
pub fn allowlist_recursively(mut self, doit: bool) -> Self {
    self.options.allowlist_recursively = doit;
    self
}
/// Deprecated alias for allowlist_recursively.
#[deprecated(note = "Use allowlist_recursively instead")]
pub fn whitelist_recursively(self, doit: bool) -> Self {
    self.allowlist_recursively(doit)
}
/// Generate `#[macro_use] extern crate objc;` instead of `use objc;`
/// in the prologue of the files generated from objective-c files
pub fn objc_extern_crate(mut self, doit: bool) -> Self {
    self.options.objc_extern_crate = doit;
    self
}
/// Generate proper block signatures instead of void pointers.
pub fn generate_block(mut self, doit: bool) -> Self {
    self.options.generate_block = doit;
    self
}
/// Generate `#[macro_use] extern crate block;` instead of `use block;`
/// in the prologue of the files generated from apple block files
pub fn block_extern_crate(mut self, doit: bool) -> Self {
    self.options.block_extern_crate = doit;
    self
}
/// Whether to rely on clang's name mangling. On by default, and probably
/// required for C++ features.
///
/// Some old libclang versions, however, appear to return incorrect results
/// in some cases for non-mangled functions (see [1]), so disabling it is
/// allowed.
///
/// [1]: https://github.com/rust-lang/rust-bindgen/issues/528
pub fn trust_clang_mangling(mut self, doit: bool) -> Self {
    self.options.enable_mangling = doit;
    self
}
/// Hide the given type from the generated bindings. Regular expressions are
/// supported.
#[deprecated(note = "Use blocklist_type instead")]
pub fn hide_type<T: AsRef<str>>(self, arg: T) -> Builder {
    self.blocklist_type(arg)
}
/// Hide the given type from the generated bindings. Regular expressions are
/// supported.
#[deprecated(note = "Use blocklist_type instead")]
pub fn blacklist_type<T: AsRef<str>>(self, arg: T) -> Builder {
    self.blocklist_type(arg)
}
/// Hide the given type from the generated bindings. Regular expressions are
/// supported.
///
/// To blocklist all types prefixed with "mylib", pass `"mylib_.*"`. See
/// the [regex](https://docs.rs/regex/*/regex/) docs for the full
/// expression syntax.
pub fn blocklist_type<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.blocklisted_types.insert(arg);
    self
}
/// Hide the given function from the generated bindings. Regular expressions
/// are supported.
#[deprecated(note = "Use blocklist_function instead")]
pub fn blacklist_function<T: AsRef<str>>(self, arg: T) -> Builder {
    self.blocklist_function(arg)
}
/// Hide the given function from the generated bindings. Regular expressions
/// are supported.
///
/// To blocklist all functions prefixed with "mylib", pass `"mylib_.*"`.
/// See the [regex](https://docs.rs/regex/*/regex/) docs for the full
/// expression syntax.
pub fn blocklist_function<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.blocklisted_functions.insert(arg);
    self
}
/// Hide the given item from the generated bindings, regardless of
/// whether it's a type, function, module, etc. Regular
/// expressions are supported.
#[deprecated(note = "Use blocklist_item instead")]
pub fn blacklist_item<T: AsRef<str>>(self, arg: T) -> Builder {
    // Delegate to the non-deprecated method, matching the pattern of the
    // other deprecated aliases (`blacklist_type`, `blacklist_function`),
    // instead of duplicating the insertion logic.
    self.blocklist_item(arg)
}
/// Hide the given item from the generated bindings, regardless of
/// whether it's a type, function, module, etc. Regular
/// expressions are supported.
///
/// To blocklist items prefixed with "mylib" use `"mylib_.*"`.
/// For more complicated expressions check
/// [regex](https://docs.rs/regex/*/regex/) docs
pub fn blocklist_item<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.blocklisted_items.insert(arg);
    self
}
/// Treat the given type as opaque in the generated bindings. Regular
/// expressions are supported.
///
/// To make all types prefixed with "mylib" opaque, pass `"mylib_.*"`. See
/// the [regex](https://docs.rs/regex/*/regex/) docs for the full
/// expression syntax.
pub fn opaque_type<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.opaque_types.insert(arg);
    self
}
/// Allowlist the given type so that it (and all types that it transitively
/// refers to) appears in the generated bindings. Regular expressions are
/// supported.
#[deprecated(note = "use allowlist_type instead")]
pub fn whitelisted_type<T: AsRef<str>>(self, arg: T) -> Builder {
    self.allowlist_type(arg)
}
/// Allowlist the given type so that it (and all types that it transitively
/// refers to) appears in the generated bindings. Regular expressions are
/// supported.
#[deprecated(note = "use allowlist_type instead")]
pub fn whitelist_type<T: AsRef<str>>(self, arg: T) -> Builder {
    self.allowlist_type(arg)
}
/// Allowlist the given type so that it (and all types that it transitively
/// refers to) appears in the generated bindings. Regular expressions are
/// supported.
///
/// To allowlist all types prefixed with "mylib", pass `"mylib_.*"`. See
/// the [regex](https://docs.rs/regex/*/regex/) docs for the full
/// expression syntax.
pub fn allowlist_type<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.allowlisted_types.insert(arg);
    self
}
/// Allowlist the given function so that it (and all types that it
/// transitively refers to) appears in the generated bindings. Regular
/// expressions are supported.
///
/// To allowlist all functions prefixed with "mylib", pass `"mylib_.*"`.
/// See the [regex](https://docs.rs/regex/*/regex/) docs for the full
/// expression syntax.
pub fn allowlist_function<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.allowlisted_functions.insert(arg);
    self
}
/// Allowlist the given function.
///
/// Deprecated: use allowlist_function instead.
#[deprecated(note = "use allowlist_function instead")]
pub fn whitelist_function<T: AsRef<str>>(self, arg: T) -> Builder {
    self.allowlist_function(arg)
}
/// Allowlist the given function.
///
/// Deprecated: use allowlist_function instead.
#[deprecated(note = "use allowlist_function instead")]
pub fn whitelisted_function<T: AsRef<str>>(self, arg: T) -> Builder {
    self.allowlist_function(arg)
}
/// Allowlist the given variable so that it (and all types that it
/// transitively refers to) appears in the generated bindings. Regular
/// expressions are supported.
///
/// To allowlist all variables prefixed with "mylib", pass `"mylib_.*"`.
/// See the [regex](https://docs.rs/regex/*/regex/) docs for the full
/// expression syntax.
pub fn allowlist_var<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.allowlisted_vars.insert(arg);
    self
}
/// Deprecated: use allowlist_var instead.
#[deprecated(note = "use allowlist_var instead")]
pub fn whitelist_var<T: AsRef<str>>(self, arg: T) -> Builder {
    self.allowlist_var(arg)
}
/// Allowlist the given variable.
///
/// Deprecated: use allowlist_var instead.
#[deprecated(note = "use allowlist_var instead")]
pub fn whitelisted_var<T: AsRef<str>>(self, arg: T) -> Builder {
    self.allowlist_var(arg)
}
/// Set the default style of code to generate for enums
pub fn default_enum_style(
    mut self,
    arg: codegen::EnumVariation,
) -> Builder {
    self.options.default_enum_style = arg;
    self
}
/// Mark the given enum (or set of enums, if using a pattern) as being
/// bitfield-like. Regular expressions are supported.
///
/// This makes bindgen generate a type that isn't a rust `enum`, similar to
/// the newtype enum style, but with the bitwise operators implemented.
pub fn bitfield_enum<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.bitfield_enums.insert(arg);
    self
}
/// Mark the given enum (or set of enums, if using a pattern) as a newtype.
/// Regular expressions are supported.
///
/// This makes bindgen generate a type that isn't a Rust `enum`.
pub fn newtype_enum<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.newtype_enums.insert(arg);
    self
}
/// Mark the given enum (or set of enums, if using a pattern) as a Rust
/// enum.
///
/// This makes bindgen generate enums instead of constants. Regular
/// expressions are supported.
///
/// **Use this with caution**, constructing such an enum with an invalid
/// value in unsafe code (including FFI) invokes undefined behaviour.
/// Consider the newtype enum style instead.
pub fn rustified_enum<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.rustified_enums.insert(arg);
    self
}
/// Mark the given enum (or set of enums, if using a pattern) as a Rust
/// enum with the `#[non_exhaustive]` attribute.
///
/// This makes bindgen generate enums instead of constants. Regular
/// expressions are supported.
///
/// **Use this with caution**, constructing such an enum with an invalid
/// value in unsafe code (including FFI) invokes undefined behaviour.
/// Consider the newtype enum style instead.
pub fn rustified_non_exhaustive_enum<T: AsRef<str>>(
    mut self,
    arg: T,
) -> Builder {
    self.options.rustified_non_exhaustive_enums.insert(arg);
    self
}
/// Mark the given enum (or set of enums, if using a pattern) as a set of
/// constants that are not to be put into a module.
pub fn constified_enum<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.constified_enums.insert(arg);
    self
}
/// Mark the given enum (or set of enums, if using a pattern) as a set of
/// constants that should be put into a module.
///
/// This makes bindgen generate modules containing constants instead of
/// just constants. Regular expressions are supported.
pub fn constified_enum_module<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.constified_enum_modules.insert(arg);
    self
}
/// Set the default type for macro constants
pub fn default_macro_constant_type(
    mut self,
    arg: codegen::MacroTypeVariation,
) -> Builder {
    self.options.default_macro_constant_type = arg;
    self
}
/// Set the default style of code to generate for typedefs
pub fn default_alias_style(
    mut self,
    arg: codegen::AliasVariation,
) -> Builder {
    self.options.default_alias_style = arg;
    self
}
/// Mark the given typedef alias (or set of aliases, if using a pattern) to
/// use regular Rust type aliasing.
///
/// This is already the default behavior; use it to override a
/// `default_alias_style` of NewType or NewTypeDeref for a specific set of
/// typedefs.
pub fn type_alias<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.type_alias.insert(arg);
    self
}
/// Mark the given typedef alias (or set of aliases, if using a pattern) to
/// be generated as a new type, by wrapping the aliased type in a
/// #[repr(transparent)] struct.
///
/// Used to enforce stricter type checking.
pub fn new_type_alias<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.new_type_alias.insert(arg);
    self
}
/// Mark the given typedef alias (or set of aliases, if using a pattern) to
/// be generated as a new type, by wrapping the aliased type in a
/// #[repr(transparent)] struct, together with automatically generated
/// impl's of `Deref` and `DerefMut` to the aliased type.
pub fn new_type_alias_deref<T: AsRef<str>>(mut self, arg: T) -> Builder {
    self.options.new_type_alias_deref.insert(arg);
    self
}
/// Add a string to prepend to the generated bindings. The string is passed
/// through without any modification.
pub fn raw_line<T: Into<String>>(mut self, arg: T) -> Self {
    self.options.raw_lines.push(arg.into());
    self
}
/// Add a given line to the beginning of module `mod`.
pub fn module_raw_line<T, U>(mut self, mod_: T, line: U) -> Self
where
    T: Into<String>,
    U: Into<String>,
{
    let lines = self.options.module_lines.entry(mod_.into()).or_default();
    lines.push(line.into());
    self
}
/// Add a given set of lines to the beginning of module `mod`.
pub fn module_raw_lines<T, I>(mut self, mod_: T, lines: I) -> Self
where
    T: Into<String>,
    I: IntoIterator,
    I::Item: Into<String>,
{
    let existing = self.options.module_lines.entry(mod_.into()).or_default();
    for line in lines {
        existing.push(line.into());
    }
    self
}
/// Add an argument to be passed straight through to clang.
pub fn clang_arg<T: Into<String>>(mut self, arg: T) -> Builder {
    self.options.clang_args.push(arg.into());
    self
}
/// Add arguments to be passed straight through to clang.
pub fn clang_args<I>(mut self, iter: I) -> Builder
where
    I: IntoIterator,
    I::Item: AsRef<str>,
{
    self.options
        .clang_args
        .extend(iter.into_iter().map(|arg| arg.as_ref().to_string()));
    self
}
/// Emit bindings for builtin definitions (for example `__builtin_va_list`)
/// in the generated Rust.
pub fn emit_builtins(mut self) -> Builder {
    self.options.builtins = true;
    self
}
/// Avoid converting floats to `f32`/`f64` by default.
pub fn no_convert_floats(mut self) -> Self {
    self.options.convert_floats = false;
    self
}
/// Set whether layout tests should be generated.
pub fn layout_tests(mut self, doit: bool) -> Self {
    self.options.layout_tests = doit;
    self
}
/// Set whether `Debug` should be implemented, if it can not be derived automatically.
pub fn impl_debug(mut self, doit: bool) -> Self {
    self.options.impl_debug = doit;
    self
}
/// Set whether `PartialEq` should be implemented, if it can not be derived automatically.
pub fn impl_partialeq(mut self, doit: bool) -> Self {
    self.options.impl_partialeq = doit;
    self
}
/// Set whether `Copy` should be derived by default.
pub fn derive_copy(mut self, doit: bool) -> Self {
    self.options.derive_copy = doit;
    self
}
/// Set whether `Debug` should be derived by default.
pub fn derive_debug(mut self, doit: bool) -> Self {
    self.options.derive_debug = doit;
    self
}
/// Set whether `Default` should be derived by default.
pub fn derive_default(mut self, doit: bool) -> Self {
    self.options.derive_default = doit;
    self
}
/// Set whether `Hash` should be derived by default.
pub fn derive_hash(mut self, doit: bool) -> Self {
    self.options.derive_hash = doit;
    self
}
/// Set whether `PartialOrd` should be derived by default.
///
/// `Ord` cannot be computed without `PartialOrd`, so disabling this also
/// disables `derive_ord`.
pub fn derive_partialord(mut self, doit: bool) -> Self {
    self.options.derive_partialord = doit;
    if !doit {
        // Ord depends on PartialOrd, so it must be dropped as well.
        self.options.derive_ord = false;
    }
    self
}
/// Set whether `Ord` should be derived by default.
///
/// `Ord` cannot be computed without `PartialOrd`, so `derive_partialord`
/// is always kept in sync with this setting.
pub fn derive_ord(mut self, doit: bool) -> Self {
    self.options.derive_ord = doit;
    self.options.derive_partialord = doit;
    self
}
/// Set whether `PartialEq` should be derived by default.
///
/// `Eq` cannot be derived without `PartialEq`, so disabling this also
/// disables deriving `Eq`.
pub fn derive_partialeq(mut self, doit: bool) -> Self {
    self.options.derive_partialeq = doit;
    if !doit {
        // Eq depends on PartialEq, so it must be dropped as well.
        self.options.derive_eq = false;
    }
    self
}
/// Set whether `Eq` should be derived by default.
///
/// `Eq` cannot be derived without `PartialEq`, so enabling this also
/// enables deriving `PartialEq`.
pub fn derive_eq(mut self, doit: bool) -> Self {
    self.options.derive_eq = doit;
    if doit {
        self.options.derive_partialeq = doit;
    }
    self
}
/// Set whether or not to time bindgen phases, and print information to
/// stderr.
pub fn time_phases(mut self, doit: bool) -> Self {
    self.options.time_phases = doit;
    self
}
/// Emit Clang AST.
pub fn emit_clang_ast(mut self) -> Builder {
    self.options.emit_ast = true;
    self
}
/// Emit IR.
pub fn emit_ir(mut self) -> Builder {
    self.options.emit_ir = true;
    self
}
/// Enable C++ namespaces.
pub fn enable_cxx_namespaces(mut self) -> Builder {
    self.options.enable_cxx_namespaces = true;
    self
}
/// Enable detecting must_use attributes on C functions.
///
/// This is quite slow in some cases (see #1465), so it's disabled by
/// default.
///
/// Note that for this to do something meaningful for now at least, the rust
/// target version has to have support for `#[must_use]`.
pub fn enable_function_attribute_detection(mut self) -> Self {
    self.options.enable_function_attribute_detection = true;
    self
}
/// Disable name auto-namespacing.
///
/// By default, bindgen mangles names like `foo::bar::Baz` to look like
/// `foo_bar_Baz` instead of just `Baz`. This method disables that
/// behavior.
///
/// Note that this intentionally does not change the names used for
/// allowlisting and blocklisting, which should still be mangled with the
/// namespaces.
///
/// Note, also, that this option may cause bindgen to generate duplicate
/// names.
pub fn disable_name_namespacing(mut self) -> Builder {
    self.options.disable_name_namespacing = true;
    self
}
/// Disable nested struct naming.
///
/// The following structs have different names for C and C++. In case of C
/// they are visible as `foo` and `bar`. In case of C++ they are visible as
/// `foo` and `foo::bar`.
///
/// ```c
/// struct foo {
///     struct bar {
///     } b;
/// };
/// ```
///
/// Bindgen wants to avoid duplicate names by default, so it follows the
/// C++ naming and generates `foo`/`foo_bar` instead of just `foo`/`bar`.
///
/// This method disables that behavior; it is intended to be used only
/// for headers that were written for C.
pub fn disable_nested_struct_naming(mut self) -> Builder {
    self.options.disable_nested_struct_naming = true;
    self
}
/// Treat inline namespaces conservatively.
///
/// This is tricky, because in C++ it is technically legal to override an
/// item defined in an inline namespace:
///
/// ```cpp
/// inline namespace foo {
///     using Bar = int;
/// }
/// using Bar = long;
/// ```
///
/// Even though referencing `Bar` is a compiler error.
///
/// We want to support this (arguably esoteric) use case, but we don't want
/// the rest of bindgen's users to pay an usability penalty for it.
///
/// Supporting it requires keeping all the inline namespaces around, which
/// makes bindgen harder to use: you cannot reference, e.g., `std::string`
/// directly (you'd need to spell out the proper inline namespace).
///
/// We could complicate a lot of the logic to detect name collisions, and if
/// not detected generate a `pub use inline_ns::*` or something like that.
///
/// That's probably something we can do if we see this option is needed in a
/// lot of cases, to improve its usability, but my guess is that this is
/// not going to be too useful.
pub fn conservative_inline_namespaces(mut self) -> Builder {
    self.options.conservative_inline_namespaces = true;
    self
}
/// Whether inline functions should be generated or not.
///
/// Note that they will usually not work. However you can use
/// `-fkeep-inline-functions` or `-fno-inline-functions` if you are
/// responsible of compiling the library to make them callable.
pub fn generate_inline_functions(mut self, doit: bool) -> Self {
    self.options.generate_inline_functions = doit;
    self
}
/// Ignore functions.
pub fn ignore_functions(mut self) -> Builder {
    self.options.codegen_config.remove(CodegenConfig::FUNCTIONS);
    self
}
/// Ignore methods.
pub fn ignore_methods(mut self) -> Builder {
    self.options.codegen_config.remove(CodegenConfig::METHODS);
    self
}
/// Avoid generating any unstable Rust, such as Rust unions, in the generated bindings.
#[deprecated(note = "please use `rust_target` instead")]
pub fn unstable_rust(self, doit: bool) -> Self {
    self.rust_target(if doit {
        RustTarget::Nightly
    } else {
        LATEST_STABLE_RUST
    })
}
/// Use core instead of libstd in the generated bindings.
pub fn use_core(mut self) -> Builder {
    self.options.use_core = true;
    self
}
/// Use the given prefix for the raw types instead of `::std::os::raw`.
pub fn ctypes_prefix<T: Into<String>>(mut self, prefix: T) -> Builder {
    self.options.ctypes_prefix = Some(prefix.into());
    self
}
/// Use the given prefix for the anon fields.
pub fn anon_fields_prefix<T: Into<String>>(mut self, prefix: T) -> Builder {
    self.options.anon_fields_prefix = prefix.into();
    self
}
/// Allows configuring types in different situations, see the
/// [`ParseCallbacks`](./callbacks/trait.ParseCallbacks.html) documentation.
pub fn parse_callbacks(
    mut self,
    cb: Box<dyn callbacks::ParseCallbacks>,
) -> Self {
    self.options.parse_callbacks = Some(cb);
    self
}
/// Choose what to generate using a
/// [`CodegenConfig`](./struct.CodegenConfig.html).
pub fn with_codegen_config(mut self, config: CodegenConfig) -> Self {
    self.options.codegen_config = config;
    self
}
/// Whether to detect include paths using clang_sys.
pub fn detect_include_paths(mut self, doit: bool) -> Self {
    self.options.detect_include_paths = doit;
    self
}
/// Whether to try to fit macro constants to types smaller than u32/i32
pub fn fit_macro_constants(mut self, doit: bool) -> Self {
    self.options.fit_macro_constants = doit;
    self
}
/// Prepend the enum name to constant or newtype variants.
pub fn prepend_enum_name(mut self, doit: bool) -> Self {
    self.options.prepend_enum_name = doit;
    self
}
/// Set whether `size_t` should be translated to `usize` automatically.
pub fn size_t_is_usize(mut self, is: bool) -> Self {
    self.options.size_t_is_usize = is;
    self
}
/// Set whether rustfmt should format the generated bindings.
pub fn rustfmt_bindings(mut self, doit: bool) -> Self {
    self.options.rustfmt_bindings = doit;
    self
}
/// Set whether we should record matched items in our regex sets.
pub fn record_matches(mut self, doit: bool) -> Self {
    self.options.record_matches = doit;
    self
}
/// Set the absolute path to the rustfmt configuration file. If None, the
/// standard rustfmt options are used.
pub fn rustfmt_configuration_file(self, path: Option<PathBuf>) -> Self {
    // A custom configuration only makes sense when rustfmt runs at all.
    let mut builder = self.rustfmt_bindings(true);
    builder.options.rustfmt_configuration_file = path;
    builder
}
/// Sets an explicit path to rustfmt, to be used when rustfmt is enabled.
pub fn with_rustfmt<P: Into<PathBuf>>(mut self, path: P) -> Self {
    self.options.rustfmt_path = Some(path.into());
    self
}
/// If true, always emit explicit padding fields.
///
/// Enable this when a struct needs to be serialized in its native format
/// (padding bytes and all), for example when writing it to a file or
/// sending it on the network, since reading the padding bytes of a struct
/// may otherwise lead to Undefined Behavior.
pub fn explicit_padding(mut self, doit: bool) -> Self {
    self.options.force_explicit_padding = doit;
    self
}
/// Generate the Rust bindings using the options built up thus far.
pub fn generate(mut self) -> Result<Bindings, ()> {
    // Add any extra arguments from the environment to the clang command line.
    if let Some(extra_clang_args) =
        get_target_dependent_env_var("BINDGEN_EXTRA_CLANG_ARGS")
    {
        // Try to parse it with shell quoting. If we fail, make it one single big argument.
        if let Some(strings) = shlex::split(&extra_clang_args) {
            self.options.clang_args.extend(strings);
        } else {
            self.options.clang_args.push(extra_clang_args);
        };
    }
    // Transform input headers to arguments on the clang command line.
    // The last header added becomes the primary input; all earlier headers
    // are forwarded to clang via `-include` flags below.
    self.options.input_header = self.input_headers.pop();
    self.options.extra_input_headers = self.input_headers;
    self.options.clang_args.extend(
        self.options.extra_input_headers.iter().flat_map(|header| {
            iter::once("-include".into())
                .chain(iter::once(header.to_string()))
        }),
    );
    // Hand the in-memory headers registered via `header_contents` to clang
    // as unsaved (virtual) files.
    self.options.input_unsaved_files.extend(
        self.input_header_contents
            .drain(..)
            .map(|(name, contents)| {
                clang::UnsavedFile::new(&name, &contents)
            }),
    );
    Bindings::generate(self.options)
}
/// Preprocess and dump the input header files to disk.
///
/// This is useful when debugging bindgen, using C-Reduce, or when filing
/// issues. The resulting file will be named something like `__bindgen.i` or
/// `__bindgen.ii`
pub fn dump_preprocessed_input(&self) -> io::Result<()> {
    let clang =
        clang_sys::support::Clang::find(None, &[]).ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::Other,
                "Cannot find clang executable",
            )
        })?;
    // The contents of a wrapper file that includes all the input header
    // files.
    let mut wrapper_contents = String::new();
    // Whether we are working with C or C++ inputs.
    let mut is_cpp = args_are_cpp(&self.options.clang_args);
    // For each input header, add `#include "$header"`.
    for header in &self.input_headers {
        is_cpp |= file_is_cpp(header);
        wrapper_contents.push_str("#include \"");
        wrapper_contents.push_str(header);
        wrapper_contents.push_str("\"\n");
    }
    // For each input header content, add a prefix line of `#line 0 "$name"`
    // followed by the contents.
    for &(ref name, ref contents) in &self.input_header_contents {
        is_cpp |= file_is_cpp(name);
        wrapper_contents.push_str("#line 0 \"");
        wrapper_contents.push_str(name);
        wrapper_contents.push_str("\"\n");
        wrapper_contents.push_str(contents);
    }
    // Pick the wrapper extension so clang selects the right language mode.
    let wrapper_path = PathBuf::from(if is_cpp {
        "__bindgen.cpp"
    } else {
        "__bindgen.c"
    });
    // Scope the file handle so it is flushed and closed before clang reads it.
    {
        let mut wrapper_file = File::create(&wrapper_path)?;
        wrapper_file.write_all(wrapper_contents.as_bytes())?;
    }
    // Run clang in preprocess-only mode (-E), keeping comments (-C), and
    // capture the preprocessed output from stdout.
    let mut cmd = Command::new(&clang.path);
    cmd.arg("-save-temps")
        .arg("-E")
        .arg("-C")
        .arg("-c")
        .arg(&wrapper_path)
        .stdout(Stdio::piped());
    for a in &self.options.clang_args {
        cmd.arg(a);
    }
    let mut child = cmd.spawn()?;
    let mut preprocessed = child.stdout.take().unwrap();
    // `.ii` is the conventional extension for preprocessed C++, `.i` for C.
    let mut file = File::create(if is_cpp {
        "__bindgen.ii"
    } else {
        "__bindgen.i"
    })?;
    io::copy(&mut preprocessed, &mut file)?;
    if child.wait()?.success() {
        Ok(())
    } else {
        Err(io::Error::new(
            io::ErrorKind::Other,
            "clang exited with non-zero status",
        ))
    }
}
/// Don't derive `PartialEq` for a given type. Regular
/// expressions are supported.
pub fn no_partialeq<T: Into<String>>(mut self, arg: T) -> Builder {
    self.options.no_partialeq_types.insert(arg.into());
    self
}
/// Don't derive `Copy` for a given type. Regular
/// expressions are supported.
pub fn no_copy<T: Into<String>>(mut self, arg: T) -> Self {
    self.options.no_copy_types.insert(arg.into());
    self
}
/// Don't derive `Debug` for a given type. Regular
/// expressions are supported.
pub fn no_debug<T: Into<String>>(mut self, arg: T) -> Self {
    self.options.no_debug_types.insert(arg.into());
    self
}
/// Don't derive/impl `Default` for a given type. Regular
/// expressions are supported.
pub fn no_default<T: Into<String>>(mut self, arg: T) -> Self {
    self.options.no_default_types.insert(arg.into());
    self
}
/// Don't derive `Hash` for a given type. Regular
/// expressions are supported.
pub fn no_hash<T: Into<String>>(mut self, arg: T) -> Builder {
    self.options.no_hash_types.insert(arg.into());
    self
}
/// Add `#[must_use]` for the given type. Regular
/// expressions are supported.
pub fn must_use_type<T: Into<String>>(mut self, arg: T) -> Builder {
    self.options.must_use_types.insert(arg.into());
    self
}
/// Set whether `arr[size]` should be treated as `*mut T` or `*mut [T; size]` (same for mut)
pub fn array_pointers_in_arguments(mut self, doit: bool) -> Self {
self.options.array_pointers_in_arguments = doit;
self
}
/// Set the wasm import module name
pub fn wasm_import_module_name<T: Into<String>>(
mut self,
import_name: T,
) -> Self {
self.options.wasm_import_module_name = Some(import_name.into());
self
}
/// Specify the dynamic library name if we are generating bindings for a shared library.
pub fn dynamic_library_name<T: Into<String>>(
mut self,
dynamic_library_name: T,
) -> Self {
self.options.dynamic_library_name = Some(dynamic_library_name.into());
self
}
/// Require successful linkage for all routines in a shared library.
/// This allows us to optimize function calls by being able to safely assume function pointers
/// are valid.
pub fn dynamic_link_require_all(mut self, req: bool) -> Self {
self.options.dynamic_link_require_all = req;
self
}
    /// Generate bindings as `pub` only if the bound item is publicly accessible by C++.
    ///
    /// Returns the builder for chaining.
    pub fn respect_cxx_access_specs(mut self, doit: bool) -> Self {
        self.options.respect_cxx_access_specs = doit;
        self
    }
    /// Always translate enum integer types to native Rust integer types.
    ///
    /// This will result in enums having types such as `u32` and `i16` instead
    /// of `c_uint` and `c_short`. Types for Rustified enums are always
    /// translated.
    ///
    /// Returns the builder for chaining.
    pub fn translate_enum_integer_types(mut self, doit: bool) -> Self {
        self.options.translate_enum_integer_types = doit;
        self
    }
    /// Generate types with C style naming.
    ///
    /// This will add prefixes to the generated type names. For example instead of a struct `A` we
    /// will generate struct `struct_A`. Currently applies to structs, unions, and enums.
    ///
    /// Returns the builder for chaining.
    pub fn c_naming(mut self, doit: bool) -> Self {
        self.options.c_naming = doit;
        self
    }
}
/// Configuration options for generated bindings.
#[derive(Debug)]
struct BindgenOptions {
    /// The set of types that have been blocklisted and should not appear
    /// anywhere in the generated code.
    blocklisted_types: RegexSet,
    /// The set of functions that have been blocklisted and should not appear
    /// in the generated code.
    blocklisted_functions: RegexSet,
    /// The set of items, regardless of item-type, that have been
    /// blocklisted and should not appear in the generated code.
    blocklisted_items: RegexSet,
    /// The set of types that should be treated as opaque structures in the
    /// generated code.
    opaque_types: RegexSet,
    /// The explicit rustfmt path.
    rustfmt_path: Option<PathBuf>,
    /// The path to which we should write a Makefile-syntax depfile (if any).
    depfile: Option<deps::DepfileSpec>,
    /// The set of types that we should have bindings for in the generated
    /// code.
    ///
    /// This includes all types transitively reachable from any type in this
    /// set. One might think of allowlisted types/vars/functions as GC roots,
    /// and the generated Rust code as including everything that gets marked.
    allowlisted_types: RegexSet,
    /// Allowlisted functions. See docs for `allowlisted_types` for more.
    allowlisted_functions: RegexSet,
    /// Allowlisted variables. See docs for `allowlisted_types` for more.
    allowlisted_vars: RegexSet,
    /// The default style of code to generate for enums
    default_enum_style: codegen::EnumVariation,
    /// The enum patterns to mark an enum as a bitfield
    /// (newtype with bitwise operations).
    bitfield_enums: RegexSet,
    /// The enum patterns to mark an enum as a newtype.
    newtype_enums: RegexSet,
    /// The enum patterns to mark an enum as a Rust enum.
    rustified_enums: RegexSet,
    /// The enum patterns to mark an enum as a non-exhaustive Rust enum.
    rustified_non_exhaustive_enums: RegexSet,
    /// The enum patterns to mark an enum as a module of constants.
    constified_enum_modules: RegexSet,
    /// The enum patterns to mark an enum as a set of constants.
    constified_enums: RegexSet,
    /// The default type for C macro constants.
    default_macro_constant_type: codegen::MacroTypeVariation,
    /// The default style of code to generate for typedefs.
    default_alias_style: codegen::AliasVariation,
    /// Typedef patterns that will use regular type aliasing.
    type_alias: RegexSet,
    /// Typedef patterns that will be aliased by creating a new struct.
    new_type_alias: RegexSet,
    /// Typedef patterns that will be wrapped in a new struct and have
    /// `Deref` and `DerefMut` implementations to their aliased type.
    new_type_alias_deref: RegexSet,
    /// Whether we should generate builtins or not.
    builtins: bool,
    /// True if we should dump the Clang AST for debugging purposes.
    emit_ast: bool,
    /// True if we should dump our internal IR for debugging purposes.
    emit_ir: bool,
    /// Output graphviz dot file.
    emit_ir_graphviz: Option<String>,
    /// True if we should emulate C++ namespaces with Rust modules in the
    /// generated bindings.
    enable_cxx_namespaces: bool,
    /// True if we should try to find unexposed attributes in functions, in
    /// order to be able to generate #[must_use] attributes in Rust.
    enable_function_attribute_detection: bool,
    /// True if we should avoid mangling names with namespaces.
    disable_name_namespacing: bool,
    /// True if we should avoid generating nested struct names.
    disable_nested_struct_naming: bool,
    /// True if we should avoid embedding version identifiers into source code.
    disable_header_comment: bool,
    /// True if we should generate layout tests for generated structures.
    layout_tests: bool,
    /// True if we should implement the Debug trait for C/C++ structures and types
    /// that do not support automatically deriving Debug.
    impl_debug: bool,
    /// True if we should implement the PartialEq trait for C/C++ structures and types
    /// that do not support automatically deriving PartialEq.
    impl_partialeq: bool,
    /// True if we should derive Copy trait implementations for C/C++ structures
    /// and types.
    derive_copy: bool,
    /// True if we should derive Debug trait implementations for C/C++ structures
    /// and types.
    derive_debug: bool,
    /// True if we should derive Default trait implementations for C/C++ structures
    /// and types.
    derive_default: bool,
    /// True if we should derive Hash trait implementations for C/C++ structures
    /// and types.
    derive_hash: bool,
    /// True if we should derive PartialOrd trait implementations for C/C++ structures
    /// and types.
    derive_partialord: bool,
    /// True if we should derive Ord trait implementations for C/C++ structures
    /// and types.
    derive_ord: bool,
    /// True if we should derive PartialEq trait implementations for C/C++ structures
    /// and types.
    derive_partialeq: bool,
    /// True if we should derive Eq trait implementations for C/C++ structures
    /// and types.
    derive_eq: bool,
    /// True if we should avoid using libstd to use libcore instead.
    use_core: bool,
    /// An optional prefix for the "raw" types, like `c_int`, `c_void`...
    ctypes_prefix: Option<String>,
    /// The prefix for the anon fields.
    anon_fields_prefix: String,
    /// Whether to time the bindgen phases.
    time_phases: bool,
    /// True if we should generate constant names that are **directly** under
    /// namespaces.
    namespaced_constants: bool,
    /// True if we should use MSVC name mangling rules.
    msvc_mangling: bool,
    /// Whether we should convert float types to f32/f64 types.
    convert_floats: bool,
    /// The set of raw lines to prepend to the top-level module of generated
    /// Rust code.
    raw_lines: Vec<String>,
    /// The set of raw lines to prepend to each of the modules.
    ///
    /// This only makes sense if the `enable_cxx_namespaces` option is set.
    module_lines: HashMap<String, Vec<String>>,
    /// The set of arguments to pass straight through to Clang.
    clang_args: Vec<String>,
    /// The input header file.
    input_header: Option<String>,
    /// Any additional input header files.
    extra_input_headers: Vec<String>,
    /// Unsaved files for input.
    input_unsaved_files: Vec<clang::UnsavedFile>,
    /// A user-provided visitor to allow customizing different kinds of
    /// situations.
    parse_callbacks: Option<Box<dyn callbacks::ParseCallbacks>>,
    /// Which kind of items should we generate? By default, we'll generate all
    /// of them.
    codegen_config: CodegenConfig,
    /// Whether to treat inline namespaces conservatively.
    ///
    /// See the builder method description for more details.
    conservative_inline_namespaces: bool,
    /// Whether to keep documentation comments in the generated output. See the
    /// documentation for more details. Defaults to true.
    generate_comments: bool,
    /// Whether to generate inline functions. Defaults to false.
    generate_inline_functions: bool,
    /// Whether to allowlist types recursively. Defaults to true.
    allowlist_recursively: bool,
    /// Instead of emitting 'use objc;' to files generated from objective c files,
    /// generate '#[macro_use] extern crate objc;'
    objc_extern_crate: bool,
    /// Whether to generate proper block signatures instead of void pointers.
    /// NOTE(review): original doc here was a copy-paste of the
    /// `block_extern_crate` doc below; corrected per the upstream builder docs.
    generate_block: bool,
    /// Instead of emitting 'use block;' to files generated from objective c files,
    /// generate '#[macro_use] extern crate block;'
    block_extern_crate: bool,
    /// Whether to use the clang-provided name mangling. This is true and
    /// probably needed for C++ features.
    ///
    /// However, some old libclang versions seem to return incorrect results in
    /// some cases for non-mangled functions, see [1], so we allow disabling it.
    ///
    /// [1]: https://github.com/rust-lang/rust-bindgen/issues/528
    enable_mangling: bool,
    /// Whether to detect include paths using clang_sys.
    detect_include_paths: bool,
    /// Whether to try to fit macro constants into types smaller than u32/i32
    fit_macro_constants: bool,
    /// Whether to prepend the enum name to constant or newtype variants.
    prepend_enum_name: bool,
    /// Version of the Rust compiler to target
    rust_target: RustTarget,
    /// Features to enable, derived from `rust_target`
    rust_features: RustFeatures,
    /// Whether we should record which items in the regex sets ever matched.
    ///
    /// This may be a bit slower, but will enable reporting of unused allowlist
    /// items via the `error!` log.
    record_matches: bool,
    /// Whether `size_t` should be translated to `usize` automatically.
    size_t_is_usize: bool,
    /// Whether rustfmt should format the generated bindings.
    rustfmt_bindings: bool,
    /// The absolute path to the rustfmt configuration file, if None, the standard rustfmt
    /// options are used.
    rustfmt_configuration_file: Option<PathBuf>,
    /// The set of types that we should not derive `PartialEq` for.
    no_partialeq_types: RegexSet,
    /// The set of types that we should not derive `Copy` for.
    no_copy_types: RegexSet,
    /// The set of types that we should not derive `Debug` for.
    no_debug_types: RegexSet,
    /// The set of types that we should not derive/impl `Default` for.
    no_default_types: RegexSet,
    /// The set of types that we should not derive `Hash` for.
    no_hash_types: RegexSet,
    /// The set of types that should be annotated with `#[must_use]`.
    must_use_types: RegexSet,
    /// Decide if C arrays should be regular pointers in rust or array pointers
    array_pointers_in_arguments: bool,
    /// Wasm import module name.
    wasm_import_module_name: Option<String>,
    /// The name of the dynamic library (if we are generating bindings for a shared library). If
    /// this is None, no dynamic bindings are created.
    dynamic_library_name: Option<String>,
    /// Require successful linkage for all routines in a shared library.
    /// This allows us to optimize function calls by being able to safely assume function pointers
    /// are valid. No effect if `dynamic_library_name` is None.
    dynamic_link_require_all: bool,
    /// Only make generated bindings `pub` if the items would be publicly accessible
    /// by C++.
    respect_cxx_access_specs: bool,
    /// Always translate enum integer types to native Rust integer types.
    translate_enum_integer_types: bool,
    /// Generate types with C style naming.
    c_naming: bool,
    /// Always output explicit padding fields
    force_explicit_padding: bool,
}
/// TODO(emilio): This is sort of a lie (see the error message that results from
/// removing this), but since we don't share references across panic boundaries
/// it's ok.
// NOTE(review): the auto impl is presumably blocked by the boxed
// `dyn ParseCallbacks` field — confirm before relying on this impl.
impl ::std::panic::UnwindSafe for BindgenOptions {}
impl BindgenOptions {
    /// Compile every user-supplied regex set. `Bindings::generate` calls this
    /// once before anything queries the sets.
    fn build(&mut self) {
        let mut regex_sets = [
            &mut self.allowlisted_vars,
            &mut self.allowlisted_types,
            &mut self.allowlisted_functions,
            &mut self.blocklisted_types,
            &mut self.blocklisted_functions,
            &mut self.blocklisted_items,
            &mut self.opaque_types,
            &mut self.bitfield_enums,
            &mut self.constified_enums,
            &mut self.constified_enum_modules,
            &mut self.newtype_enums,
            &mut self.rustified_enums,
            &mut self.rustified_non_exhaustive_enums,
            &mut self.type_alias,
            &mut self.new_type_alias,
            &mut self.new_type_alias_deref,
            &mut self.no_partialeq_types,
            &mut self.no_copy_types,
            &mut self.no_debug_types,
            &mut self.no_default_types,
            &mut self.no_hash_types,
            &mut self.must_use_types,
        ];
        // Read out first: `self` is exclusively borrowed by the array above.
        let record_matches = self.record_matches;
        for regex_set in &mut regex_sets {
            regex_set.build(record_matches);
        }
    }
    /// Update rust target version
    pub fn set_rust_target(&mut self, rust_target: RustTarget) {
        self.rust_target = rust_target;
        // Keep rust_features synced with rust_target
        self.rust_features = rust_target.into();
    }
    /// Get features supported by target Rust version
    pub fn rust_features(&self) -> RustFeatures {
        self.rust_features
    }
}
impl Default for BindgenOptions {
    fn default() -> BindgenOptions {
        let rust_target = RustTarget::default();
        BindgenOptions {
            rust_target,
            // Derived from `rust_target`; kept in sync by `set_rust_target`.
            rust_features: rust_target.into(),
            blocklisted_types: Default::default(),
            blocklisted_functions: Default::default(),
            blocklisted_items: Default::default(),
            opaque_types: Default::default(),
            rustfmt_path: Default::default(),
            depfile: Default::default(),
            allowlisted_types: Default::default(),
            allowlisted_functions: Default::default(),
            allowlisted_vars: Default::default(),
            default_enum_style: Default::default(),
            bitfield_enums: Default::default(),
            newtype_enums: Default::default(),
            rustified_enums: Default::default(),
            rustified_non_exhaustive_enums: Default::default(),
            constified_enums: Default::default(),
            constified_enum_modules: Default::default(),
            default_macro_constant_type: Default::default(),
            default_alias_style: Default::default(),
            type_alias: Default::default(),
            new_type_alias: Default::default(),
            new_type_alias_deref: Default::default(),
            builtins: false,
            emit_ast: false,
            emit_ir: false,
            emit_ir_graphviz: None,
            layout_tests: true,
            impl_debug: false,
            impl_partialeq: false,
            // Copy and Debug are derived by default; the rest are opt-in.
            derive_copy: true,
            derive_debug: true,
            derive_default: false,
            derive_hash: false,
            derive_partialord: false,
            derive_ord: false,
            derive_partialeq: false,
            derive_eq: false,
            enable_cxx_namespaces: false,
            enable_function_attribute_detection: false,
            disable_name_namespacing: false,
            disable_nested_struct_naming: false,
            disable_header_comment: false,
            use_core: false,
            ctypes_prefix: None,
            anon_fields_prefix: DEFAULT_ANON_FIELDS_PREFIX.into(),
            namespaced_constants: true,
            msvc_mangling: false,
            convert_floats: true,
            raw_lines: vec![],
            module_lines: HashMap::default(),
            clang_args: vec![],
            input_header: None,
            extra_input_headers: vec![],
            input_unsaved_files: vec![],
            parse_callbacks: None,
            // All item kinds (functions, types, vars, ...) by default.
            codegen_config: CodegenConfig::all(),
            conservative_inline_namespaces: false,
            generate_comments: true,
            generate_inline_functions: false,
            allowlist_recursively: true,
            generate_block: false,
            objc_extern_crate: false,
            block_extern_crate: false,
            enable_mangling: true,
            detect_include_paths: true,
            fit_macro_constants: false,
            prepend_enum_name: true,
            time_phases: false,
            record_matches: true,
            rustfmt_bindings: true,
            size_t_is_usize: false,
            rustfmt_configuration_file: None,
            no_partialeq_types: Default::default(),
            no_copy_types: Default::default(),
            no_debug_types: Default::default(),
            no_default_types: Default::default(),
            no_hash_types: Default::default(),
            must_use_types: Default::default(),
            array_pointers_in_arguments: false,
            wasm_import_module_name: None,
            dynamic_library_name: None,
            dynamic_link_require_all: false,
            respect_cxx_access_specs: false,
            translate_enum_integer_types: false,
            c_naming: false,
            force_explicit_padding: false,
        }
    }
}
/// Dynamically load `libclang` (at most once per process) when the
/// `runtime` feature is enabled.
#[cfg(feature = "runtime")]
fn ensure_libclang_is_loaded() {
    if clang_sys::is_loaded() {
        return;
    }
    // XXX (issue #350): Ensure that our dynamically loaded `libclang`
    // doesn't get dropped prematurely, nor is loaded multiple times
    // across different threads.
    lazy_static! {
        // Held for the lifetime of the process so the shared library
        // is never unloaded.
        static ref LIBCLANG: std::sync::Arc<clang_sys::SharedLibrary> = {
            clang_sys::load().expect("Unable to find libclang");
            clang_sys::get_library().expect(
                "We just loaded libclang and it had better still be \
                 here!",
            )
        };
    }
    clang_sys::set_library(Some(LIBCLANG.clone()));
}
/// No-op stub: without the `runtime` feature, `libclang` is linked at
/// build time.
#[cfg(not(feature = "runtime"))]
fn ensure_libclang_is_loaded() {}
/// Generated Rust bindings.
#[derive(Debug)]
pub struct Bindings {
    /// Options used while generating; consulted again when formatting and
    /// writing the bindings out.
    options: BindgenOptions,
    /// The generated code as a token stream; rendered to text by `write`.
    module: proc_macro2::TokenStream,
}
/// The host's target triple, captured at build time into
/// `$OUT_DIR/host-target.txt` and baked into the binary.
pub(crate) const HOST_TARGET: &'static str =
    include_str!(concat!(env!("OUT_DIR"), "/host-target.txt"));
// Some architecture triplets are different between rust and libclang, see #1211
// and duplicates.
/// Translate a Rust target triple into the spelling Clang expects.
/// Only `aarch64-apple-*` differs (Clang calls it `arm64-apple-*`); every
/// other triple is returned unchanged.
fn rust_to_clang_target(rust_target: &str) -> String {
    const RUST_PREFIX: &str = "aarch64-apple-";
    const CLANG_PREFIX: &str = "arm64-apple-";
    if rust_target.starts_with(RUST_PREFIX) {
        format!("{}{}", CLANG_PREFIX, &rust_target[RUST_PREFIX.len()..])
    } else {
        rust_target.to_owned()
    }
}
/// Returns the effective target, and whether it was explicitly specified on the
/// clang flags.
///
/// Precedence: an explicit `--target=`/`-target` clang flag, then the cargo
/// `TARGET` environment variable (set in build scripts), then the host.
fn find_effective_target(clang_args: &[String]) -> (String, bool) {
    let mut args = clang_args.iter();
    while let Some(opt) = args.next() {
        if opt.starts_with("--target=") {
            // Split at most once so a value containing '=' is kept intact
            // (the old `split('=')` would have truncated it).
            let mut split = opt.splitn(2, '=');
            split.next();
            return (split.next().unwrap().to_owned(), true);
        }
        // Two-token form: `-target <triple>`.
        if opt == "-target" {
            if let Some(target) = args.next() {
                return (target.clone(), true);
            }
        }
    }
    // If we're running from a build script, try to find the cargo target.
    if let Ok(t) = env::var("TARGET") {
        return (rust_to_clang_target(&t), false);
    }
    (rust_to_clang_target(HOST_TARGET), false)
}
impl Bindings {
    /// Generate bindings for the given options.
    ///
    /// Fixes up `options` (target flag, system include paths, header
    /// arguments), parses the input with libclang, and runs codegen.
    /// Returns `Err(())` after printing a message if the input header is
    /// missing/unreadable or parsing fails.
    pub(crate) fn generate(
        mut options: BindgenOptions,
    ) -> Result<Bindings, ()> {
        ensure_libclang_is_loaded();
        #[cfg(feature = "runtime")]
        debug!(
            "Generating bindings, libclang at {}",
            clang_sys::get_library().unwrap().path().display()
        );
        #[cfg(not(feature = "runtime"))]
        debug!("Generating bindings, libclang linked");
        // Compile the user regex sets before anything queries them.
        options.build();
        let (effective_target, explicit_target) =
            find_effective_target(&options.clang_args);
        let is_host_build =
            rust_to_clang_target(HOST_TARGET) == effective_target;
        // NOTE: The is_host_build check wouldn't be sound normally in some
        // cases if we were to call a binary (if you have a 32-bit clang and are
        // building on a 64-bit system for example). But since we rely on
        // opening libclang.so, it has to be the same architecture and thus the
        // check is fine.
        if !explicit_target && !is_host_build {
            // Cross-compiling: make clang agree with the cargo target.
            options
                .clang_args
                .insert(0, format!("--target={}", effective_target));
        };
        // Ask clang_sys where the system headers live and append them as
        // `-isystem` arguments (unless detection is disabled).
        fn detect_include_paths(options: &mut BindgenOptions) {
            if !options.detect_include_paths {
                return;
            }
            // Filter out include paths and similar stuff, so we don't incorrectly
            // promote them to `-isystem`.
            let clang_args_for_clang_sys = {
                let mut last_was_include_prefix = false;
                options
                    .clang_args
                    .iter()
                    .filter(|arg| {
                        // Drop the value following a bare `-I`.
                        if last_was_include_prefix {
                            last_was_include_prefix = false;
                            return false;
                        }
                        let arg = &**arg;
                        // https://clang.llvm.org/docs/ClangCommandLineReference.html
                        // -isystem and -isystem-after are harmless.
                        if arg == "-I" || arg == "--include-directory" {
                            last_was_include_prefix = true;
                            return false;
                        }
                        if arg.starts_with("-I") ||
                            arg.starts_with("--include-directory=")
                        {
                            return false;
                        }
                        true
                    })
                    .cloned()
                    .collect::<Vec<_>>()
            };
            debug!(
                "Trying to find clang with flags: {:?}",
                clang_args_for_clang_sys
            );
            let clang = match clang_sys::support::Clang::find(
                None,
                &clang_args_for_clang_sys,
            ) {
                None => return,
                Some(clang) => clang,
            };
            debug!("Found clang: {:?}", clang);
            // Whether we are working with C or C++ inputs.
            let is_cpp = args_are_cpp(&options.clang_args) ||
                options
                    .input_header
                    .as_ref()
                    .map_or(false, |i| file_is_cpp(&i));
            let search_paths = if is_cpp {
                clang.cpp_search_paths
            } else {
                clang.c_search_paths
            };
            if let Some(search_paths) = search_paths {
                for path in search_paths.into_iter() {
                    if let Ok(path) = path.into_os_string().into_string() {
                        options.clang_args.push("-isystem".to_owned());
                        options.clang_args.push(path);
                    }
                }
            }
        }
        detect_include_paths(&mut options);
        // Any read bit (owner/group/other) is good enough.
        #[cfg(unix)]
        fn can_read(perms: &std::fs::Permissions) -> bool {
            use std::os::unix::fs::PermissionsExt;
            perms.mode() & 0o444 > 0
        }
        #[cfg(not(unix))]
        fn can_read(_: &std::fs::Permissions) -> bool {
            true
        }
        // Validate the primary input header before handing it to clang.
        if let Some(h) = options.input_header.as_ref() {
            if let Ok(md) = std::fs::metadata(h) {
                if md.is_dir() {
                    eprintln!("error: '{}' is a folder", h);
                    return Err(());
                }
                if !can_read(&md.permissions()) {
                    eprintln!(
                        "error: insufficient permissions to read '{}'",
                        h
                    );
                    return Err(());
                }
                options.clang_args.push(h.clone())
            } else {
                eprintln!("error: header '{}' does not exist.", h);
                return Err(());
            }
        }
        // Every unsaved file after the first input is passed via `-include`.
        for (idx, f) in options.input_unsaved_files.iter().enumerate() {
            if idx != 0 || options.input_header.is_some() {
                options.clang_args.push("-include".to_owned());
            }
            options.clang_args.push(f.name.to_str().unwrap().to_owned())
        }
        debug!("Fixed-up options: {:?}", options);
        let time_phases = options.time_phases;
        let mut context = BindgenContext::new(options);
        if is_host_build {
            // Sanity-check that clang agrees about the pointer width.
            debug_assert_eq!(
                context.target_pointer_size(),
                std::mem::size_of::<*mut ()>(),
                "{:?} {:?}",
                effective_target,
                HOST_TARGET
            );
        }
        {
            let _t = time::Timer::new("parse").with_output(time_phases);
            parse(&mut context)?;
        }
        let (items, options) = codegen::codegen(context);
        Ok(Bindings {
            options,
            module: quote! {
                #( #items )*
            },
        })
    }
/// Convert these bindings into source text (with raw lines prepended).
pub fn to_string(&self) -> String {
let mut bytes = vec![];
self.write(Box::new(&mut bytes) as Box<dyn Write>)
.expect("writing to a vec cannot fail");
String::from_utf8(bytes)
.expect("we should only write bindings that are valid utf-8")
}
/// Write these bindings as source text to a file.
pub fn write_to_file<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
let file = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(path.as_ref())?;
self.write(Box::new(file))?;
Ok(())
}
    /// Write these bindings as source text to the given `Write`able.
    ///
    /// Emits, in order: the generated-by header comment (unless disabled),
    /// the user's raw lines, then the rustfmt-formatted bindings. A rustfmt
    /// failure is non-fatal: the unformatted bindings are written instead.
    pub fn write<'a>(&self, mut writer: Box<dyn Write + 'a>) -> io::Result<()> {
        if !self.options.disable_header_comment {
            let version = option_env!("CARGO_PKG_VERSION");
            let header = format!(
                "/* automatically generated by rust-bindgen {} */\n\n",
                version.unwrap_or("(unknown version)")
            );
            writer.write_all(header.as_bytes())?;
        }
        // User-provided raw lines, each followed by a newline, then a blank
        // separator line if any were written.
        for line in self.options.raw_lines.iter() {
            writer.write_all(line.as_bytes())?;
            writer.write_all("\n".as_bytes())?;
        }
        if !self.options.raw_lines.is_empty() {
            writer.write_all("\n".as_bytes())?;
        }
        let bindings = self.module.to_string();
        match self.rustfmt_generated_string(&bindings) {
            Ok(rustfmt_bindings) => {
                writer.write_all(rustfmt_bindings.as_bytes())?;
            }
            Err(err) => {
                // Non-fatal: fall back to the unformatted token stream.
                eprintln!(
                    "Failed to run rustfmt: {} (non-fatal, continuing)",
                    err
                );
                writer.write_all(bindings.as_bytes())?;
            }
        }
        Ok(())
    }
    /// Gets the rustfmt path to rustfmt the generated bindings.
    ///
    /// Lookup order: the explicit `rustfmt_path` option, then the `RUSTFMT`
    /// environment variable, then (with the `which-rustfmt` feature) a PATH
    /// search, otherwise the bare name "rustfmt".
    fn rustfmt_path<'a>(&'a self) -> io::Result<Cow<'a, PathBuf>> {
        debug_assert!(self.options.rustfmt_bindings);
        if let Some(ref p) = self.options.rustfmt_path {
            return Ok(Cow::Borrowed(p));
        }
        if let Ok(rustfmt) = env::var("RUSTFMT") {
            return Ok(Cow::Owned(rustfmt.into()));
        }
        #[cfg(feature = "which-rustfmt")]
        match which::which("rustfmt") {
            Ok(p) => Ok(Cow::Owned(p)),
            Err(e) => {
                Err(io::Error::new(io::ErrorKind::Other, format!("{}", e)))
            }
        }
        #[cfg(not(feature = "which-rustfmt"))]
        // No rustfmt binary was specified, so assume that the binary is called
        // "rustfmt" and that it is in the user's PATH.
        Ok(Cow::Owned("rustfmt".into()))
    }
    /// Checks if rustfmt_bindings is set and runs rustfmt on the string
    ///
    /// Exit code 2 from rustfmt is treated as a parse error; exit code 3
    /// ("could not format some lines") is treated as success with a warning.
    /// If rustfmt's output is not valid UTF-8, the original source is
    /// returned unchanged.
    fn rustfmt_generated_string<'a>(
        &self,
        source: &'a str,
    ) -> io::Result<Cow<'a, str>> {
        let _t = time::Timer::new("rustfmt_generated_string")
            .with_output(self.options.time_phases);
        if !self.options.rustfmt_bindings {
            return Ok(Cow::Borrowed(source));
        }
        let rustfmt = self.rustfmt_path()?;
        let mut cmd = Command::new(&*rustfmt);
        cmd.stdin(Stdio::piped()).stdout(Stdio::piped());
        if let Some(path) = self
            .options
            .rustfmt_configuration_file
            .as_ref()
            .and_then(|f| f.to_str())
        {
            cmd.args(&["--config-path", path]);
        }
        let mut child = cmd.spawn()?;
        let mut child_stdin = child.stdin.take().unwrap();
        let mut child_stdout = child.stdout.take().unwrap();
        let source = source.to_owned();
        // Write to stdin in a new thread, so that we can read from stdout on this
        // thread. This keeps the child from blocking on writing to its stdout which
        // might block us from writing to its stdin.
        let stdin_handle = ::std::thread::spawn(move || {
            let _ = child_stdin.write_all(source.as_bytes());
            source
        });
        let mut output = vec![];
        io::copy(&mut child_stdout, &mut output)?;
        let status = child.wait()?;
        let source = stdin_handle.join().expect(
            "The thread writing to rustfmt's stdin doesn't do \
             anything that could panic",
        );
        match String::from_utf8(output) {
            Ok(bindings) => match status.code() {
                Some(0) => Ok(Cow::Owned(bindings)),
                Some(2) => Err(io::Error::new(
                    io::ErrorKind::Other,
                    "Rustfmt parsing errors.".to_string(),
                )),
                Some(3) => {
                    // Partial success: some lines were left unformatted.
                    warn!("Rustfmt could not format some lines.");
                    Ok(Cow::Owned(bindings))
                }
                _ => Err(io::Error::new(
                    io::ErrorKind::Other,
                    "Internal rustfmt error".to_string(),
                )),
            },
            // Non-UTF-8 output: fall back to the unformatted source.
            _ => Ok(Cow::Owned(source)),
        }
    }
}
/// Whether a cursor should be kept for parsing: builtin cursors are dropped
/// unless the `builtins` option is enabled.
/// (The previous doc claimed this checked file matching; the code only
/// consults `builtins` and `Cursor::is_builtin`.)
fn filter_builtins(ctx: &BindgenContext, cursor: &clang::Cursor) -> bool {
    ctx.options().builtins || !cursor.is_builtin()
}
/// Parse one `Item` from the Clang cursor.
///
/// Always tells libclang to continue with the next sibling; on
/// `ParseError::Recurse` the children are visited first with the same
/// parent id.
fn parse_one(
    ctx: &mut BindgenContext,
    cursor: clang::Cursor,
    parent: Option<ItemId>,
) -> clang_sys::CXChildVisitResult {
    // Moved to the top: the original placed this `use` *after* the first
    // mention of the name (legal, since items hoist, but confusing).
    use clang_sys::CXChildVisit_Continue;
    if !filter_builtins(ctx, &cursor) {
        return CXChildVisit_Continue;
    }
    match Item::parse(cursor, parent, ctx) {
        Ok(..) => {}
        Err(ParseError::Continue) => {}
        Err(ParseError::Recurse) => {
            cursor.visit(|child| parse_one(ctx, child, parent));
        }
    }
    CXChildVisit_Continue
}
/// Parse the Clang AST into our `Item` internal representation.
///
/// Returns `Err(())` if clang reported any error-severity diagnostic for
/// the translation unit.
fn parse(context: &mut BindgenContext) -> Result<(), ()> {
    use clang_sys::*;
    // Print every diagnostic; remember whether any was an error.
    let mut any_error = false;
    for d in context.translation_unit().diags().iter() {
        let msg = d.format();
        let is_err = d.severity() >= CXDiagnostic_Error;
        eprintln!("{}, err: {}", msg, is_err);
        any_error |= is_err;
    }
    if any_error {
        return Err(());
    }
    let cursor = context.translation_unit().cursor();
    // Optionally dump the (non-builtin) AST for debugging.
    if context.options().emit_ast {
        fn dump_if_not_builtin(cur: &clang::Cursor) -> CXChildVisitResult {
            if !cur.is_builtin() {
                clang::ast_dump(&cur, 0)
            } else {
                CXChildVisit_Continue
            }
        }
        cursor.visit(|cur| dump_if_not_builtin(&cur));
    }
    // Walk the whole translation unit rooted at the top-level module.
    let root = context.root_module();
    context.with_module(root, |context| {
        cursor.visit(|cursor| parse_one(context, cursor, None))
    });
    assert!(
        context.current_module() == context.root_module(),
        "How did this happen?"
    );
    Ok(())
}
/// Extracted Clang version data, produced by [`clang_version`].
#[derive(Debug)]
pub struct ClangVersion {
    /// Major and minor semver, if parsing was successful
    pub parsed: Option<(u32, u32)>,
    /// full version string
    pub full: String,
}
/// Get the major and the minor semver numbers of Clang's version
///
/// `parsed` is `None` when the version string cannot be parsed; `full`
/// always carries the raw string reported by libclang.
pub fn clang_version() -> ClangVersion {
    ensure_libclang_is_loaded();
    // Example input: "Debian clang version 11.0.1-2"
    let raw_v: String = clang::extract_clang_version();
    // First whitespace-separated token that starts with a digit, split on '.'.
    // (`find` replaces the old `filter(..).next()` — clippy::filter_next.)
    let split_v: Option<Vec<&str>> = raw_v
        .split_whitespace()
        .find(|t| t.chars().next().map_or(false, |v| v.is_ascii_digit()))
        .map(|v| v.split('.').collect());
    match split_v {
        Some(v) if v.len() >= 2 => {
            if let (Ok(major), Ok(minor)) =
                (v[0].parse::<u32>(), v[1].parse::<u32>())
            {
                return ClangVersion {
                    parsed: Some((major, minor)),
                    // Clone required: `v` still borrows `raw_v` here.
                    full: raw_v.clone(),
                };
            }
        }
        _ => {}
    }
    // No borrows remain, so move `raw_v` instead of the old redundant clone.
    ClangVersion { parsed: None, full: raw_v }
}
/// Looks for the env var `var_${TARGET}`, and falls back to just `var` when it is not found.
///
/// When `TARGET` is set, both the raw triple and its `-`→`_` spelling are
/// tried as suffixes before falling back to the plain variable name.
fn get_target_dependent_env_var(var: &str) -> Option<String> {
    if let Ok(target) = env::var("TARGET") {
        let candidates = [
            format!("{}_{}", var, target),
            format!("{}_{}", var, target.replace("-", "_")),
        ];
        for candidate in candidates.iter() {
            if let Ok(value) = env::var(candidate) {
                return Some(value);
            }
        }
    }
    env::var(var).ok()
}
/// A ParseCallbacks implementation that will act on file includes by echoing a rerun-if-changed
/// line
///
/// When running inside a `build.rs` script, this can be used to make cargo invalidate the
/// generated bindings whenever any of the files included from the header change:
/// ```
/// use bindgen::builder;
/// let bindings = builder()
///     .header("path/to/input/header")
///     .parse_callbacks(Box::new(bindgen::CargoCallbacks))
///     .generate();
/// ```
#[derive(Debug)]
pub struct CargoCallbacks;
impl callbacks::ParseCallbacks for CargoCallbacks {
    /// Emit a `cargo:rerun-if-changed` directive for every included file.
    fn include_file(&self, filename: &str) {
        println!("cargo:rerun-if-changed={}", filename);
    }
}
/// Test command_line_flag function.
#[test]
fn commandline_flag_unit_test_function() {
    // Test 1: a default builder must report at least these flags.
    let bindings = crate::builder();
    let command_line_flags = bindings.command_line_flags();
    let test_cases = vec![
        "--rust-target",
        "--no-derive-default",
        "--generate",
        "functions,types,vars,methods,constructors,destructors",
    ]
    .iter()
    .map(|&x| x.into())
    .collect::<Vec<String>>();
    assert!(test_cases
        .iter()
        .all(|ref x| command_line_flags.contains(x),));
    // Test 2: header and allowlist settings must round-trip into flags.
    let bindings = crate::builder()
        .header("input_header")
        .allowlist_type("Distinct_Type")
        .allowlist_function("safe_function");
    let command_line_flags = bindings.command_line_flags();
    let test_cases = vec![
        "--rust-target",
        "input_header",
        "--no-derive-default",
        "--generate",
        "functions,types,vars,methods,constructors,destructors",
        "--allowlist-type",
        "Distinct_Type",
        "--allowlist-function",
        "safe_function",
    ]
    .iter()
    .map(|&x| x.into())
    .collect::<Vec<String>>();
    println!("{:?}", command_line_flags);
    assert!(test_cases
        .iter()
        .all(|ref x| command_line_flags.contains(x),));
}
#[test]
fn test_rust_to_clang_target() {
    // Apple aarch64 is spelled "arm64" by clang.
    assert_eq!(rust_to_clang_target("aarch64-apple-ios"), "arm64-apple-ios");
    // Any other triple must pass through untouched.
    assert_eq!(
        rust_to_clang_target("x86_64-unknown-linux-gnu"),
        "x86_64-unknown-linux-gnu"
    );
}
| 34.228876 | 98 | 0.600884 |
6add0204bae7bf7ac9fbf967ddd566d9b93116c4 | 3,545 | //! Ternary search is similar to binary search except that it works on a function which decreases and
//! then increases. This implementation of ternary search returns the input value corresponding with
//! the minimum output value of the function you're searching on.
//!
//! - Time Complexity: O(log(high - low)).
//!
//! NOTE: You can also work with a function which increases and then decreases;
//! simply negate your function before searching.
use crate::utils::EPS;
/// Perform a ternary search on the interval low to high.
///
/// The function must be continuous and unimodal (decreasing then
/// increasing, i.e. U-shaped). Returns the input at which the function
/// attains its minimum, accurate to within `EPS`.
pub fn ternary_search<F: Fn(f64) -> f64>(mut low: f64, mut high: f64, function: F) -> f64 {
    // Lower midpoint from the previous iteration; the search stops once
    // two consecutive lower midpoints are within EPS of each other.
    let mut previous: Option<f64> = None;
    loop {
        let lower_third = (2. * low + high) / 3.;
        let upper_third = (low + 2. * high) / 3.;
        let at_lower = function(lower_third);
        let at_upper = function(upper_third);
        // Keep the two thirds of the interval that must contain the minimum.
        if at_lower > at_upper {
            low = lower_third;
        } else {
            high = upper_third;
        }
        if let Some(prev) = previous {
            if (prev - lower_third).abs() < EPS {
                // Converged: report the previous midpoint, as the original did.
                return prev;
            }
        }
        previous = Some(lower_third);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_ternary_search() {
        // Search for the lowest point on the function x^2 + 3x + 5
        // using a ternary search on the interval [-100, +100]
        let quadratic = |x| x * x + 3. * x + 5.;
        // Solve the first derivative `2x + 3 == 0`; the minimum is at x = -1.5.
        let minimum = ternary_search(-100., 100., quadratic);
        assert!((-1.5 - minimum).abs() < EPS);
    }
}
pub mod discrete {
use crate::utils::EPS;
/// Find the index at which the value is the miminum in `values`. The `values` must
/// be a discrete and unimodal function with one and only one minimum.
pub fn ternary_search(values: &[f64], mut lo: usize, mut hi: usize) -> usize {
assert!(hi >= lo, "hi must be no less than lo");
loop {
match hi - lo {
0 => return lo,
1 => return if values[lo] < values[hi] { lo } else { hi },
2 => {
let (mut min_idx, mut min) = (lo, values[lo]);
let mut v = values[lo + 1];
if v < min {
min_idx = lo + 1;
min = v;
}
v = values[hi];
if v < min {
min_idx = hi;
}
return min_idx;
}
_ => {
let mid1 = (2 * lo + hi) / 3;
let mid2 = (lo + 2 * hi) / 3;
let res1 = values[mid1];
let res2 = values[mid2];
if (res1 - res2).abs() < EPS {
lo = mid1;
hi = mid2;
} else if res1 > res2 {
lo = mid1;
} else {
hi = mid2;
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_ternary_search_discrete() {
let values = [16., 12., 10., 3., 6., 7., 9., 10., 11., 12., 13., 17.];
let min_index = ternary_search(&values, 0, values.len() - 1);
let min_value = values[min_index];
assert_eq!(min_index, 3);
assert!((min_value - values[min_index]).abs() < EPS);
}
}
}
| 35.09901 | 101 | 0.467983 |
8a6541b399e381e8ff8e746978f26300e2f012b7 | 64,451 | //! A bunch of methods and structures more or less related to resolving imports.
use crate::diagnostics::Suggestion;
use crate::Determinacy::{self, *};
use crate::Namespace::{self, MacroNS, TypeNS};
use crate::{module_to_string, names_to_string};
use crate::{AmbiguityError, AmbiguityErrorMisc, AmbiguityKind};
use crate::{BindingKey, ModuleKind, ResolutionError, Resolver, Segment};
use crate::{CrateLint, Module, ModuleOrUniformRoot, ParentScope, PerNS, ScopeSet, Weak};
use crate::{NameBinding, NameBindingKind, PathResult, PrivacyError, ToNameBinding};
use rustc_ast::ast::NodeId;
use rustc_ast::unwrap_or;
use rustc_ast::util::lev_distance::find_best_match_for_name;
use rustc_ast_lowering::Resolver as ResolverAstLowering;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::ptr_key::PtrKey;
use rustc_errors::{pluralize, struct_span_err, Applicability};
use rustc_hir::def::{self, PartialRes};
use rustc_hir::def_id::DefId;
use rustc_middle::hir::exports::Export;
use rustc_middle::ty;
use rustc_middle::{bug, span_bug};
use rustc_session::lint::builtin::{PUB_USE_OF_PRIVATE_EXTERN_CRATE, UNUSED_IMPORTS};
use rustc_session::lint::BuiltinLintDiagnostics;
use rustc_session::DiagnosticMessageId;
use rustc_span::hygiene::ExpnId;
use rustc_span::symbol::{kw, Ident, Symbol};
use rustc_span::{MultiSpan, Span};
use log::*;
use std::cell::Cell;
use std::{mem, ptr};
type Res = def::Res<NodeId>;
/// Contains data for specific kinds of imports.
#[derive(Clone, Debug)]
pub enum ImportKind<'a> {
/// A single-name import, `use prefix::source as target`.
Single {
/// `source` in `use prefix::source as target`.
source: Ident,
/// `target` in `use prefix::source as target`.
target: Ident,
/// Bindings to which `source` refers to.
source_bindings: PerNS<Cell<Result<&'a NameBinding<'a>, Determinacy>>>,
/// Bindings introduced by `target`.
target_bindings: PerNS<Cell<Option<&'a NameBinding<'a>>>>,
/// `true` for `...::{self [as target]}` imports, `false` otherwise.
type_ns_only: bool,
/// Did this import result from a nested import? ie. `use foo::{bar, baz};`
nested: bool,
},
/// A glob import, `use prefix::*`.
Glob {
is_prelude: bool,
max_vis: Cell<ty::Visibility>, // The visibility of the greatest re-export.
// n.b. `max_vis` is only used in `finalize_import` to check for re-export errors.
},
/// An `extern crate source as target` item.
ExternCrate {
source: Option<Symbol>,
target: Ident,
},
/// A macro imported via `#[macro_use]` (NOTE(review): inferred from the
/// variant name — confirm against the construction site).
MacroUse,
}
/// One import.
#[derive(Debug, Clone)]
crate struct Import<'a> {
pub kind: ImportKind<'a>,
/// The ID of the `extern crate`, `UseTree` etc that imported this `Import`.
///
/// In the case where the `Import` was expanded from a "nested" use tree,
/// this id is the ID of the leaf tree. For example:
///
/// ```ignore (pacify the mercilous tidy)
/// use foo::bar::{a, b}
/// ```
///
/// If this is the import for `foo::bar::a`, we would have the ID of the `UseTree`
/// for `a` in this field.
pub id: NodeId,
/// The `id` of the "root" use-kind -- this is always the same as
/// `id` except in the case of "nested" use trees, in which case
/// it will be the `id` of the root use tree. e.g., in the example
/// from `id`, this would be the ID of the `use foo::bar`
/// `UseTree` node.
pub root_id: NodeId,
/// Span of the entire use statement.
pub use_span: Span,
/// Span of the entire use statement with attributes.
pub use_span_with_attributes: Span,
/// Did the use statement have any attributes?
pub has_attributes: bool,
/// Span of this use tree.
pub span: Span,
/// Span of the *root* use tree (see `root_id`).
pub root_span: Span,
/// The scope in which this import appears.
pub parent_scope: ParentScope<'a>,
/// The module path preceding the imported name(s), e.g. `foo::bar` in
/// `use foo::bar::baz`.
pub module_path: Vec<Segment>,
/// The resolution of `module_path`.
pub imported_module: Cell<Option<ModuleOrUniformRoot<'a>>>,
/// Visibility of the import; temporarily set to `Invisible` while the
/// import itself is being resolved (see `resolve_import`).
pub vis: Cell<ty::Visibility>,
/// Set once the import has been used; consulted by unused-import lints.
pub used: Cell<bool>,
}
impl<'a> Import<'a> {
    /// Returns `true` if this is a glob import (`ImportKind::Glob`).
    pub fn is_glob(&self) -> bool {
        // `matches!` replaces the manual `match … => true, _ => false` form
        // (clippy::match_like_matches_macro).
        matches!(self.kind, ImportKind::Glob { .. })
    }

    /// Returns `true` if this is a single import that resulted from a nested
    /// use tree, e.g. `bar` in `use foo::{bar, baz};`.
    pub fn is_nested(&self) -> bool {
        matches!(self.kind, ImportKind::Single { nested: true, .. })
    }

    /// Constructs the `CrateLint` context pointing at this import's root use tree.
    crate fn crate_lint(&self) -> CrateLint {
        CrateLint::UsePath { root_id: self.root_id, root_span: self.root_span }
    }
}
#[derive(Clone, Default, Debug)]
/// Records information about the resolution of a name in a namespace of a module.
pub struct NameResolution<'a> {
/// Single imports that may define the name in the namespace.
/// Imports are arena-allocated, so it's ok to use pointers as keys.
single_imports: FxHashSet<PtrKey<'a, Import<'a>>>,
/// The least shadowable known binding for this name, or None if there are no known bindings.
pub binding: Option<&'a NameBinding<'a>>,
/// Glob binding currently shadowed by `binding`, if any (set in `try_define`).
shadowed_glob: Option<&'a NameBinding<'a>>,
}
impl<'a> NameResolution<'a> {
    /// Returns the binding for the name if it is known, or `None` otherwise.
    ///
    /// A glob binding is only reported once no single imports may still
    /// define the name, since single imports take precedence over globs.
    pub(crate) fn binding(&self) -> Option<&'a NameBinding<'a>> {
        self.binding
            .filter(|binding| !binding.is_glob_import() || self.single_imports.is_empty())
    }

    /// Records a single import that may eventually define this name.
    crate fn add_single_import(&mut self, import: &'a Import<'a>) {
        self.single_imports.insert(PtrKey(import));
    }
}
impl<'a> Resolver<'a> {
/// Attempts to resolve `ident` in namespace `ns` of `module` without adjusting
/// the identifier for hygiene.
///
/// Thin wrapper around `resolve_ident_in_module_unadjusted_ext` that discards
/// the `Weak` component of the error.
crate fn resolve_ident_in_module_unadjusted(
&mut self,
module: ModuleOrUniformRoot<'a>,
ident: Ident,
ns: Namespace,
parent_scope: &ParentScope<'a>,
record_used: bool,
path_span: Span,
) -> Result<&'a NameBinding<'a>, Determinacy> {
self.resolve_ident_in_module_unadjusted_ext(
module,
ident,
ns,
parent_scope,
false,
record_used,
path_span,
)
.map_err(|(determinacy, _)| determinacy)
}
/// Attempts to resolve `ident` in namespaces `ns` of `module`.
/// Invariant: if `record_used` is `Some`, expansion and import resolution must be complete.
crate fn resolve_ident_in_module_unadjusted_ext(
&mut self,
module: ModuleOrUniformRoot<'a>,
ident: Ident,
ns: Namespace,
parent_scope: &ParentScope<'a>,
restricted_shadowing: bool,
record_used: bool,
path_span: Span,
) -> Result<&'a NameBinding<'a>, (Determinacy, Weak)> {
// Non-`Module` roots are delegated to lexical-scope resolution.
let module = match module {
ModuleOrUniformRoot::Module(module) => module,
ModuleOrUniformRoot::CrateRootAndExternPrelude => {
assert!(!restricted_shadowing);
let binding = self.early_resolve_ident_in_lexical_scope(
ident,
ScopeSet::AbsolutePath(ns),
parent_scope,
record_used,
record_used,
path_span,
);
return binding.map_err(|determinacy| (determinacy, Weak::No));
}
ModuleOrUniformRoot::ExternPrelude => {
assert!(!restricted_shadowing);
return if ns != TypeNS {
Err((Determined, Weak::No))
} else if let Some(binding) = self.extern_prelude_get(ident, !record_used) {
Ok(binding)
} else if !self.graph_root.unexpanded_invocations.borrow().is_empty() {
// Macro-expanded `extern crate` items can add names to extern prelude.
Err((Undetermined, Weak::No))
} else {
Err((Determined, Weak::No))
};
}
ModuleOrUniformRoot::CurrentScope => {
assert!(!restricted_shadowing);
if ns == TypeNS {
if ident.name == kw::Crate || ident.name == kw::DollarCrate {
let module = self.resolve_crate_root(ident);
let binding = (module, ty::Visibility::Public, module.span, ExpnId::root())
.to_name_binding(self.arenas);
return Ok(binding);
} else if ident.name == kw::Super || ident.name == kw::SelfLower {
// FIXME: Implement these with renaming requirements so that e.g.
// `use super;` doesn't work, but `use super as name;` does.
// Fall through here to get an error from `early_resolve_...`.
}
}
let scopes = ScopeSet::All(ns, true);
let binding = self.early_resolve_ident_in_lexical_scope(
ident,
scopes,
parent_scope,
record_used,
record_used,
path_span,
);
return binding.map_err(|determinacy| (determinacy, Weak::No));
}
};
// From now on we are resolving inside a concrete module.
let key = self.new_key(ident, ns);
let resolution =
self.resolution(module, key).try_borrow_mut().map_err(|_| (Determined, Weak::No))?; // This happens when there is a cycle of imports.
if let Some(binding) = resolution.binding {
if !restricted_shadowing && binding.expansion != ExpnId::root() {
if let NameBindingKind::Res(_, true) = binding.kind {
self.macro_expanded_macro_export_errors.insert((path_span, binding.span));
}
}
}
// Checks that `binding` may actually be used from `parent_scope`.
let check_usable = |this: &mut Self, binding: &'a NameBinding<'a>| {
if let Some(blacklisted_binding) = this.blacklisted_binding {
if ptr::eq(binding, blacklisted_binding) {
return Err((Determined, Weak::No));
}
}
// `extern crate` are always usable for backwards compatibility, see issue #37020,
// remove this together with `PUB_USE_OF_PRIVATE_EXTERN_CRATE`.
let usable = this.is_accessible_from(binding.vis, parent_scope.module)
|| binding.is_extern_crate();
if usable { Ok(binding) } else { Err((Determined, Weak::No)) }
};
// Expansion and import resolution are complete: produce the final binding,
// record its use, and report ambiguity/privacy errors.
if record_used {
return resolution
.binding
.and_then(|binding| {
// If the primary binding is blacklisted, search further and return the shadowed
// glob binding if it exists. What we really want here is having two separate
// scopes in a module - one for non-globs and one for globs, but until that's done
// use this hack to avoid inconsistent resolution ICEs during import validation.
if let Some(blacklisted_binding) = self.blacklisted_binding {
if ptr::eq(binding, blacklisted_binding) {
return resolution.shadowed_glob;
}
}
Some(binding)
})
.ok_or((Determined, Weak::No))
.and_then(|binding| {
if self.last_import_segment && check_usable(self, binding).is_err() {
Err((Determined, Weak::No))
} else {
self.record_use(ident, ns, binding, restricted_shadowing);
if let Some(shadowed_glob) = resolution.shadowed_glob {
// Forbid expanded shadowing to avoid time travel.
if restricted_shadowing
&& binding.expansion != ExpnId::root()
&& binding.res() != shadowed_glob.res()
{
self.ambiguity_errors.push(AmbiguityError {
kind: AmbiguityKind::GlobVsExpanded,
ident,
b1: binding,
b2: shadowed_glob,
misc1: AmbiguityErrorMisc::None,
misc2: AmbiguityErrorMisc::None,
});
}
}
if !(self.is_accessible_from(binding.vis, parent_scope.module) ||
// Remove this together with `PUB_USE_OF_PRIVATE_EXTERN_CRATE`
(self.last_import_segment && binding.is_extern_crate()))
{
self.privacy_errors.push(PrivacyError {
ident,
binding,
dedup_span: path_span,
});
}
Ok(binding)
}
});
}
// Items and single imports are not shadowable, if we have one, then it's determined.
if let Some(binding) = resolution.binding {
if !binding.is_glob_import() {
return check_usable(self, binding);
}
}
// --- From now on we either have a glob resolution or no resolution. ---
// Check if one of single imports can still define the name,
// if it can then our result is not determined and can be invalidated.
for single_import in &resolution.single_imports {
if !self.is_accessible_from(single_import.vis.get(), parent_scope.module) {
continue;
}
let module = unwrap_or!(
single_import.imported_module.get(),
return Err((Undetermined, Weak::No))
);
let ident = match single_import.kind {
ImportKind::Single { source, .. } => source,
_ => unreachable!(),
};
match self.resolve_ident_in_module(
module,
ident,
ns,
&single_import.parent_scope,
false,
path_span,
) {
Err(Determined) => continue,
Ok(binding)
if !self.is_accessible_from(binding.vis, single_import.parent_scope.module) =>
{
continue;
}
Ok(_) | Err(Undetermined) => return Err((Undetermined, Weak::No)),
}
}
// So we have a resolution that's from a glob import. This resolution is determined
// if it cannot be shadowed by some new item/import expanded from a macro.
// This happens either if there are no unexpanded macros, or expanded names cannot
// shadow globs (that happens in macro namespace or with restricted shadowing).
//
// Additionally, any macro in any module can plant names in the root module if it creates
// `macro_export` macros, so the root module effectively has unresolved invocations if any
// module has unresolved invocations.
// However, it causes resolution/expansion to stuck too often (#53144), so, to make
// progress, we have to ignore those potential unresolved invocations from other modules
// and prohibit access to macro-expanded `macro_export` macros instead (unless restricted
// shadowing is enabled, see `macro_expanded_macro_export_errors`).
let unexpanded_macros = !module.unexpanded_invocations.borrow().is_empty();
if let Some(binding) = resolution.binding {
if !unexpanded_macros || ns == MacroNS || restricted_shadowing {
return check_usable(self, binding);
} else {
return Err((Undetermined, Weak::No));
}
}
// --- From now on we have no resolution. ---
// Now we are in situation when new item/import can appear only from a glob or a macro
// expansion. With restricted shadowing names from globs and macro expansions cannot
// shadow names from outer scopes, so we can freely fallback from module search to search
// in outer scopes. For `early_resolve_ident_in_lexical_scope` to continue search in outer
// scopes we return `Undetermined` with `Weak::Yes`.
// Check if one of unexpanded macros can still define the name,
// if it can then our "no resolution" result is not determined and can be invalidated.
if unexpanded_macros {
return Err((Undetermined, Weak::Yes));
}
// Check if one of glob imports can still define the name,
// if it can then our "no resolution" result is not determined and can be invalidated.
for glob_import in module.globs.borrow().iter() {
if !self.is_accessible_from(glob_import.vis.get(), parent_scope.module) {
continue;
}
let module = match glob_import.imported_module.get() {
Some(ModuleOrUniformRoot::Module(module)) => module,
Some(_) => continue,
None => return Err((Undetermined, Weak::Yes)),
};
let tmp_parent_scope;
let (mut adjusted_parent_scope, mut ident) =
(parent_scope, ident.normalize_to_macros_2_0());
match ident.span.glob_adjust(module.expansion, glob_import.span) {
Some(Some(def)) => {
tmp_parent_scope =
ParentScope { module: self.macro_def_scope(def), ..*parent_scope };
adjusted_parent_scope = &tmp_parent_scope;
}
Some(None) => {}
None => continue,
};
let result = self.resolve_ident_in_module_unadjusted(
ModuleOrUniformRoot::Module(module),
ident,
ns,
adjusted_parent_scope,
false,
path_span,
);
match result {
Err(Determined) => continue,
Ok(binding)
if !self.is_accessible_from(binding.vis, glob_import.parent_scope.module) =>
{
continue;
}
Ok(_) | Err(Undetermined) => return Err((Undetermined, Weak::Yes)),
}
}
// No resolution and no one else can define the name - determinate error.
Err((Determined, Weak::No))
}
// Given a binding and an import that resolves to it,
// return the corresponding binding defined by the import.
crate fn import(
&self,
binding: &'a NameBinding<'a>,
import: &'a Import<'a>,
) -> &'a NameBinding<'a> {
// Use the import's visibility when the underlying binding is at least as
// visible as the import; otherwise fall back to the binding's
// pseudo-visibility. Non-glob `extern crate` bindings always take the
// import's visibility for backwards compatibility.
let vis = if binding.pseudo_vis().is_at_least(import.vis.get(), self) ||
// cf. `PUB_USE_OF_PRIVATE_EXTERN_CRATE`
!import.is_glob() && binding.is_extern_crate()
{
import.vis.get()
} else {
binding.pseudo_vis()
};
if let ImportKind::Glob { ref max_vis, .. } = import.kind {
// Track the greatest visibility re-exported through this glob.
if vis == import.vis.get() || vis.is_at_least(max_vis.get(), self) {
max_vis.set(vis)
}
}
self.arenas.alloc_name_binding(NameBinding {
kind: NameBindingKind::Import { binding, import, used: Cell::new(false) },
ambiguity: None,
span: import.span,
vis,
expansion: import.parent_scope.expansion,
})
}
// Define the name or return the existing binding if there is a collision.
crate fn try_define(
&mut self,
module: Module<'a>,
key: BindingKey,
binding: &'a NameBinding<'a>,
) -> Result<(), &'a NameBinding<'a>> {
let res = binding.res();
self.check_reserved_macro_name(key.ident, res);
self.set_binding_parent_module(binding, module);
self.update_resolution(module, key, |this, resolution| {
if let Some(old_binding) = resolution.binding {
if res == Res::Err {
// Do not override real bindings with `Res::Err`s from error recovery.
return Ok(());
}
match (old_binding.is_glob_import(), binding.is_glob_import()) {
// Two globs: ambiguous unless they resolve to the same item.
(true, true) => {
if res != old_binding.res() {
resolution.binding = Some(this.ambiguity(
AmbiguityKind::GlobVsGlob,
old_binding,
binding,
));
} else if !old_binding.vis.is_at_least(binding.vis, &*this) {
// We are glob-importing the same item but with greater visibility.
resolution.binding = Some(binding);
}
}
// A glob and a non-glob: the non-glob binding wins and the glob
// is remembered as shadowed (modulo macro-namespace ambiguity).
(old_glob @ true, false) | (old_glob @ false, true) => {
let (glob_binding, nonglob_binding) =
if old_glob { (old_binding, binding) } else { (binding, old_binding) };
if glob_binding.res() != nonglob_binding.res()
&& key.ns == MacroNS
&& nonglob_binding.expansion != ExpnId::root()
{
resolution.binding = Some(this.ambiguity(
AmbiguityKind::GlobVsExpanded,
nonglob_binding,
glob_binding,
));
} else {
resolution.binding = Some(nonglob_binding);
}
resolution.shadowed_glob = Some(glob_binding);
}
// Two non-glob bindings: genuine collision, report it to the caller.
(false, false) => {
return Err(old_binding);
}
}
} else {
resolution.binding = Some(binding);
}
Ok(())
})
}
/// Creates a copy of `primary_binding` marked as ambiguous with
/// `secondary_binding` (with ambiguity kind `kind`).
fn ambiguity(
&self,
kind: AmbiguityKind,
primary_binding: &'a NameBinding<'a>,
secondary_binding: &'a NameBinding<'a>,
) -> &'a NameBinding<'a> {
self.arenas.alloc_name_binding(NameBinding {
ambiguity: Some((secondary_binding, kind)),
..primary_binding.clone()
})
}
// Use `f` to mutate the resolution of the name in the module.
// If the resolution becomes a success, define it in the module's glob importers.
fn update_resolution<T, F>(&mut self, module: Module<'a>, key: BindingKey, f: F) -> T
where
F: FnOnce(&mut Resolver<'a>, &mut NameResolution<'a>) -> T,
{
// Ensure that `resolution` isn't borrowed when defining in the module's glob importers,
// during which the resolution might end up getting re-defined via a glob cycle.
let (binding, t) = {
let resolution = &mut *self.resolution(module, key).borrow_mut();
let old_binding = resolution.binding();
let t = f(self, resolution);
match resolution.binding() {
// Only propagate a binding that newly appeared; if there already was
// one, or there still is none, nothing needs to be done.
_ if old_binding.is_some() => return t,
None => return t,
Some(binding) => match old_binding {
Some(old_binding) if ptr::eq(old_binding, binding) => return t,
_ => (binding, t),
},
}
};
// Define `binding` in `module`s glob importers.
for import in module.glob_importers.borrow_mut().iter() {
let mut ident = key.ident;
let scope = match ident.span.reverse_glob_adjust(module.expansion, import.span) {
Some(Some(def)) => self.macro_def_scope(def),
Some(None) => import.parent_scope.module,
None => continue,
};
if self.is_accessible_from(binding.vis, scope) {
let imported_binding = self.import(binding, import);
let key = BindingKey { ident, ..key };
let _ = self.try_define(import.parent_scope.module, key, imported_binding);
}
}
t
}
// Define a "dummy" resolution containing a Res::Err as a placeholder for a
// failed resolution
fn import_dummy_binding(&mut self, import: &'a Import<'a>) {
if let ImportKind::Single { target, .. } = import.kind {
let dummy_binding = self.dummy_binding;
let dummy_binding = self.import(dummy_binding, import);
// Define the dummy target in every namespace of the parent module.
self.per_ns(|this, ns| {
let key = this.new_key(target, ns);
let _ = this.try_define(import.parent_scope.module, key, dummy_binding);
// Consider erroneous imports used to avoid duplicate diagnostics.
this.record_use(target, ns, dummy_binding, false);
});
}
}
}
/// An error that may be transformed into a diagnostic later. Used to combine multiple unresolved
/// import errors within the same use tree into a single diagnostic.
#[derive(Debug, Clone)]
struct UnresolvedImportError {
// Span of the failing import.
span: Span,
// Primary label attached to `span`, if any.
label: Option<String>,
// Additional notes for the diagnostic.
note: Vec<String>,
// Suggested replacement, if any.
suggestion: Option<Suggestion>,
}
/// Wraps a mutable reference to the resolver and drives import resolution
/// (see `resolve_imports` for the fixed-point algorithm).
pub struct ImportResolver<'a, 'b> {
pub r: &'a mut Resolver<'b>,
}
impl<'a, 'b> ty::DefIdTree for &'a ImportResolver<'a, 'b> {
// Delegate parent lookups to the wrapped resolver.
fn parent(self, id: DefId) -> Option<DefId> {
self.r.parent(id)
}
}
impl<'a, 'b> ImportResolver<'a, 'b> {
// Import resolution
//
// This is a fixed-point algorithm. We resolve imports until our efforts
// are stymied by an unresolved import; then we bail out of the current
// module and continue. We terminate successfully once no more imports
// remain or unsuccessfully when no forward progress in resolving imports
// is made.
/// Resolves all imports for the crate. This method performs the fixed-
/// point iteration: passes are repeated over the indeterminate imports
/// until a pass makes no further progress.
pub fn resolve_imports(&mut self) {
    let mut prev_num_indeterminates = self.r.indeterminate_imports.len() + 1;
    while self.r.indeterminate_imports.len() < prev_num_indeterminates {
        prev_num_indeterminates = self.r.indeterminate_imports.len();
        for import in mem::take(&mut self.r.indeterminate_imports) {
            // `match` on a `bool` is unidiomatic (clippy::match_bool).
            if self.resolve_import(&import) {
                self.r.determined_imports.push(import)
            } else {
                self.r.indeterminate_imports.push(import)
            }
        }
    }
}
/// Finalizes import resolution: runs per-module cleanup, then re-checks all
/// determined and still-indeterminate imports, buffering unresolved-import
/// errors so that each use-tree produces a single diagnostic.
pub fn finalize_imports(&mut self) {
for module in self.r.arenas.local_modules().iter() {
self.finalize_resolutions_in(module);
}
let mut seen_spans = FxHashSet::default();
let mut errors = vec![];
let mut prev_root_id: NodeId = NodeId::from_u32(0);
let determined_imports = mem::take(&mut self.r.determined_imports);
let indeterminate_imports = mem::take(&mut self.r.indeterminate_imports);
// Process determined imports first, then the indeterminate ones.
for (is_indeterminate, import) in determined_imports
.into_iter()
.map(|i| (false, i))
.chain(indeterminate_imports.into_iter().map(|i| (true, i)))
{
if let Some(err) = self.finalize_import(import) {
if let ImportKind::Single { source, ref source_bindings, .. } = import.kind {
if source.name == kw::SelfLower {
// Silence `unresolved import` error if E0429 is already emitted
if let Err(Determined) = source_bindings.value_ns.get() {
continue;
}
}
}
// If the error is a single failed import then create a "fake" import
// resolution for it so that later resolve stages won't complain.
self.r.import_dummy_binding(import);
if prev_root_id.as_u32() != 0
&& prev_root_id.as_u32() != import.root_id.as_u32()
&& !errors.is_empty()
{
// In the case of a new import line, throw a diagnostic message
// for the previous line.
self.throw_unresolved_import_error(errors, None);
errors = vec![];
}
if seen_spans.insert(err.span) {
let path = import_path_to_string(
&import.module_path.iter().map(|seg| seg.ident).collect::<Vec<_>>(),
&import.kind,
err.span,
);
errors.push((path, err));
prev_root_id = import.root_id;
}
} else if is_indeterminate {
// Consider erroneous imports used to avoid duplicate diagnostics.
self.r.used_imports.insert((import.id, TypeNS));
let path = import_path_to_string(
&import.module_path.iter().map(|seg| seg.ident).collect::<Vec<_>>(),
&import.kind,
import.span,
);
let err = UnresolvedImportError {
span: import.span,
label: None,
note: Vec::new(),
suggestion: None,
};
errors.push((path, err));
}
}
if !errors.is_empty() {
self.throw_unresolved_import_error(errors, None);
}
}
/// Emits a single `E0432` diagnostic covering the collected unresolved-import
/// errors; at most `MAX_LABEL_COUNT` of them get labels/suggestions attached.
fn throw_unresolved_import_error(
&self,
errors: Vec<(String, UnresolvedImportError)>,
span: Option<MultiSpan>,
) {
/// Upper limit on the number of `span_label` messages.
const MAX_LABEL_COUNT: usize = 10;
let (span, msg) = if errors.is_empty() {
(span.unwrap(), "unresolved import".to_string())
} else {
let span = MultiSpan::from_spans(errors.iter().map(|(_, err)| err.span).collect());
let paths = errors.iter().map(|(path, _)| format!("`{}`", path)).collect::<Vec<_>>();
let msg = format!("unresolved import{} {}", pluralize!(paths.len()), paths.join(", "),);
(span, msg)
};
let mut diag = struct_span_err!(self.r.session, span, E0432, "{}", &msg);
// Notes are only taken from the last error to avoid duplication.
if let Some((_, UnresolvedImportError { note, .. })) = errors.iter().last() {
for message in note {
diag.note(&message);
}
}
for (_, err) in errors.into_iter().take(MAX_LABEL_COUNT) {
if let Some(label) = err.label {
diag.span_label(err.span, label);
}
if let Some((suggestions, msg, applicability)) = err.suggestion {
diag.multipart_suggestion(&msg, suggestions, applicability);
}
}
diag.emit();
}
/// Attempts to resolve the given import, returning true if its resolution is determined.
/// If successful, the resolved bindings are written into the module.
fn resolve_import(&mut self, import: &'b Import<'b>) -> bool {
debug!(
"(resolving import for module) resolving import `{}::...` in `{}`",
Segment::names_to_string(&import.module_path),
module_to_string(import.parent_scope.module).unwrap_or_else(|| "???".to_string()),
);
// Resolve the module path first, unless that has already happened.
let module = if let Some(module) = import.imported_module.get() {
module
} else {
// For better failure detection, pretend that the import will
// not define any names while resolving its module path.
let orig_vis = import.vis.replace(ty::Visibility::Invisible);
let path_res = self.r.resolve_path(
&import.module_path,
None,
&import.parent_scope,
false,
import.span,
import.crate_lint(),
);
import.vis.set(orig_vis);
match path_res {
PathResult::Module(module) => module,
PathResult::Indeterminate => return false,
PathResult::NonModule(..) | PathResult::Failed { .. } => return true,
}
};
import.imported_module.set(Some(module));
let (source, target, source_bindings, target_bindings, type_ns_only) = match import.kind {
ImportKind::Single {
source,
target,
ref source_bindings,
ref target_bindings,
type_ns_only,
..
} => (source, target, source_bindings, target_bindings, type_ns_only),
ImportKind::Glob { .. } => {
self.resolve_glob_import(import);
return true;
}
_ => unreachable!(),
};
// Resolve `source` in each namespace; the import is determined only once
// no namespace resolution is still undetermined.
let mut indeterminate = false;
self.r.per_ns(|this, ns| {
if !type_ns_only || ns == TypeNS {
if let Err(Undetermined) = source_bindings[ns].get() {
// For better failure detection, pretend that the import will
// not define any names while resolving its module path.
let orig_vis = import.vis.replace(ty::Visibility::Invisible);
let binding = this.resolve_ident_in_module(
module,
source,
ns,
&import.parent_scope,
false,
import.span,
);
import.vis.set(orig_vis);
source_bindings[ns].set(binding);
} else {
return;
};
let parent = import.parent_scope.module;
match source_bindings[ns].get() {
Err(Undetermined) => indeterminate = true,
// Don't update the resolution, because it was never added.
Err(Determined) if target.name == kw::Underscore => {}
Err(Determined) => {
let key = this.new_key(target, ns);
this.update_resolution(parent, key, |_, resolution| {
resolution.single_imports.remove(&PtrKey(import));
});
}
Ok(binding) if !binding.is_importable() => {
let msg = format!("`{}` is not directly importable", target);
struct_span_err!(this.session, import.span, E0253, "{}", &msg)
.span_label(import.span, "cannot be imported directly")
.emit();
// Do not import this illegal binding. Import a dummy binding and pretend
// everything is fine
this.import_dummy_binding(import);
}
Ok(binding) => {
let imported_binding = this.import(binding, import);
target_bindings[ns].set(Some(imported_binding));
this.define(parent, target, ns, imported_binding);
}
}
}
});
!indeterminate
}
/// Performs final import resolution, consistency checks and error reporting.
///
/// Optionally returns an unresolved import error. This error is buffered and used to
/// consolidate multiple unresolved import errors into a single diagnostic.
fn finalize_import(&mut self, import: &'b Import<'b>) -> Option<UnresolvedImportError> {
let orig_vis = import.vis.replace(ty::Visibility::Invisible);
let orig_blacklisted_binding = match &import.kind {
ImportKind::Single { target_bindings, .. } => {
Some(mem::replace(&mut self.r.blacklisted_binding, target_bindings[TypeNS].get()))
}
_ => None,
};
let prev_ambiguity_errors_len = self.r.ambiguity_errors.len();
let path_res = self.r.resolve_path(
&import.module_path,
None,
&import.parent_scope,
true,
import.span,
import.crate_lint(),
);
let no_ambiguity = self.r.ambiguity_errors.len() == prev_ambiguity_errors_len;
if let Some(orig_blacklisted_binding) = orig_blacklisted_binding {
self.r.blacklisted_binding = orig_blacklisted_binding;
}
import.vis.set(orig_vis);
if let PathResult::Failed { .. } | PathResult::NonModule(..) = path_res {
// Consider erroneous imports used to avoid duplicate diagnostics.
self.r.used_imports.insert((import.id, TypeNS));
}
let module = match path_res {
PathResult::Module(module) => {
// Consistency checks, analogous to `finalize_macro_resolutions`.
if let Some(initial_module) = import.imported_module.get() {
if !ModuleOrUniformRoot::same_def(module, initial_module) && no_ambiguity {
span_bug!(import.span, "inconsistent resolution for an import");
}
} else {
if self.r.privacy_errors.is_empty() {
let msg = "cannot determine resolution for the import";
let msg_note = "import resolution is stuck, try simplifying other imports";
self.r.session.struct_span_err(import.span, msg).note(msg_note).emit();
}
}
module
}
PathResult::Failed { is_error_from_last_segment: false, span, label, suggestion } => {
if no_ambiguity {
assert!(import.imported_module.get().is_none());
self.r
.report_error(span, ResolutionError::FailedToResolve { label, suggestion });
}
return None;
}
PathResult::Failed { is_error_from_last_segment: true, span, label, suggestion } => {
if no_ambiguity {
assert!(import.imported_module.get().is_none());
let err = match self.make_path_suggestion(
span,
import.module_path.clone(),
&import.parent_scope,
) {
Some((suggestion, note)) => UnresolvedImportError {
span,
label: None,
note,
suggestion: Some((
vec![(span, Segment::names_to_string(&suggestion))],
String::from("a similar path exists"),
Applicability::MaybeIncorrect,
)),
},
None => UnresolvedImportError {
span,
label: Some(label),
note: Vec::new(),
suggestion,
},
};
return Some(err);
}
return None;
}
PathResult::NonModule(path_res) if path_res.base_res() == Res::Err => {
if no_ambiguity {
assert!(import.imported_module.get().is_none());
}
// The error was already reported earlier.
return None;
}
PathResult::Indeterminate | PathResult::NonModule(..) => unreachable!(),
};
let (ident, target, source_bindings, target_bindings, type_ns_only) = match import.kind {
ImportKind::Single {
source,
target,
ref source_bindings,
ref target_bindings,
type_ns_only,
..
} => (source, target, source_bindings, target_bindings, type_ns_only),
ImportKind::Glob { is_prelude, ref max_vis } => {
if import.module_path.len() <= 1 {
// HACK(eddyb) `lint_if_path_starts_with_module` needs at least
// 2 segments, so the `resolve_path` above won't trigger it.
let mut full_path = import.module_path.clone();
full_path.push(Segment::from_ident(Ident::invalid()));
self.r.lint_if_path_starts_with_module(
import.crate_lint(),
&full_path,
import.span,
None,
);
}
if let ModuleOrUniformRoot::Module(module) = module {
if module.def_id() == import.parent_scope.module.def_id() {
// Importing a module into itself is not allowed.
return Some(UnresolvedImportError {
span: import.span,
label: Some(String::from("cannot glob-import a module into itself")),
note: Vec::new(),
suggestion: None,
});
}
}
if !is_prelude &&
max_vis.get() != ty::Visibility::Invisible && // Allow empty globs.
!max_vis.get().is_at_least(import.vis.get(), &*self)
{
let msg = "glob import doesn't reexport anything because no candidate is public enough";
self.r.lint_buffer.buffer_lint(UNUSED_IMPORTS, import.id, import.span, msg);
}
return None;
}
_ => unreachable!(),
};
let mut all_ns_err = true;
self.r.per_ns(|this, ns| {
if !type_ns_only || ns == TypeNS {
let orig_vis = import.vis.replace(ty::Visibility::Invisible);
let orig_blacklisted_binding =
mem::replace(&mut this.blacklisted_binding, target_bindings[ns].get());
let orig_last_import_segment = mem::replace(&mut this.last_import_segment, true);
let binding = this.resolve_ident_in_module(
module,
ident,
ns,
&import.parent_scope,
true,
import.span,
);
this.last_import_segment = orig_last_import_segment;
this.blacklisted_binding = orig_blacklisted_binding;
import.vis.set(orig_vis);
match binding {
Ok(binding) => {
// Consistency checks, analogous to `finalize_macro_resolutions`.
let initial_res = source_bindings[ns].get().map(|initial_binding| {
all_ns_err = false;
if let Some(target_binding) = target_bindings[ns].get() {
// Note that as_str() de-gensyms the Symbol
if target.name.as_str() == "_"
&& initial_binding.is_extern_crate()
&& !initial_binding.is_import()
{
this.record_use(
ident,
ns,
target_binding,
import.module_path.is_empty(),
);
}
}
initial_binding.res()
});
let res = binding.res();
if let Ok(initial_res) = initial_res {
if res != initial_res && this.ambiguity_errors.is_empty() {
span_bug!(import.span, "inconsistent resolution for an import");
}
} else {
if res != Res::Err
&& this.ambiguity_errors.is_empty()
&& this.privacy_errors.is_empty()
{
let msg = "cannot determine resolution for the import";
let msg_note =
"import resolution is stuck, try simplifying other imports";
this.session
.struct_span_err(import.span, msg)
.note(msg_note)
.emit();
}
}
}
Err(..) => {
// FIXME: This assert may fire if public glob is later shadowed by a private
// single import (see test `issue-55884-2.rs`). In theory single imports should
// always block globs, even if they are not yet resolved, so that this kind of
// self-inconsistent resolution never happens.
// Re-enable the assert when the issue is fixed.
// assert!(result[ns].get().is_err());
}
}
}
});
if all_ns_err {
let mut all_ns_failed = true;
self.r.per_ns(|this, ns| {
if !type_ns_only || ns == TypeNS {
let binding = this.resolve_ident_in_module(
module,
ident,
ns,
&import.parent_scope,
true,
import.span,
);
if binding.is_ok() {
all_ns_failed = false;
}
}
});
return if all_ns_failed {
let resolutions = match module {
ModuleOrUniformRoot::Module(module) => {
Some(self.r.resolutions(module).borrow())
}
_ => None,
};
let resolutions = resolutions.as_ref().into_iter().flat_map(|r| r.iter());
let names = resolutions.filter_map(|(BindingKey { ident: i, .. }, resolution)| {
if *i == ident {
return None;
} // Never suggest the same name
match *resolution.borrow() {
NameResolution { binding: Some(name_binding), .. } => {
match name_binding.kind {
NameBindingKind::Import { binding, .. } => {
match binding.kind {
// Never suggest the name that has binding error
// i.e., the name that cannot be previously resolved
NameBindingKind::Res(Res::Err, _) => None,
_ => Some(&i.name),
}
}
_ => Some(&i.name),
}
}
NameResolution { ref single_imports, .. } if single_imports.is_empty() => {
None
}
_ => Some(&i.name),
}
});
let lev_suggestion =
find_best_match_for_name(names, &ident.as_str(), None).map(|suggestion| {
(
vec![(ident.span, suggestion.to_string())],
String::from("a similar name exists in the module"),
Applicability::MaybeIncorrect,
)
});
let (suggestion, note) =
match self.check_for_module_export_macro(import, module, ident) {
Some((suggestion, note)) => (suggestion.or(lev_suggestion), note),
_ => (lev_suggestion, Vec::new()),
};
let label = match module {
ModuleOrUniformRoot::Module(module) => {
let module_str = module_to_string(module);
if let Some(module_str) = module_str {
format!("no `{}` in `{}`", ident, module_str)
} else {
format!("no `{}` in the root", ident)
}
}
_ => {
if !ident.is_path_segment_keyword() {
format!("no `{}` external crate", ident)
} else {
// HACK(eddyb) this shows up for `self` & `super`, which
// should work instead - for now keep the same error message.
format!("no `{}` in the root", ident)
}
}
};
Some(UnresolvedImportError {
span: import.span,
label: Some(label),
note,
suggestion,
})
} else {
// `resolve_ident_in_module` reported a privacy error.
self.r.import_dummy_binding(import);
None
};
}
let mut reexport_error = None;
let mut any_successful_reexport = false;
self.r.per_ns(|this, ns| {
if let Ok(binding) = source_bindings[ns].get() {
let vis = import.vis.get();
if !binding.pseudo_vis().is_at_least(vis, &*this) {
reexport_error = Some((ns, binding));
} else {
any_successful_reexport = true;
}
}
});
// All namespaces must be re-exported with extra visibility for an error to occur.
if !any_successful_reexport {
let (ns, binding) = reexport_error.unwrap();
if ns == TypeNS && binding.is_extern_crate() {
let msg = format!(
"extern crate `{}` is private, and cannot be \
re-exported (error E0365), consider declaring with \
`pub`",
ident
);
self.r.lint_buffer.buffer_lint(
PUB_USE_OF_PRIVATE_EXTERN_CRATE,
import.id,
import.span,
&msg,
);
} else if ns == TypeNS {
struct_span_err!(
self.r.session,
import.span,
E0365,
"`{}` is private, and cannot be re-exported",
ident
)
.span_label(import.span, format!("re-export of private `{}`", ident))
.note(&format!("consider declaring type or module `{}` with `pub`", ident))
.emit();
} else {
let msg = format!("`{}` is private, and cannot be re-exported", ident);
let note_msg =
format!("consider marking `{}` as `pub` in the imported module", ident,);
struct_span_err!(self.r.session, import.span, E0364, "{}", &msg)
.span_note(import.span, ¬e_msg)
.emit();
}
}
if import.module_path.len() <= 1 {
// HACK(eddyb) `lint_if_path_starts_with_module` needs at least
// 2 segments, so the `resolve_path` above won't trigger it.
let mut full_path = import.module_path.clone();
full_path.push(Segment::from_ident(ident));
self.r.per_ns(|this, ns| {
if let Ok(binding) = source_bindings[ns].get() {
this.lint_if_path_starts_with_module(
import.crate_lint(),
&full_path,
import.span,
Some(binding),
);
}
});
}
// Record what this import resolves to for later uses in documentation,
// this may resolve to either a value or a type, but for documentation
// purposes it's good enough to just favor one over the other.
self.r.per_ns(|this, ns| {
if let Ok(binding) = source_bindings[ns].get() {
this.import_res_map.entry(import.id).or_default()[ns] = Some(binding.res());
}
});
self.check_for_redundant_imports(ident, import, source_bindings, target_bindings, target);
debug!("(resolving single import) successfully resolved import");
None
}
/// Checks whether a resolved single import is redundant — i.e. whether the
/// imported name would already resolve, through the surrounding lexical
/// scope, to the very same binding — and buffers an `UNUSED_IMPORTS` lint
/// with a `RedundantImport` diagnostic if so.
fn check_for_redundant_imports(
    &mut self,
    ident: Ident,
    import: &'b Import<'b>,
    source_bindings: &PerNS<Cell<Result<&'b NameBinding<'b>, Determinacy>>>,
    target_bindings: &PerNS<Cell<Option<&'b NameBinding<'b>>>>,
    target: Ident,
) {
    // Skip if the import was produced by a macro.
    if import.parent_scope.expansion != ExpnId::root() {
        return;
    }

    // Skip if we are inside a named module (in contrast to an anonymous
    // module defined by a block).
    if let ModuleKind::Def(..) = import.parent_scope.module.kind {
        return;
    }

    let mut is_redundant = PerNS { value_ns: None, type_ns: None, macro_ns: None };
    let mut redundant_span = PerNS { value_ns: None, type_ns: None, macro_ns: None };

    self.r.per_ns(|this, ns| {
        if let Ok(binding) = source_bindings[ns].get() {
            if binding.res() == Res::Err {
                return;
            }

            // Temporarily blacklist the binding produced by this very import
            // so the lexical-scope lookup below cannot resolve back to it.
            let orig_blacklisted_binding =
                mem::replace(&mut this.blacklisted_binding, target_bindings[ns].get());

            match this.early_resolve_ident_in_lexical_scope(
                target,
                ScopeSet::All(ns, false),
                &import.parent_scope,
                false,
                false,
                import.span,
            ) {
                Ok(other_binding) => {
                    // Redundant only if the alternative path resolves to the
                    // same res and is unambiguous.
                    is_redundant[ns] = Some(
                        binding.res() == other_binding.res() && !other_binding.is_ambiguity(),
                    );
                    redundant_span[ns] = Some((other_binding.span, other_binding.is_import()));
                }
                Err(_) => is_redundant[ns] = Some(false),
            }

            this.blacklisted_binding = orig_blacklisted_binding;
        }
    });

    // Lint only when every namespace the import actually populated is
    // redundant (no namespace contradicts it).
    if !is_redundant.is_empty() && is_redundant.present_items().all(|is_redundant| is_redundant)
    {
        let mut redundant_spans: Vec<_> = redundant_span.present_items().collect();
        redundant_spans.sort();
        redundant_spans.dedup();
        self.r.lint_buffer.buffer_lint_with_diagnostic(
            UNUSED_IMPORTS,
            import.id,
            import.span,
            &format!("the item `{}` is imported redundantly", ident),
            BuiltinLintDiagnostics::RedundantImport(redundant_spans, ident),
        );
    }
}
/// Resolves a glob import (`use foo::*`): registers this import as a glob
/// importer of the target module and defines every currently-known,
/// accessible binding of that module inside the importing module.
fn resolve_glob_import(&mut self, import: &'b Import<'b>) {
    let module = match import.imported_module.get().unwrap() {
        ModuleOrUniformRoot::Module(module) => module,
        // Globbing the uniform root is rejected.
        _ => {
            self.r.session.span_err(import.span, "cannot glob-import all possible crates");
            return;
        }
    };

    if module.is_trait() {
        self.r.session.span_err(import.span, "items in traits are not importable.");
        return;
    } else if module.def_id() == import.parent_scope.module.def_id() {
        // Glob-importing a module into itself is a no-op.
        return;
    } else if let ImportKind::Glob { is_prelude: true, .. } = import.kind {
        // A prelude glob only records the prelude module; it defines nothing
        // in the importing module itself.
        self.r.prelude = Some(module);
        return;
    }

    // Add to module's glob_importers
    module.glob_importers.borrow_mut().push(import);

    // Ensure that `resolutions` isn't borrowed during `try_define`,
    // since it might get updated via a glob cycle.
    let bindings = self
        .r
        .resolutions(module)
        .borrow()
        .iter()
        .filter_map(|(key, resolution)| {
            resolution.borrow().binding().map(|binding| (*key, binding))
        })
        .collect::<Vec<_>>();
    for (mut key, binding) in bindings {
        // Adjust the hygiene of each imported name relative to the glob's
        // span; names that don't survive the adjustment are skipped.
        let scope = match key.ident.span.reverse_glob_adjust(module.expansion, import.span) {
            Some(Some(def)) => self.r.macro_def_scope(def),
            Some(None) => import.parent_scope.module,
            None => continue,
        };
        if self.r.is_accessible_from(binding.pseudo_vis(), scope) {
            let imported_binding = self.r.import(binding, import);
            let _ = self.r.try_define(import.parent_scope.module, key, imported_binding);
        }
    }

    // Record the destination of this import
    self.r.record_partial_res(import.id, PartialRes::new(module.res().unwrap()));
}
// Miscellaneous post-processing, including recording re-exports,
// reporting conflicts, and reporting unresolved imports.
fn finalize_resolutions_in(&mut self, module: Module<'b>) {
    // Since import resolution is finished, globs will not define any more names.
    *module.globs.borrow_mut() = Vec::new();

    let mut reexports = Vec::new();

    module.for_each_child(self.r, |this, ident, ns, binding| {
        // Filter away ambiguous imports and anything that has def-site
        // hygiene.
        // FIXME: Implement actual cross-crate hygiene.
        let is_good_import =
            binding.is_import() && !binding.is_ambiguity() && !ident.span.from_expansion();
        if is_good_import || binding.is_macro_def() {
            let res = binding.res().map_id(|id| this.local_def_id(id));
            if res != def::Res::Err {
                reexports.push(Export { ident, res, span: binding.span, vis: binding.vis });
            }
        }

        // Error on re-exports of an enum variant whose original binding is
        // not at least as visible as the re-export itself.
        if let NameBindingKind::Import { binding: orig_binding, import, .. } = binding.kind {
            if ns == TypeNS
                && orig_binding.is_variant()
                && !orig_binding.vis.is_at_least(binding.vis, &*this)
            {
                let msg = match import.kind {
                    ImportKind::Single { .. } => {
                        format!("variant `{}` is private and cannot be re-exported", ident)
                    }
                    ImportKind::Glob { .. } => {
                        let msg = "enum is private and its variants \
                                   cannot be re-exported"
                            .to_owned();
                        // Emit the glob-level message only once per span.
                        let error_id = (
                            DiagnosticMessageId::ErrorId(0), // no code?!
                            Some(binding.span),
                            msg.clone(),
                        );
                        let fresh =
                            this.session.one_time_diagnostics.borrow_mut().insert(error_id);
                        if !fresh {
                            return;
                        }
                        msg
                    }
                    ref s => bug!("unexpected import kind {:?}", s),
                };
                let mut err = this.session.struct_span_err(binding.span, &msg);

                // Locate the enum's own definition so we can suggest making
                // it public.
                let imported_module = match import.imported_module.get() {
                    Some(ModuleOrUniformRoot::Module(module)) => module,
                    _ => bug!("module should exist"),
                };
                let parent_module = imported_module.parent.expect("parent should exist");
                let resolutions = this.resolutions(parent_module).borrow();
                let enum_path_segment_index = import.module_path.len() - 1;
                let enum_ident = import.module_path[enum_path_segment_index].ident;

                let key = this.new_key(enum_ident, TypeNS);
                let enum_resolution = resolutions.get(&key).expect("resolution should exist");
                let enum_span =
                    enum_resolution.borrow().binding.expect("binding should exist").span;
                let enum_def_span = this.session.source_map().guess_head_span(enum_span);
                let enum_def_snippet = this
                    .session
                    .source_map()
                    .span_to_snippet(enum_def_span)
                    .expect("snippet should exist");
                // potentially need to strip extant `crate`/`pub(path)` for suggestion
                let after_vis_index = enum_def_snippet
                    .find("enum")
                    .expect("`enum` keyword should exist in snippet");
                let suggestion = format!("pub {}", &enum_def_snippet[after_vis_index..]);

                this.session.diag_span_suggestion_once(
                    &mut err,
                    DiagnosticMessageId::ErrorId(0),
                    enum_def_span,
                    "consider making the enum public",
                    suggestion,
                );
                err.emit();
            }
        }
    });

    if !reexports.is_empty() {
        if let Some(def_id) = module.def_id() {
            // Call to `expect_local` should be fine because current
            // code is only called for local modules.
            self.r.export_map.insert(def_id.expect_local(), reexports);
        }
    }
}
}
/// Renders an import path for diagnostics, truncating at the segment whose
/// span matches `span` when one exists, and prefixing handling for a
/// leading `::` (`kw::PathRoot`) segment.
fn import_path_to_string(names: &[Ident], import_kind: &ImportKind<'_>, span: Span) -> String {
    // Position of the segment the diagnostic points at (ignoring the
    // synthetic path-root segment).
    let pos = names.iter().position(|p| span == p.span && p.name != kw::PathRoot);
    let global = !names.is_empty() && names[0].name == kw::PathRoot;
    if let Some(pos) = pos {
        // Truncate the path at the offending segment.
        let names = if global { &names[1..pos + 1] } else { &names[..pos + 1] };
        names_to_string(&names.iter().map(|ident| ident.name).collect::<Vec<_>>())
    } else {
        let names = if global { &names[1..] } else { names };
        if names.is_empty() {
            import_kind_to_string(import_kind)
        } else {
            // Full module path followed by the final import item.
            format!(
                "{}::{}",
                names_to_string(&names.iter().map(|ident| ident.name).collect::<Vec<_>>()),
                import_kind_to_string(import_kind),
            )
        }
    }
}
/// Renders the final element of an import for diagnostics: the source name
/// for a single import, `*` for a glob, and fixed markers for extern-crate
/// and `#[macro_use]` imports.
fn import_kind_to_string(import_kind: &ImportKind<'_>) -> String {
    let literal = match import_kind {
        ImportKind::Single { source, .. } => return source.to_string(),
        ImportKind::Glob { .. } => "*",
        ImportKind::ExternCrate { .. } => "<extern crate>",
        ImportKind::MacroUse => "#[macro_use]",
    };
    literal.to_string()
}
| 42.76775 | 145 | 0.500209 |
fe9c9e96fa58ad8d9139408487e178210c9810e7 | 3,613 | use derive_more::Deref;
use num_enum::TryFromPrimitive;
use std::ops::AddAssign;
use tap::Tap;
// Registers the day-3 solver: the puzzle input file is parsed into `Bits`
// lines, part 1 consumes the parsed input by reference and part 2 by
// mutable reference (part 2 reorders the slice in place).
// NOTE(review): `register!`/`input!` are project-local macros; the exact
// expansion (and how `input` becomes mutable for part 2) is defined
// elsewhere — confirm there.
register!(
    "input/day3.txt";
    (input: input!(Bits)) -> u64 {
        part1(&input);
        part2(&mut input);
    }
);
/// A single binary digit, stored as its ASCII byte so input bytes can be
/// converted directly via the derived `TryFromPrimitive`.
#[derive(Clone, Copy, Debug, TryFromPrimitive)]
#[repr(u8)]
pub enum Bit {
    One = b'1',
    Zero = b'0',
}
/// Folds a bit into a signed tally: a one counts +1 and a zero counts -1,
/// so the sign of the running total tells which bit value is the majority.
impl AddAssign<Bit> for isize {
    fn add_assign(&mut self, rhs: Bit) {
        let delta: isize = if matches!(rhs, Bit::One) { 1 } else { -1 };
        *self += delta;
    }
}
/// One parsed line of the diagnostic report: a sequence of bits, most
/// significant first. `Deref`s to the inner `Vec<Bit>`.
#[derive(Clone, Debug, Deref)]
pub struct Bits(Vec<Bit>);

impl FromIterator<Bit> for Bits {
    fn from_iter<T: IntoIterator<Item = Bit>>(iter: T) -> Self {
        Self(iter.into_iter().collect())
    }
}

/// Parses a line byte-by-byte; panics (via `unwrap`) on any byte other
/// than `b'0'`/`b'1'`.
impl From<String> for Bits {
    fn from(s: String) -> Self {
        s.bytes().map(|b| Bit::try_from(b).unwrap()).collect()
    }
}

/// Interprets the bit sequence as an unsigned binary number.
impl<'a> TryFrom<&'a Bits> for u64 {
    type Error = std::num::ParseIntError;

    fn try_from(bits: &'a Bits) -> Result<Self, Self::Error> {
        // Each `Bit` already stores its ASCII byte, so round-trip through a
        // string and let the stdlib do the base-2 parse.
        let bits = bits
            .iter()
            .map(|&b| char::from(b as u8))
            .collect::<String>();
        Self::from_str_radix(&bits, 2)
    }
}
/// Part 1: gamma is built from the majority bit of every column, epsilon is
/// gamma with all significant bits flipped; the answer is their product.
fn part1(items: &[Bits]) -> u64 {
    // Signed tally per column: +1 for a one, -1 for a zero.
    let mut counts: Vec<isize> = vec![0];
    for number in items {
        for (column, &bit) in number.iter().enumerate() {
            if counts.len() <= column {
                counts.resize(column + 1, 0);
            }
            counts[column] += bit;
        }
    }

    // Gamma keeps the majority bit of each column.
    let majority_bits: Bits = counts
        .into_iter()
        .map(|tally| match tally > 0 {
            true => Bit::One,
            false => Bit::Zero,
        })
        .collect();
    let width = majority_bits.len();
    let gamma = u64::try_from(&majority_bits).unwrap();
    // Epsilon is the bitwise complement restricted to the significant bits.
    let epsilon = !gamma & ((1 << width) - 1);
    gamma * epsilon
}
/// Part 2: product of the oxygen-generator rating (keep the partition whose
/// bit is at least as common) and the CO2-scrubber rating (keep the less
/// common partition). Reorders `items` in place via `find_rating_2`.
fn part2(items: &mut [Bits]) -> u64 {
    let oxygen_rating = find_rating_2(items, |tally| tally >= 0);
    let co2_rating = find_rating_2(items, |tally| tally < 0);
    oxygen_rating * co2_rating
}
/// Repeatedly narrows `items` by the bit criterion at successive positions
/// until exactly one number remains, and returns it as a `u64`.
///
/// `select` receives the signed one/zero tally of the current position and
/// decides whether the "ones" or the "zeroes" partition survives.
///
/// NOTE(review): assumes the input converges to a single element before
/// bit positions run out — otherwise the `bits[pos]` indexing would panic.
fn find_rating_2(mut items: &mut [Bits], select: impl Fn(isize) -> bool) -> u64 {
    for pos in 0.. {
        // Done once a single candidate is left.
        if let [result] = items {
            return u64::try_from(&*result).unwrap();
        }
        // Signed tally of the current column (>= 0 means ones are at least
        // as common as zeroes).
        let count = items
            .iter()
            .fold(0_isize, |count, bits| count.tap_mut(|c| *c += bits[pos]));
        // Move all numbers with a one at `pos` to the front;
        // `partition_in_place` returns how many landed there.
        let number_of_ones = items
            .iter_mut()
            .partition_in_place(|n| matches!(n[pos], Bit::One));
        let (ones, zeroes) = items.split_at_mut(number_of_ones);
        // Keep whichever partition the selection rule picks.
        items = if select(count) { ones } else { zeroes };
    }
    unreachable!()
}
/// Unit tests and benchmarks for the day-3 solver.
#[cfg(test)]
mod tests {
    use super::*;
    use aoc::{Solution, SolutionExt};
    use test::Bencher;

    // Worked example from the puzzle description.
    #[test]
    fn test_ex() {
        let items = r#"
00100
11110
10110
10111
10101
01111
00111
11100
10000
11001
00010
01010
"#;
        let (res1, res2) = Solver::run_on(items);
        assert_eq!(res1, 198);
        assert_eq!(res2, 230);
    }

    // Regression check against the known answers for the real input.
    #[test]
    fn test() {
        let (res1, res2) = Solver::run_on_input();
        assert_eq!(res1, 4_160_394);
        assert_eq!(res2, 4_125_600);
    }

    #[bench]
    fn bench_parsing(b: &mut Bencher) {
        let input = Solver::puzzle_input();
        b.bytes = input.len() as u64;
        b.iter(|| Solver::parse_input(input));
    }

    #[bench]
    fn bench_pt1(b: &mut Bencher) {
        let input = Solver::parse_input(Solver::puzzle_input());
        b.iter(|| part1(&input));
    }

    // part2 mutates its input, so each iteration works on a fresh clone.
    #[bench]
    fn bench_pt2(b: &mut Bencher) {
        let input = Solver::parse_input(Solver::puzzle_input());
        b.iter(|| {
            let mut input = input.clone();
            part2(&mut input)
        });
    }
}
| 21.634731 | 81 | 0.529477 |
0eccad1549cc1b07af40e68fe2d37ffc3e3138e6 | 7,034 | // Copyright 2014 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate cgmath;
extern crate genmesh;
#[macro_use]
extern crate gfx;
extern crate gfx_app;
extern crate noise;
extern crate rand;
pub use gfx::format::{DepthStencil};
pub use gfx_app::{ColorFormat, DepthFormat};
use cgmath::{Deg, Matrix4, Point3, SquareMatrix, Vector3};
use genmesh::Vertices;
use genmesh::generators::{Plane, SharedVertex, IndexedPolygon};
use noise::{Seed, perlin2};
use rand::Rng;
use std::time::{Instant};
// Vertex layout, uniform block and pipeline description for the terrain
// example, generated by gfx's `gfx_defines!` macro. The string literals
// are the shader-side names each field binds to.
gfx_defines!{
    vertex Vertex {
        pos: [f32; 3] = "a_Pos",
        color: [f32; 3] = "a_Color",
    }

    constant Locals {
        model: [[f32; 4]; 4] = "u_Model",
        view: [[f32; 4]; 4] = "u_View",
        proj: [[f32; 4]; 4] = "u_Proj",
    }

    pipeline pipe {
        vbuf: gfx::VertexBuffer<Vertex> = (),
        locals: gfx::ConstantBuffer<Locals> = "Locals",
        model: gfx::Global<[[f32; 4]; 4]> = "u_Model",
        view: gfx::Global<[[f32; 4]; 4]> = "u_View",
        proj: gfx::Global<[[f32; 4]; 4]> = "u_Proj",
        out_color: gfx::RenderTarget<ColorFormat> = "Target0",
        out_depth: gfx::DepthTarget<DepthFormat> =
            gfx::preset::depth::LESS_EQUAL_WRITE,
    }
}
/// Maps a terrain height to an RGB vertex colour: white above 8.0, grey
/// down to 0.0, green down to -5.0, and blue below that (thresholds are
/// strict, so a height of exactly 8.0 is grey).
fn calculate_color(height: f32) -> [f32; 3] {
    const WHITE: [f32; 3] = [0.9, 0.9, 0.9];
    const GREY: [f32; 3] = [0.7, 0.7, 0.7];
    const GREEN: [f32; 3] = [0.2, 0.7, 0.2];
    const BLUE: [f32; 3] = [0.2, 0.2, 0.7];

    match height {
        h if h > 8.0 => WHITE,
        h if h > 0.0 => GREY,
        h if h > -5.0 => GREEN,
        _ => BLUE,
    }
}
/// Per-run state of the terrain example: the compiled pipeline, its bound
/// data, the draw slice, and the start time used to animate the camera.
struct App<R: gfx::Resources> {
    pso: gfx::PipelineState<R, pipe::Meta>,
    data: pipe::Data<R>,
    slice: gfx::Slice<R>,
    start_time: Instant,
}
impl<R: gfx::Resources> gfx_app::Application<R> for App<R> {
    /// Builds the tessellation shader set (vertex/hull/domain/pixel),
    /// generates a 16x16 Perlin-noise height field as quad patches, and
    /// assembles the pipeline state and its data.
    fn new<F: gfx::Factory<R>>(factory: &mut F, backend: gfx_app::shade::Backend,
           window_targets: gfx_app::WindowTargets<R>) -> Self {
        use gfx::traits::FactoryExt;

        // Per-backend shader sources for each pipeline stage.
        let vs = gfx_app::shade::Source {
            glsl_150: include_bytes!("shader/terrain.glslv"), //v = vertex
            hlsl_40: include_bytes!("data/vertex.fx"),
            msl_11: include_bytes!("shader/terrain_vertex.metal"),
            vulkan: include_bytes!("data/vert.spv"),
            .. gfx_app::shade::Source::empty()
        };
        let hs = gfx_app::shade::Source {
            glsl_150: include_bytes!("shader/terrain.glslc"), //c = tess control
            hlsl_50: include_bytes!("data/hull.fx"),
            msl_11: include_bytes!("shader/terrain_frag.metal"),
            vulkan: include_bytes!("data/frag.spv"),
            .. gfx_app::shade::Source::empty()
        };
        let ds = gfx_app::shade::Source {
            glsl_150: include_bytes!("shader/terrain.glsle"), //e = tess evaluation
            hlsl_50: include_bytes!("data/domain.fx"),
            msl_11: include_bytes!("shader/terrain_frag.metal"),
            vulkan: include_bytes!("data/frag.spv"),
            .. gfx_app::shade::Source::empty()
        };
        let ps = gfx_app::shade::Source {
            glsl_150: include_bytes!("shader/terrain.glslf"), //f = fragment
            hlsl_40: include_bytes!("data/pixel.fx"),
            msl_11: include_bytes!("shader/terrain_frag.metal"),
            vulkan: include_bytes!("data/frag.spv"),
            .. gfx_app::shade::Source::empty()
        };

        // Randomly seeded Perlin noise supplies the terrain heights.
        let rand_seed = rand::thread_rng().gen();
        let seed = Seed::new(rand_seed);
        let plane = Plane::subdivide(16, 16);
        let vertex_data: Vec<Vertex> = plane.shared_vertex_iter()
            .map(|(x, y)| {
                let h = perlin2(&seed, &[x, y]) * 32.0;
                Vertex {
                    pos: [25.0 * x, 25.0 * y, h],
                    color: calculate_color(h),
                }
            })
            .collect();

        let index_data: Vec<u32> = plane.indexed_polygon_iter()
            //.triangulate() NO NEED THESE ARE QUAD PATCHES
            .vertices()
            .map(|i| i as u32)
            .collect();

        let (vbuf, slice) = factory.create_vertex_buffer_with_slice(&vertex_data, &index_data[..]);

        let set = factory.create_shader_set_tessellation(
            &vs.select(backend).unwrap(),
            &hs.select(backend).unwrap(),
            &ds.select(backend).unwrap(),
            &ps.select(backend).unwrap()
        ).unwrap();

        // Line rasterization so the tessellated patches are visible as a
        // wireframe.
        let mut fillmode = gfx::state::Rasterizer::new_fill();
        fillmode.method = gfx::state::RasterMethod::Line(1);

        App {
            // 4-vertex patch list: each quad of the plane is one patch.
            pso: factory.create_pipeline_state(&set,
                gfx::Primitive::PatchList(4), fillmode, pipe::new()).unwrap(),
            data: pipe::Data {
                vbuf: vbuf,
                locals: factory.create_constant_buffer(1),
                model: Matrix4::identity().into(),
                view: Matrix4::identity().into(),
                proj: cgmath::perspective(
                    Deg(60.0f32), window_targets.aspect_ratio, 0.1, 1000.0
                ).into(),
                out_color: window_targets.color,
                out_depth: window_targets.depth,
            },
            slice: slice,
            start_time: Instant::now(),
        }
    }

    /// Renders one frame: orbits the camera around the origin based on the
    /// elapsed time, uploads the uniforms, clears the targets and draws.
    fn render<C: gfx::CommandBuffer<R>>(&mut self, encoder: &mut gfx::Encoder<R, C>) {
        let elapsed = self.start_time.elapsed();
        let time = elapsed.as_secs() as f32 + elapsed.subsec_nanos() as f32 / 1000_000_000.0;
        // Camera circles at radius 32, height 16, looking at the origin.
        let x = time.sin();
        let y = time.cos();
        let view = Matrix4::look_at(
            Point3::new(x * 32.0, y * 32.0, 16.0),
            Point3::new(0.0, 0.0, 0.0),
            Vector3::unit_z(),
        );
        self.data.view = view.into();
        let locals = Locals {
            model: self.data.model,
            view: self.data.view,
            proj: self.data.proj,
        };
        encoder.update_buffer(&self.data.locals, &[locals], 0).unwrap();
        encoder.clear(&self.data.out_color, [0.3, 0.3, 0.3, 1.0]);
        encoder.clear_depth(&self.data.out_depth, 1.0);
        encoder.draw(&self.slice, &self.pso, &self.data);
    }

    /// Rebinds the render targets and recomputes the projection for the new
    /// aspect ratio after a window resize.
    fn on_resize(&mut self, window_targets: gfx_app::WindowTargets<R>) {
        self.data.out_color = window_targets.color;
        self.data.out_depth = window_targets.depth;
        self.data.proj = cgmath::perspective(
            Deg(60.0f32), window_targets.aspect_ratio, 0.1, 1000.0
        ).into();
    }
}
/// Entry point: launches the windowed example application.
pub fn main() {
    use gfx_app::Application;
    App::launch_simple("Terrain tessellation example");
}
| 36.257732 | 99 | 0.565254 |
61dbc78fd67ec53f4ce39206d724dcd4097f5385 | 10,012 | use crate::asset::{format_lp_token_name, Asset, AssetInfo, PairInfo};
use crate::mock_querier::mock_dependencies;
use crate::querier::{
query_all_balances, query_balance, query_pair_info, query_supply, query_token_balance,
};
use crate::factory::PairType;
use crate::DecimalCheckedOps;
use cosmwasm_std::testing::MOCK_CONTRACT_ADDR;
use cosmwasm_std::{to_binary, Addr, BankMsg, Coin, CosmosMsg, Decimal, Uint128, WasmMsg};
use cw20::Cw20ExecuteMsg;
/// A CW20 token balance query against the mocked handler returns the
/// configured balance.
#[test]
fn token_balance_querier() {
    let mut deps = mock_dependencies(&[]);

    deps.querier.with_token_balances(&[(
        &String::from("liquidity0000"),
        &[(&String::from(MOCK_CONTRACT_ADDR), &Uint128::new(123u128))],
    )]);
    deps.querier.with_cw20_query_handler();

    assert_eq!(
        Uint128::new(123u128),
        query_token_balance(
            &deps.as_ref().querier,
            Addr::unchecked("liquidity0000"),
            Addr::unchecked(MOCK_CONTRACT_ADDR),
        )
        .unwrap()
    );
    // Restore the default handler so later tests are unaffected.
    deps.querier.with_default_query_handler()
}

/// A native-coin balance query returns the mocked bank balance.
#[test]
fn balance_querier() {
    let deps = mock_dependencies(&[Coin {
        denom: "uusd".to_string(),
        amount: Uint128::new(200u128),
    }]);

    assert_eq!(
        query_balance(
            &deps.as_ref().querier,
            Addr::unchecked(MOCK_CONTRACT_ADDR),
            "uusd".to_string()
        )
        .unwrap(),
        Uint128::new(200u128)
    );
}

/// `query_all_balances` returns every mocked coin held by the contract.
#[test]
fn all_balances_querier() {
    let deps = mock_dependencies(&[
        Coin {
            denom: "uusd".to_string(),
            amount: Uint128::new(200u128),
        },
        Coin {
            denom: "ukrw".to_string(),
            amount: Uint128::new(300u128),
        },
    ]);

    assert_eq!(
        query_all_balances(&deps.as_ref().querier, Addr::unchecked(MOCK_CONTRACT_ADDR),).unwrap(),
        vec![
            Coin {
                denom: "uusd".to_string(),
                amount: Uint128::new(200u128),
            },
            Coin {
                denom: "ukrw".to_string(),
                amount: Uint128::new(300u128),
            }
        ]
    );
}

/// Token supply is the sum of all configured holder balances (4 x 123).
#[test]
fn supply_querier() {
    let mut deps = mock_dependencies(&[]);

    deps.querier.with_token_balances(&[(
        &String::from("liquidity0000"),
        &[
            (&String::from(MOCK_CONTRACT_ADDR), &Uint128::new(123u128)),
            (&String::from("addr00000"), &Uint128::new(123u128)),
            (&String::from("addr00001"), &Uint128::new(123u128)),
            (&String::from("addr00002"), &Uint128::new(123u128)),
        ],
    )]);
    deps.querier.with_cw20_query_handler();

    assert_eq!(
        query_supply(&deps.as_ref().querier, Addr::unchecked("liquidity0000")).unwrap(),
        Uint128::new(492u128)
    )
}
/// `AssetInfo::equal` distinguishes token vs. native and different contract
/// addresses; `query_pool` works for both variants.
#[test]
fn test_asset_info() {
    let token_info: AssetInfo = AssetInfo::Token {
        contract_addr: Addr::unchecked("asset0000"),
    };
    let native_token_info: AssetInfo = AssetInfo::NativeToken {
        denom: "uusd".to_string(),
    };

    // Different variants are never equal.
    assert_eq!(false, token_info.equal(&native_token_info));

    // Same variant, different contract address.
    assert_eq!(
        false,
        token_info.equal(&AssetInfo::Token {
            contract_addr: Addr::unchecked("asset0001"),
        })
    );

    assert_eq!(
        true,
        token_info.equal(&AssetInfo::Token {
            contract_addr: Addr::unchecked("asset0000"),
        })
    );

    assert_eq!(true, native_token_info.is_native_token());
    assert_eq!(false, token_info.is_native_token());

    let mut deps = mock_dependencies(&[Coin {
        denom: "uusd".to_string(),
        amount: Uint128::new(123),
    }]);
    deps.querier.with_token_balances(&[(
        &String::from("asset0000"),
        &[
            (&String::from(MOCK_CONTRACT_ADDR), &Uint128::new(123u128)),
            (&String::from("addr00000"), &Uint128::new(123u128)),
            (&String::from("addr00001"), &Uint128::new(123u128)),
            (&String::from("addr00002"), &Uint128::new(123u128)),
        ],
    )]);

    // Native pool balance comes from the mocked bank balance...
    assert_eq!(
        native_token_info
            .query_pool(&deps.as_ref().querier, Addr::unchecked(MOCK_CONTRACT_ADDR))
            .unwrap(),
        Uint128::new(123u128)
    );
    // ...token pool balance from the mocked CW20 handler.
    deps.querier.with_cw20_query_handler();
    assert_eq!(
        token_info
            .query_pool(&deps.as_ref().querier, Addr::unchecked(MOCK_CONTRACT_ADDR))
            .unwrap(),
        Uint128::new(123u128)
    );
}
/// Tax computation/deduction and transfer-message construction for token
/// and native assets.
#[test]
fn test_asset() {
    let mut deps = mock_dependencies(&[Coin {
        denom: "uusd".to_string(),
        amount: Uint128::new(123),
    }]);

    deps.querier.with_token_balances(&[(
        &String::from("asset0000"),
        &[
            (&String::from(MOCK_CONTRACT_ADDR), &Uint128::new(123u128)),
            (&String::from("addr00000"), &Uint128::new(123u128)),
            (&String::from("addr00001"), &Uint128::new(123u128)),
            (&String::from("addr00002"), &Uint128::new(123u128)),
        ],
    )]);

    // 1% tax on uusd, capped at 1_000_000.
    deps.querier.with_tax(
        Decimal::percent(1),
        &[(&"uusd".to_string(), &Uint128::new(1000000u128))],
    );

    let token_asset = Asset {
        amount: Uint128::new(123123u128),
        info: AssetInfo::Token {
            contract_addr: Addr::unchecked("asset0000"),
        },
    };

    let native_token_asset = Asset {
        amount: Uint128::new(123123u128),
        info: AssetInfo::NativeToken {
            denom: "uusd".to_string(),
        },
    };

    // The CW20 asset's tax is zero; the native asset is taxed.
    assert_eq!(
        token_asset.compute_tax(&deps.as_ref().querier).unwrap(),
        Uint128::zero()
    );
    assert_eq!(
        native_token_asset
            .compute_tax(&deps.as_ref().querier)
            .unwrap(),
        Uint128::new(1220u128)
    );

    assert_eq!(
        native_token_asset
            .deduct_tax(&deps.as_ref().querier)
            .unwrap(),
        Coin {
            denom: "uusd".to_string(),
            amount: Uint128::new(121903u128),
        }
    );

    // Token transfers become CW20 execute messages...
    assert_eq!(
        token_asset
            .into_msg(&deps.as_ref().querier, Addr::unchecked("addr0000"))
            .unwrap(),
        CosmosMsg::Wasm(WasmMsg::Execute {
            contract_addr: String::from("asset0000"),
            msg: to_binary(&Cw20ExecuteMsg::Transfer {
                recipient: String::from("addr0000"),
                amount: Uint128::new(123123u128),
            })
            .unwrap(),
            funds: vec![],
        })
    );

    // ...native transfers become bank sends with the tax already deducted.
    assert_eq!(
        native_token_asset
            .into_msg(&deps.as_ref().querier, Addr::unchecked("addr0000"))
            .unwrap(),
        CosmosMsg::Bank(BankMsg::Send {
            to_address: String::from("addr0000"),
            amount: vec![Coin {
                denom: "uusd".to_string(),
                amount: Uint128::new(121903u128),
            }]
        })
    );
}
/// `query_pair_info` returns the pair registered for a token/native asset
/// combination.
#[test]
fn query_astroport_pair_contract() {
    let mut deps = mock_dependencies(&[]);

    deps.querier.with_astroport_pairs(&[(
        &"asset0000uusd".to_string(),
        &PairInfo {
            asset_infos: [
                AssetInfo::Token {
                    contract_addr: Addr::unchecked("asset0000"),
                },
                AssetInfo::NativeToken {
                    denom: "uusd".to_string(),
                },
            ],
            contract_addr: Addr::unchecked("pair0000"),
            liquidity_token: Addr::unchecked("liquidity0000"),
            pair_type: PairType::Xyk {},
        },
    )]);

    let pair_info: PairInfo = query_pair_info(
        &deps.as_ref().querier,
        Addr::unchecked(MOCK_CONTRACT_ADDR),
        &[
            AssetInfo::Token {
                contract_addr: Addr::unchecked("asset0000"),
            },
            AssetInfo::NativeToken {
                denom: "uusd".to_string(),
            },
        ],
    )
    .unwrap();

    assert_eq!(pair_info.contract_addr, String::from("pair0000"),);
    assert_eq!(pair_info.liquidity_token, String::from("liquidity0000"),);
}

/// The LP token name is derived from the two asset symbols joined with
/// "-LP" appended.
#[test]
fn test_format_lp_token_name() {
    let mut deps = mock_dependencies(&[]);
    deps.querier.with_astroport_pairs(&[(
        &"asset0000uusd".to_string(),
        &PairInfo {
            asset_infos: [
                AssetInfo::Token {
                    contract_addr: Addr::unchecked("asset0000"),
                },
                AssetInfo::NativeToken {
                    denom: "uusd".to_string(),
                },
            ],
            contract_addr: Addr::unchecked("pair0000"),
            liquidity_token: Addr::unchecked("liquidity0000"),
            pair_type: PairType::Xyk {},
        },
    )]);

    let pair_info: PairInfo = query_pair_info(
        &deps.as_ref().querier,
        Addr::unchecked(MOCK_CONTRACT_ADDR),
        &[
            AssetInfo::Token {
                contract_addr: Addr::unchecked("asset0000"),
            },
            AssetInfo::NativeToken {
                denom: "uusd".to_string(),
            },
        ],
    )
    .unwrap();

    deps.querier.with_token_balances(&[(
        &String::from("asset0000"),
        &[(&String::from(MOCK_CONTRACT_ADDR), &Uint128::new(123u128))],
    )]);
    deps.querier.with_cw20_query_handler();

    // NOTE(review): the expected "MAPP" prefix presumably comes from the
    // token symbol reported by the mock CW20 handler — confirm in
    // mock_querier.
    let lp_name = format_lp_token_name(pair_info.asset_infos, &deps.as_ref().querier).unwrap();
    assert_eq!(lp_name, "MAPP-UUSD-LP")
}
/// `checked_add`/`checked_mul` must agree with the plain operators while in
/// range, and report an error on overflow.
#[test]
fn test_decimal_checked_ops() {
    // checked_add matches `+` for small values...
    for n in 0u32..100u32 {
        let value = Decimal::from_ratio(n, 1u32);
        assert_eq!(value.checked_add(value).unwrap(), value + value);
    }
    // ...and errors once the result would no longer fit.
    let max_decimal = Decimal::from_ratio(Uint128::MAX, Uint128::from(10u128.pow(18u32)));
    assert!(max_decimal.checked_add(Decimal::one()).is_err());

    // checked_mul matches `*` for small values...
    for n in 0u128..100u128 {
        let value = Decimal::from_ratio(n, 1u128);
        let factor = Uint128::new(n);
        assert_eq!(value.checked_mul(factor).unwrap(), value * factor);
    }
    // ...and errors on overflow as well.
    let max_decimal = Decimal::from_ratio(Uint128::MAX, Uint128::from(10u128.pow(18u32)));
    assert!(max_decimal
        .checked_mul(Uint128::from(10u128.pow(18u32) + 1))
        .is_err());
}
| 28.202817 | 98 | 0.547143 |
dd98583f8ba47c938cab8edc070ae1c469299f82 | 60,400 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use super::array_reader::ArrayReader;
use crate::arrow::schema::parquet_to_arrow_field;
use crate::basic::Encoding;
use crate::data_type::{ByteArray, ByteArrayType};
use crate::decoding::{Decoder, DeltaByteArrayDecoder};
use crate::errors::{ParquetError, Result};
use crate::{
column::page::{Page, PageIterator},
memory::ByteBufferPtr,
schema::types::{ColumnDescPtr, ColumnDescriptor},
};
use arrow::{
array::{ArrayRef, Int16Array},
buffer::MutableBuffer,
datatypes::{DataType as ArrowType, ToByteSlice},
};
use std::cmp::max;
use std::{any::Any, collections::VecDeque, marker::PhantomData};
use std::{cell::RefCell, rc::Rc};
/// One output stream of an unzipped tuple iterator: yields a single
/// component of each source item, parking the other components in the
/// shared `State` buffers for the sibling streams.
struct UnzipIter<Source, Target, State> {
    shared_state: Rc<RefCell<State>>,
    // Picks this stream's pending-item buffer out of the shared state.
    select_item_buffer: fn(&mut State) -> &mut VecDeque<Target>,
    // Splits a freshly pulled source item: stores the other components in
    // the state and returns this stream's component.
    consume_source_item: fn(source_item: Source, state: &mut State) -> Target,
}

impl<Source, Target, State> UnzipIter<Source, Target, State> {
    fn new(
        shared_state: Rc<RefCell<State>>,
        item_buffer_selector: fn(&mut State) -> &mut VecDeque<Target>,
        source_item_consumer: fn(source_item: Source, state: &mut State) -> Target,
    ) -> Self {
        Self {
            shared_state,
            select_item_buffer: item_buffer_selector,
            consume_source_item: source_item_consumer,
        }
    }
}
/// Shared state that owns the underlying source iterator of an `UnzipIter`.
trait UnzipIterState<T> {
    type SourceIter: Iterator<Item = T>;
    fn source_iter(&mut self) -> &mut Self::SourceIter;
}
impl<Source, Target, State: UnzipIterState<Source>> Iterator
    for UnzipIter<Source, Target, State>
{
    type Item = Target;

    /// Serves a buffered item when one is available; otherwise pulls the
    /// next tuple from the shared source and splits it, keeping this
    /// stream's component and parking the rest.
    fn next(&mut self) -> Option<Self::Item> {
        let mut state = self.shared_state.borrow_mut();
        // First drain anything a sibling stream already parked for us.
        if let Some(buffered) = (self.select_item_buffer)(&mut *state).pop_front() {
            return Some(buffered);
        }
        // Buffer empty: advance the underlying iterator.
        match state.source_iter().next() {
            Some(item) => Some((self.consume_source_item)(item, &mut state)),
            None => None,
        }
    }
}
/// Shared state for unzipping a `(value, def level, rep level)` iterator
/// into three streams: the source iterator plus one pending-item buffer per
/// stream.
struct PageBufferUnzipIterState<V, L, It> {
    iter: It,
    value_iter_buffer: VecDeque<V>,
    def_level_iter_buffer: VecDeque<L>,
    rep_level_iter_buffer: VecDeque<L>,
}

impl<V, L, It: Iterator<Item = (V, L, L)>> UnzipIterState<(V, L, L)>
    for PageBufferUnzipIterState<V, L, It>
{
    type SourceIter = It;

    #[inline]
    fn source_iter(&mut self) -> &mut Self::SourceIter {
        &mut self.iter
    }
}
// Convenience aliases for the three streams produced by `unzip_iter`:
// the value stream and the two level streams.
type ValueUnzipIter<V, L, It> =
    UnzipIter<(V, L, L), V, PageBufferUnzipIterState<V, L, It>>;
type LevelUnzipIter<V, L, It> =
    UnzipIter<(V, L, L), L, PageBufferUnzipIterState<V, L, It>>;
type PageUnzipResult<V, L, It> = (
    ValueUnzipIter<V, L, It>,
    LevelUnzipIter<V, L, It>,
    LevelUnzipIter<V, L, It>,
);
/// Splits an iterator of `(value, def level, rep level)` tuples into three
/// lazily synchronised component iterators. Whichever stream is polled
/// first advances the shared source; the components belonging to the other
/// two streams are parked in their buffers until polled.
fn unzip_iter<V, L, It: Iterator<Item = (V, L, L)>>(it: It) -> PageUnzipResult<V, L, It> {
    let shared_data = Rc::new(RefCell::new(PageBufferUnzipIterState {
        iter: it,
        value_iter_buffer: VecDeque::new(),
        def_level_iter_buffer: VecDeque::new(),
        rep_level_iter_buffer: VecDeque::new(),
    }));

    // Value stream: keeps `v`, parks `d` and `r`.
    let value_iter = UnzipIter::new(
        shared_data.clone(),
        |state| &mut state.value_iter_buffer,
        |(v, d, r), state| {
            state.def_level_iter_buffer.push_back(d);
            state.rep_level_iter_buffer.push_back(r);
            v
        },
    );

    // Definition-level stream: keeps `d`, parks `v` and `r`.
    let def_level_iter = UnzipIter::new(
        shared_data.clone(),
        |state| &mut state.def_level_iter_buffer,
        |(v, d, r), state| {
            state.value_iter_buffer.push_back(v);
            state.rep_level_iter_buffer.push_back(r);
            d
        },
    );

    // Repetition-level stream: keeps `r`, parks `v` and `d`.
    let rep_level_iter = UnzipIter::new(
        shared_data,
        |state| &mut state.rep_level_iter_buffer,
        |(v, d, r), state| {
            state.value_iter_buffer.push_back(v);
            state.def_level_iter_buffer.push_back(d);
            r
        },
    );

    (value_iter, def_level_iter, rep_level_iter)
}
/// Converts raw value bytes produced by a `ValueDecoder` into Arrow
/// `ArrayData` of the converter's target type.
pub trait ArrayConverter {
    /// Pulls up to `num_values` values from `value_decoder` and assembles
    /// them into an `ArrayData` (without a null bitmap; NULL insertion is
    /// handled by the caller).
    fn convert_value_bytes(
        &self,
        value_decoder: &mut impl ValueDecoder,
        num_values: usize,
    ) -> Result<arrow::array::ArrayData>;
}
/// Array reader that decodes parquet pages directly into Arrow arrays using
/// an `ArrayConverter` for the value bytes.
pub struct ArrowArrayReader<'a, C: ArrayConverter + 'a> {
    column_desc: ColumnDescPtr,
    data_type: ArrowType,
    // composite decoders spanning all pages of all column chunks
    def_level_decoder: Box<dyn ValueDecoder + 'a>,
    rep_level_decoder: Box<dyn ValueDecoder + 'a>,
    value_decoder: Box<dyn ValueDecoder + 'a>,
    // levels decoded by the most recent `next_batch` call
    last_def_levels: Option<Int16Array>,
    last_rep_levels: Option<Int16Array>,
    array_converter: C,
}
/// Per-column-chunk state shared by all page decoders of that chunk; caches
/// the values decoded from the chunk's dictionary page (if any).
pub(crate) struct ColumnChunkContext {
    dictionary_values: Option<Vec<ByteBufferPtr>>,
}
impl ColumnChunkContext {
    /// Creates a context with no dictionary decoded yet.
    fn new() -> Self {
        Self {
            dictionary_values: None,
        }
    }
    /// Caches the chunk's decoded dictionary values.
    fn set_dictionary(&mut self, dictionary_values: Vec<ByteBufferPtr>) {
        self.dictionary_values = Some(dictionary_values);
    }
}
// (value decoder, def level decoder, rep level decoder) for a single page
type PageDecoderTuple = (
    Box<dyn ValueDecoder>,
    Box<dyn ValueDecoder>,
    Box<dyn ValueDecoder>,
);
impl<'a, C: ArrayConverter + 'a> ArrowArrayReader<'a, C> {
    /// Builds an `ArrowArrayReader` from an iterator of column chunks.
    ///
    /// Pages of all chunks are flattened into one page stream (each tagged
    /// with a per-chunk `ColumnChunkContext` so dictionary pages can be
    /// shared with the data pages of the same chunk). Each page is mapped to
    /// a (values, def levels, rep levels) decoder tuple, and the tuple stream
    /// is unzipped into three composite decoders. If `arrow_type` is `None`,
    /// the Arrow type is derived from the parquet column descriptor.
    pub fn try_new<P: PageIterator + 'a>(
        column_chunk_iterator: P,
        column_desc: ColumnDescPtr,
        array_converter: C,
        arrow_type: Option<ArrowType>,
    ) -> Result<Self> {
        let data_type = match arrow_type {
            Some(t) => t,
            None => parquet_to_arrow_field(column_desc.as_ref())?
                .data_type()
                .clone(),
        };
        type PageIteratorItem = Result<(Page, Rc<RefCell<ColumnChunkContext>>)>;
        let page_iter = column_chunk_iterator
            // build iterator of pages across column chunks
            .flat_map(|x| -> Box<dyn Iterator<Item = PageIteratorItem>> {
                // attach column chunk context
                let context = Rc::new(RefCell::new(ColumnChunkContext::new()));
                match x {
                    Ok(page_reader) => Box::new(
                        page_reader.map(move |pr| pr.map(|p| (p, context.clone()))),
                    ),
                    // errors from reading column chunks / row groups are propagated to page level
                    Err(e) => Box::new(std::iter::once(Err(e))),
                }
            });
        // capture a clone of column_desc in closure so that it can outlive current function
        let map_page_fn_factory = |column_desc: ColumnDescPtr| {
            move |x: Result<(Page, Rc<RefCell<ColumnChunkContext>>)>| {
                x.and_then(|(page, context)| {
                    Self::map_page(page, context, column_desc.as_ref())
                })
            }
        };
        let map_page_fn = map_page_fn_factory(column_desc.clone());
        // map page iterator into tuple of buffer iterators for (values, def levels, rep levels)
        // errors from lower levels are surfaced through the value decoder iterator
        let decoder_iter = page_iter.map(map_page_fn).map(|x| match x {
            Ok(iter_tuple) => iter_tuple,
            // errors from reading pages are propagated to decoder iterator level
            Err(e) => Self::map_page_error(e),
        });
        // split tuple iterator into separate iterators for (values, def levels, rep levels)
        let (value_iter, def_level_iter, rep_level_iter) = unzip_iter(decoder_iter);
        Ok(Self {
            column_desc,
            data_type,
            def_level_decoder: Box::new(CompositeValueDecoder::new(def_level_iter)),
            rep_level_decoder: Box::new(CompositeValueDecoder::new(rep_level_iter)),
            value_decoder: Box::new(CompositeValueDecoder::new(value_iter)),
            last_def_levels: None,
            last_rep_levels: None,
            array_converter,
        })
    }
    /// True when the column can contain NULLs (has definition levels).
    #[inline]
    fn def_levels_available(column_desc: &ColumnDescriptor) -> bool {
        column_desc.max_def_level() > 0
    }
    /// True when the column is repeated / nested (has repetition levels).
    #[inline]
    fn rep_levels_available(column_desc: &ColumnDescriptor) -> bool {
        column_desc.max_rep_level() > 0
    }
    /// Propagates a page-read error into all three decoder slots so it
    /// surfaces on first use of any of them.
    fn map_page_error(err: ParquetError) -> PageDecoderTuple {
        (
            Box::new(<dyn ValueDecoder>::once(Err(err.clone()))),
            Box::new(<dyn ValueDecoder>::once(Err(err.clone()))),
            Box::new(<dyn ValueDecoder>::once(Err(err))),
        )
    }
    // Split Result<Page> into Result<(Iterator<Values>, Iterator<DefLevels>, Iterator<RepLevels>)>
    // this method could fail, e.g. if the page encoding is not supported
    //
    // Dictionary pages produce three empty decoders (their values are cached
    // in the column chunk context instead). For data pages, level data (V1:
    // rep then def prefixes; V2: dedicated byte ranges) is consumed before
    // the value bytes, and missing levels are represented by decoders that
    // error if ever polled.
    fn map_page(
        page: Page,
        column_chunk_context: Rc<RefCell<ColumnChunkContext>>,
        column_desc: &ColumnDescriptor,
    ) -> Result<PageDecoderTuple> {
        use crate::encodings::levels::LevelDecoder;
        match page {
            Page::DictionaryPage {
                buf,
                num_values,
                encoding,
                ..
            } => {
                let mut column_chunk_context = column_chunk_context.borrow_mut();
                if column_chunk_context.dictionary_values.is_some() {
                    return Err(general_err!(
                        "Column chunk cannot have more than one dictionary"
                    ));
                }
                // create plain decoder for dictionary values
                let mut dict_decoder = Self::get_dictionary_page_decoder(
                    buf,
                    num_values as usize,
                    encoding,
                    column_desc,
                )?;
                // decode and cache dictionary values
                let dictionary_values = dict_decoder.read_dictionary_values()?;
                column_chunk_context.set_dictionary(dictionary_values);
                // a dictionary page doesn't return any values
                Ok((
                    Box::new(<dyn ValueDecoder>::empty()),
                    Box::new(<dyn ValueDecoder>::empty()),
                    Box::new(<dyn ValueDecoder>::empty()),
                ))
            }
            Page::DataPage {
                buf,
                num_values,
                encoding,
                def_level_encoding,
                rep_level_encoding,
                statistics: _,
            } => {
                let mut buffer_ptr = buf;
                // create rep level decoder iterator
                let rep_level_iter: Box<dyn ValueDecoder> =
                    if Self::rep_levels_available(&column_desc) {
                        let mut rep_decoder = LevelDecoder::v1(
                            rep_level_encoding,
                            column_desc.max_rep_level(),
                        );
                        let rep_level_byte_len =
                            rep_decoder.set_data(num_values as usize, buffer_ptr.all());
                        // advance buffer pointer
                        buffer_ptr = buffer_ptr.start_from(rep_level_byte_len);
                        Box::new(LevelValueDecoder::new(rep_decoder))
                    } else {
                        Box::new(<dyn ValueDecoder>::once(Err(ParquetError::General(
                            "rep levels are not available".to_string(),
                        ))))
                    };
                // create def level decoder iterator
                let def_level_iter: Box<dyn ValueDecoder> =
                    if Self::def_levels_available(&column_desc) {
                        let mut def_decoder = LevelDecoder::v1(
                            def_level_encoding,
                            column_desc.max_def_level(),
                        );
                        let def_levels_byte_len =
                            def_decoder.set_data(num_values as usize, buffer_ptr.all());
                        // advance buffer pointer
                        buffer_ptr = buffer_ptr.start_from(def_levels_byte_len);
                        Box::new(LevelValueDecoder::new(def_decoder))
                    } else {
                        Box::new(<dyn ValueDecoder>::once(Err(ParquetError::General(
                            "def levels are not available".to_string(),
                        ))))
                    };
                // create value decoder iterator
                let value_iter = Self::get_value_decoder(
                    buffer_ptr,
                    num_values as usize,
                    encoding,
                    column_desc,
                    column_chunk_context,
                )?;
                Ok((value_iter, def_level_iter, rep_level_iter))
            }
            Page::DataPageV2 {
                buf,
                num_values,
                encoding,
                num_nulls,
                num_rows: _,
                def_levels_byte_len,
                rep_levels_byte_len,
                is_compressed: _,
                statistics: _,
            } => {
                // V2 pages carry explicit byte lengths for the level sections
                let mut offset = 0;
                // create rep level decoder iterator
                let rep_level_iter: Box<dyn ValueDecoder> =
                    if Self::rep_levels_available(&column_desc) {
                        let rep_levels_byte_len = rep_levels_byte_len as usize;
                        let mut rep_decoder =
                            LevelDecoder::v2(column_desc.max_rep_level());
                        rep_decoder.set_data_range(
                            num_values as usize,
                            &buf,
                            offset,
                            rep_levels_byte_len,
                        );
                        offset += rep_levels_byte_len;
                        Box::new(LevelValueDecoder::new(rep_decoder))
                    } else {
                        Box::new(<dyn ValueDecoder>::once(Err(ParquetError::General(
                            "rep levels are not available".to_string(),
                        ))))
                    };
                // create def level decoder iterator
                let def_level_iter: Box<dyn ValueDecoder> =
                    if Self::def_levels_available(&column_desc) {
                        let def_levels_byte_len = def_levels_byte_len as usize;
                        let mut def_decoder =
                            LevelDecoder::v2(column_desc.max_def_level());
                        def_decoder.set_data_range(
                            num_values as usize,
                            &buf,
                            offset,
                            def_levels_byte_len,
                        );
                        offset += def_levels_byte_len;
                        Box::new(LevelValueDecoder::new(def_decoder))
                    } else {
                        Box::new(<dyn ValueDecoder>::once(Err(ParquetError::General(
                            "def levels are not available".to_string(),
                        ))))
                    };
                // create value decoder iterator
                let values_buffer = buf.start_from(offset);
                // NOTE(review): assumes num_nulls <= num_values; if the page
                // metadata violates that, the subtraction underflows before
                // `max` applies - confirm upstream validation.
                let value_iter = Self::get_value_decoder(
                    values_buffer,
                    max(num_values - num_nulls, 0) as usize,
                    encoding,
                    column_desc,
                    column_chunk_context,
                )?;
                Ok((value_iter, def_level_iter, rep_level_iter))
            }
        }
    }
    /// Builds a decoder for a dictionary page. PLAIN / PLAIN_DICTIONARY are
    /// treated as RLE_DICTIONARY; any other encoding is rejected.
    fn get_dictionary_page_decoder(
        values_buffer: ByteBufferPtr,
        num_values: usize,
        mut encoding: Encoding,
        column_desc: &ColumnDescriptor,
    ) -> Result<Box<dyn DictionaryValueDecoder>> {
        if encoding == Encoding::PLAIN || encoding == Encoding::PLAIN_DICTIONARY {
            encoding = Encoding::RLE_DICTIONARY
        }
        if encoding == Encoding::RLE_DICTIONARY {
            Ok(
                Self::get_plain_value_decoder(values_buffer, num_values, column_desc)
                    .into_dictionary_decoder(),
            )
        } else {
            Err(nyi_err!(
                "Invalid/Unsupported encoding type for dictionary: {}",
                encoding
            ))
        }
    }
    /// Builds the value decoder for a data page. RLE_DICTIONARY requires the
    /// chunk's dictionary page to have been decoded already (cached in
    /// `column_chunk_context`); unsupported encodings return an error.
    fn get_value_decoder(
        values_buffer: ByteBufferPtr,
        num_values: usize,
        mut encoding: Encoding,
        column_desc: &ColumnDescriptor,
        column_chunk_context: Rc<RefCell<ColumnChunkContext>>,
    ) -> Result<Box<dyn ValueDecoder>> {
        if encoding == Encoding::PLAIN_DICTIONARY {
            encoding = Encoding::RLE_DICTIONARY;
        }
        match encoding {
            Encoding::PLAIN => {
                Ok(
                    Self::get_plain_value_decoder(values_buffer, num_values, column_desc)
                        .into_value_decoder(),
                )
            }
            Encoding::RLE_DICTIONARY => {
                if column_chunk_context.borrow().dictionary_values.is_some() {
                    let value_bit_len = Self::get_column_physical_bit_len(column_desc);
                    // bit len 0 marks variable-length (BYTE_ARRAY) values
                    let dictionary_decoder: Box<dyn ValueDecoder> = if value_bit_len == 0
                    {
                        Box::new(VariableLenDictionaryDecoder::new(
                            column_chunk_context,
                            values_buffer,
                            num_values,
                        ))
                    } else {
                        Box::new(FixedLenDictionaryDecoder::new(
                            column_chunk_context,
                            values_buffer,
                            num_values,
                            value_bit_len,
                        ))
                    };
                    Ok(dictionary_decoder)
                } else {
                    Err(general_err!("Dictionary values have not been initialized."))
                }
            }
            // Encoding::RLE => Box::new(RleValueDecoder::new()),
            // Encoding::DELTA_BINARY_PACKED => Box::new(DeltaBitPackDecoder::new()),
            // Encoding::DELTA_LENGTH_BYTE_ARRAY => Box::new(DeltaLengthByteArrayDecoder::new()),
            Encoding::DELTA_BYTE_ARRAY => Ok(Box::new(DeltaByteArrayValueDecoder::new(
                values_buffer,
                num_values,
            )?)),
            e => return Err(nyi_err!("Encoding {} is not supported", e)),
        }
    }
    /// Bit width of one value of the column's physical type; 0 means
    /// variable-length (BYTE_ARRAY).
    fn get_column_physical_bit_len(column_desc: &ColumnDescriptor) -> usize {
        use crate::basic::Type as PhysicalType;
        // parquet only supports a limited number of physical types
        // later converters cast to a more specific arrow / logical type if necessary
        match column_desc.physical_type() {
            PhysicalType::BOOLEAN => 1,
            PhysicalType::INT32 | PhysicalType::FLOAT => 32,
            PhysicalType::INT64 | PhysicalType::DOUBLE => 64,
            PhysicalType::INT96 => 96,
            PhysicalType::BYTE_ARRAY => 0,
            PhysicalType::FIXED_LEN_BYTE_ARRAY => column_desc.type_length() as usize * 8,
        }
    }
    /// Picks the PLAIN decoder matching the column's physical layout:
    /// variable-length for BYTE_ARRAY, fixed-length otherwise.
    fn get_plain_value_decoder(
        values_buffer: ByteBufferPtr,
        num_values: usize,
        column_desc: &ColumnDescriptor,
    ) -> Box<dyn PlainValueDecoder> {
        let value_bit_len = Self::get_column_physical_bit_len(column_desc);
        if value_bit_len == 0 {
            Box::new(VariableLenPlainDecoder::new(values_buffer, num_values))
        } else {
            Box::new(FixedLenPlainDecoder::new(
                values_buffer,
                num_values,
                value_bit_len,
            ))
        }
    }
    /// Decodes up to `batch_size` levels into an `Int16Array`.
    fn build_level_array(
        level_decoder: &mut impl ValueDecoder,
        batch_size: usize,
    ) -> Result<Int16Array> {
        use arrow::datatypes::Int16Type;
        let level_converter = PrimitiveArrayConverter::<Int16Type>::new();
        let array_data =
            level_converter.convert_value_bytes(level_decoder, batch_size)?;
        Ok(Int16Array::from(array_data))
    }
}
impl<C: ArrayConverter> ArrayReader for ArrowArrayReader<'static, C> {
    fn as_any(&self) -> &dyn Any {
        self
    }
    fn get_data_type(&self) -> &ArrowType {
        &self.data_type
    }
    /// Reads the next batch of up to `batch_size` rows.
    ///
    /// Rep levels (if any) are decoded and cached. Def levels (if any)
    /// determine how many non-null values to decode and yield the null
    /// bitmap; NULL slots are then spliced into the converted values with
    /// `MutableArrayData`. Finally the array is cast to `self.data_type` if
    /// the converter produced a different type.
    fn next_batch(&mut self, batch_size: usize) -> Result<ArrayRef> {
        if Self::rep_levels_available(&self.column_desc) {
            // read rep levels if available
            let rep_level_array =
                Self::build_level_array(&mut self.rep_level_decoder, batch_size)?;
            self.last_rep_levels = Some(rep_level_array);
        }
        // check if def levels are available
        let (values_to_read, null_bitmap_array) =
            if !Self::def_levels_available(&self.column_desc) {
                // if no def levels - just read (up to) batch_size values
                (batch_size, None)
            } else {
                // if def levels are available - they determine how many values will be read
                // decode def levels, return first error if any
                let def_level_array =
                    Self::build_level_array(&mut self.def_level_decoder, batch_size)?;
                let def_level_count = def_level_array.len();
                // use eq_scalar to efficiently build null bitmap array from def levels
                let null_bitmap_array = arrow::compute::eq_scalar(
                    &def_level_array,
                    self.column_desc.max_def_level(),
                )?;
                self.last_def_levels = Some(def_level_array);
                // efficiently calculate values to read
                let values_to_read = null_bitmap_array
                    .values()
                    .count_set_bits_offset(0, def_level_count);
                let maybe_null_bitmap = if values_to_read != null_bitmap_array.len() {
                    Some(null_bitmap_array)
                } else {
                    // shortcut if no NULLs
                    None
                };
                (values_to_read, maybe_null_bitmap)
            };
        // read a batch of values
        // converter only creates a no-null / all value array data
        let mut value_array_data = self
            .array_converter
            .convert_value_bytes(&mut self.value_decoder, values_to_read)?;
        if let Some(null_bitmap_array) = null_bitmap_array {
            // Only if def levels are available - insert null values efficiently using MutableArrayData.
            // This will require value bytes to be copied again, but converter requirements are reduced.
            // With a small number of NULLs, this will only be a few copies of large byte sequences.
            let actual_batch_size = null_bitmap_array.len();
            // use_nulls is false, because null_bitmap_array is already calculated and re-used
            let mut mutable = arrow::array::MutableArrayData::new(
                vec![&value_array_data],
                false,
                actual_batch_size,
            );
            // SlicesIterator slices only the true values, NULLs are inserted to fill any gaps
            arrow::compute::SlicesIterator::new(&null_bitmap_array).for_each(
                |(start, end)| {
                    // the gap needs to be filled with NULLs
                    if start > mutable.len() {
                        let nulls_to_add = start - mutable.len();
                        mutable.extend_nulls(nulls_to_add);
                    }
                    // fill values, adjust start and end with NULL count so far
                    let nulls_added = mutable.null_count();
                    mutable.extend(0, start - nulls_added, end - nulls_added);
                },
            );
            // any remaining part is NULLs
            if mutable.len() < actual_batch_size {
                let nulls_to_add = actual_batch_size - mutable.len();
                mutable.extend_nulls(nulls_to_add);
            }
            value_array_data = mutable
                .into_builder()
                .null_bit_buffer(null_bitmap_array.values().clone())
                .build();
        }
        let mut array = arrow::array::make_array(value_array_data);
        if array.data_type() != &self.data_type {
            // cast array to self.data_type if necessary
            array = arrow::compute::cast(&array, &self.data_type)?
        }
        Ok(array)
    }
    /// Def levels decoded by the most recent `next_batch` call (if any).
    fn get_def_levels(&self) -> Option<&[i16]> {
        self.last_def_levels.as_ref().map(|x| x.values())
    }
    /// Rep levels decoded by the most recent `next_batch` call (if any).
    fn get_rep_levels(&self) -> Option<&[i16]> {
        self.last_rep_levels.as_ref().map(|x| x.values())
    }
}
use crate::encodings::rle::RleDecoder;
/// Streaming source of raw value bytes. An implementation invokes
/// `read_bytes` with a byte slice plus the number of values that slice
/// represents, and returns the total number of values delivered
/// (0 when exhausted).
pub trait ValueDecoder {
    fn read_value_bytes(
        &mut self,
        num_values: usize,
        read_bytes: &mut dyn FnMut(&[u8], usize),
    ) -> Result<usize>;
}
/// Decodes an entire dictionary page: one buffer per entry for
/// variable-length types, or a single contiguous buffer for fixed-length
/// types (see the respective plain decoders).
trait DictionaryValueDecoder {
    fn read_dictionary_values(&mut self) -> Result<Vec<ByteBufferPtr>>;
}
/// A PLAIN decoder usable either as a value decoder (data pages) or as a
/// dictionary decoder (dictionary pages); the `into_*` methods choose which
/// trait object to expose.
trait PlainValueDecoder: ValueDecoder + DictionaryValueDecoder {
    fn into_value_decoder(self: Box<Self>) -> Box<dyn ValueDecoder>;
    fn into_dictionary_decoder(self: Box<Self>) -> Box<dyn DictionaryValueDecoder>;
}
// Blanket impl: any type implementing both decoder traits can be upcast to
// either trait object by simple unsizing coercion.
impl<T> PlainValueDecoder for T
where
    T: ValueDecoder + DictionaryValueDecoder + 'static,
{
    fn into_value_decoder(self: Box<T>) -> Box<dyn ValueDecoder> {
        self
    }
    fn into_dictionary_decoder(self: Box<T>) -> Box<dyn DictionaryValueDecoder> {
        self
    }
}
impl dyn ValueDecoder {
    /// Decoder that always reports 0 values read (an exhausted source).
    fn empty() -> impl ValueDecoder {
        Self::once(Ok(0))
    }
    /// Decoder that returns `value` on every call; typically used to surface
    /// an error through the decoder interface.
    fn once(value: Result<usize>) -> impl ValueDecoder {
        SingleValueDecoder::new(value)
    }
}
// Forwarding impl so `Box<dyn ValueDecoder>` is itself usable wherever a
// `ValueDecoder` is expected (e.g. inside `CompositeValueDecoder`).
impl ValueDecoder for Box<dyn ValueDecoder> {
    #[inline]
    fn read_value_bytes(
        &mut self,
        num_values: usize,
        read_bytes: &mut dyn FnMut(&[u8], usize),
    ) -> Result<usize> {
        // delegate straight to the boxed decoder
        (**self).read_value_bytes(num_values, read_bytes)
    }
}
/// Decoder that returns the same stored result on every call; backs
/// `<dyn ValueDecoder>::empty()` (`Ok(0)`) and `once(Err(..))` (error
/// propagation through the decoder API).
struct SingleValueDecoder {
    value: Result<usize>,
}
impl SingleValueDecoder {
    fn new(value: Result<usize>) -> Self {
        Self { value }
    }
}
impl ValueDecoder for SingleValueDecoder {
    /// Returns a clone of the stored result; never invokes `read_bytes`.
    fn read_value_bytes(
        &mut self,
        _num_values: usize,
        _read_bytes: &mut dyn FnMut(&[u8], usize),
    ) -> Result<usize> {
        self.value.clone()
    }
}
/// Chains a sequence of per-page decoders into one continuous decoder.
struct CompositeValueDecoder<I: Iterator<Item = Box<dyn ValueDecoder>>> {
    current_decoder: Option<Box<dyn ValueDecoder>>,
    decoder_iter: I,
}
impl<I: Iterator<Item = Box<dyn ValueDecoder>>> CompositeValueDecoder<I> {
    /// Positions the composite on the first decoder from `decoder_iter`
    /// (`None` when the iterator is empty).
    fn new(mut decoder_iter: I) -> Self {
        Self {
            // evaluated before `decoder_iter` is moved into the struct
            current_decoder: decoder_iter.next(),
            decoder_iter,
        }
    }
}
impl<I: Iterator<Item = Box<dyn ValueDecoder>>> ValueDecoder
    for CompositeValueDecoder<I>
{
    /// Reads up to `num_values` values, transparently advancing to the next
    /// per-page decoder whenever the current one reports 0 values. Returns
    /// fewer than `num_values` only when all decoders are exhausted.
    fn read_value_bytes(
        &mut self,
        num_values: usize,
        read_bytes: &mut dyn FnMut(&[u8], usize),
    ) -> Result<usize> {
        let mut values_to_read = num_values;
        while values_to_read > 0 {
            let value_decoder = match self.current_decoder.as_mut() {
                Some(d) => d,
                // no more decoders
                None => break,
            };
            while values_to_read > 0 {
                let values_read =
                    value_decoder.read_value_bytes(values_to_read, read_bytes)?;
                if values_read > 0 {
                    values_to_read -= values_read;
                } else {
                    // no more values in current decoder
                    self.current_decoder = self.decoder_iter.next();
                    break;
                }
            }
        }
        Ok(num_values - values_to_read)
    }
}
/// Adapts a parquet `LevelDecoder` to the `ValueDecoder` interface by
/// decoding i16 levels into a scratch buffer and handing out its raw bytes.
struct LevelValueDecoder {
    level_decoder: crate::encodings::levels::LevelDecoder,
    level_value_buffer: Vec<i16>,
}
impl LevelValueDecoder {
    fn new(level_decoder: crate::encodings::levels::LevelDecoder) -> Self {
        Self {
            level_decoder,
            // scratch space: levels are decoded in chunks of up to 2048
            level_value_buffer: vec![0i16; 2048],
        }
    }
}
impl ValueDecoder for LevelValueDecoder {
    /// Reads up to `num_values` levels, decoding them in chunks bounded by
    /// the scratch buffer and delivering each chunk as raw i16 bytes.
    /// Returns the total number of levels delivered (less than `num_values`
    /// once the level source is exhausted).
    fn read_value_bytes(
        &mut self,
        num_values: usize,
        read_bytes: &mut dyn FnMut(&[u8], usize),
    ) -> Result<usize> {
        let value_size = std::mem::size_of::<i16>();
        let mut total_values_read = 0;
        while total_values_read < num_values {
            let values_to_read = std::cmp::min(
                num_values - total_values_read,
                self.level_value_buffer.len(),
            );
            // `?` propagates decode errors directly (replaces a manual
            // `match { Ok => v, Err(e) => return Err(e) }`)
            let values_read = self
                .level_decoder
                .get(&mut self.level_value_buffer[..values_to_read])?;
            if values_read > 0 {
                let level_value_bytes =
                    &self.level_value_buffer.to_byte_slice()[..values_read * value_size];
                read_bytes(level_value_bytes, values_read);
                total_values_read += values_read;
            } else {
                // level source exhausted
                break;
            }
        }
        Ok(total_values_read)
    }
}
/// PLAIN decoder for fixed-length physical types; serves value bytes
/// straight out of the page buffer without copying.
pub(crate) struct FixedLenPlainDecoder {
    data: ByteBufferPtr,
    num_values: usize,
    // size of one value in bits (1 for BOOLEAN, otherwise a multiple of 8)
    value_bit_len: usize,
}
impl FixedLenPlainDecoder {
    pub(crate) fn new(
        data: ByteBufferPtr,
        num_values: usize,
        value_bit_len: usize,
    ) -> Self {
        Self {
            data,
            num_values,
            value_bit_len,
        }
    }
}
impl DictionaryValueDecoder for FixedLenPlainDecoder {
    /// Returns all dictionary values as a single contiguous buffer slice and
    /// marks the decoder exhausted.
    /// NOTE(review): `value_bit_len / 8` is 0 for BOOLEAN (bit len 1), which
    /// would make the division below panic - this assumes boolean columns
    /// never carry dictionary pages; confirm against callers.
    fn read_dictionary_values(&mut self) -> Result<Vec<ByteBufferPtr>> {
        let value_byte_len = self.value_bit_len / 8;
        let available_values = self.data.len() / value_byte_len;
        let values_to_read = std::cmp::min(available_values, self.num_values);
        let byte_len = values_to_read * value_byte_len;
        let values = vec![self.data.range(0, byte_len)];
        // consume everything: zero remaining values and an empty data range
        self.num_values = 0;
        self.data.set_range(self.data.start(), 0);
        Ok(values)
    }
}
impl ValueDecoder for FixedLenPlainDecoder {
    /// Delivers up to `num_values` values as one contiguous byte slice and
    /// advances the internal buffer range past the consumed bytes.
    fn read_value_bytes(
        &mut self,
        num_values: usize,
        read_bytes: &mut dyn FnMut(&[u8], usize),
    ) -> Result<usize> {
        // bit-based arithmetic so BOOLEAN (bit len 1) also works here
        let available_values = self.data.len() * 8 / self.value_bit_len;
        if available_values > 0 {
            let values_to_read = std::cmp::min(available_values, num_values);
            let byte_len = values_to_read * self.value_bit_len / 8;
            read_bytes(&self.data.data()[..byte_len], values_to_read);
            // shrink the remaining range to the unread tail
            self.data
                .set_range(self.data.start() + byte_len, self.data.len() - byte_len);
            Ok(values_to_read)
        } else {
            Ok(0)
        }
    }
}
/// PLAIN decoder for BYTE_ARRAY values: each value is a u32 length prefix
/// followed by that many bytes.
pub(crate) struct VariableLenPlainDecoder {
    data: ByteBufferPtr,
    num_values: usize,
    // byte offset of the next length prefix within `data`
    position: usize,
}
impl VariableLenPlainDecoder {
    pub(crate) fn new(data: ByteBufferPtr, num_values: usize) -> Self {
        Self {
            data,
            num_values,
            position: 0,
        }
    }
}
impl DictionaryValueDecoder for VariableLenPlainDecoder {
    /// Decodes up to `num_values` length-prefixed entries, returning one
    /// zero-copy buffer slice per dictionary entry.
    fn read_dictionary_values(&mut self) -> Result<Vec<ByteBufferPtr>> {
        const LEN_SIZE: usize = std::mem::size_of::<u32>();
        let data = self.data.data();
        let data_len = data.len();
        let values_to_read = self.num_values;
        let mut values = Vec::with_capacity(values_to_read);
        let mut values_read = 0;
        while self.position < data_len && values_read < values_to_read {
            // u32 length prefix, then `len` value bytes
            let len: usize =
                read_num_bytes!(u32, LEN_SIZE, data[self.position..]) as usize;
            self.position += LEN_SIZE;
            if data_len < self.position + len {
                return Err(eof_err!("Not enough bytes to decode"));
            }
            values.push(self.data.range(self.position, len));
            self.position += len;
            values_read += 1;
        }
        self.num_values -= values_read;
        Ok(values)
    }
}
impl ValueDecoder for VariableLenPlainDecoder {
    /// Reads up to `num_values` length-prefixed values, invoking `read_bytes`
    /// once per value (with a values-read count of 1 each call, as required
    /// for offset-based converters such as `StringArrayConverter`).
    fn read_value_bytes(
        &mut self,
        num_values: usize,
        read_bytes: &mut dyn FnMut(&[u8], usize),
    ) -> Result<usize> {
        const LEN_SIZE: usize = std::mem::size_of::<u32>();
        let data = self.data.data();
        let data_len = data.len();
        let values_to_read = std::cmp::min(self.num_values, num_values);
        let mut values_read = 0;
        while self.position < data_len && values_read < values_to_read {
            // u32 length prefix, then `len` value bytes
            let len: usize =
                read_num_bytes!(u32, LEN_SIZE, data[self.position..]) as usize;
            self.position += LEN_SIZE;
            if data_len < self.position + len {
                return Err(eof_err!("Not enough bytes to decode"));
            }
            read_bytes(&data[self.position..][..len], 1);
            self.position += len;
            values_read += 1;
        }
        self.num_values -= values_read;
        Ok(values_read)
    }
}
/// RLE_DICTIONARY decoder for fixed-length values: decodes RLE keys and
/// looks each key up in the chunk's cached dictionary buffer.
pub(crate) struct FixedLenDictionaryDecoder {
    // shared chunk context holding the decoded dictionary values
    context_ref: Rc<RefCell<ColumnChunkContext>>,
    // raw key page data; retained so the backing buffer stays alive while
    // `rle_decoder` reads from it (field name typo `bufer` fixed)
    key_data_buffer: ByteBufferPtr,
    num_values: usize,
    rle_decoder: RleDecoder,
    // size of one dictionary value in bytes
    value_byte_len: usize,
    // scratch buffer for decoded keys
    keys_buffer: Vec<i32>,
}
impl FixedLenDictionaryDecoder {
    /// Sets up the RLE key decoder over `key_data_buffer`.
    /// Panics if `value_bit_len` is not a multiple of 8.
    pub(crate) fn new(
        column_chunk_context: Rc<RefCell<ColumnChunkContext>>,
        key_data_buffer: ByteBufferPtr,
        num_values: usize,
        value_bit_len: usize,
    ) -> Self {
        assert!(
            value_bit_len % 8 == 0,
            "value_bit_size must be a multiple of 8"
        );
        // First byte in `data` is bit width; the RLE key data follows
        let bit_width = key_data_buffer.data()[0];
        let mut rle_decoder = RleDecoder::new(bit_width);
        rle_decoder.set_data(key_data_buffer.start_from(1));
        Self {
            context_ref: column_chunk_context,
            key_data_buffer,
            num_values,
            rle_decoder,
            value_byte_len: value_bit_len / 8,
            keys_buffer: vec![0; 2048],
        }
    }
}
impl ValueDecoder for FixedLenDictionaryDecoder {
    /// Decodes up to `num_values` RLE keys in batches and emits the
    /// corresponding fixed-length dictionary value bytes, one value per
    /// `read_bytes` call.
    fn read_value_bytes(
        &mut self,
        num_values: usize,
        read_bytes: &mut dyn FnMut(&[u8], usize),
    ) -> Result<usize> {
        if self.num_values == 0 {
            return Ok(0);
        }
        let context = self.context_ref.borrow();
        // fixed-length dictionaries are cached as a single contiguous buffer
        // (see `FixedLenPlainDecoder::read_dictionary_values`), so values are
        // addressed by byte offset into `values[0]`
        let values = context.dictionary_values.as_ref().unwrap();
        let input_value_bytes = values[0].data();
        // read no more than available values or requested values
        let values_to_read = std::cmp::min(self.num_values, num_values);
        let mut values_read = 0;
        while values_read < values_to_read {
            // read keys in batches of up to self.keys_buffer.len()
            let keys_to_read =
                std::cmp::min(values_to_read - values_read, self.keys_buffer.len());
            // `?` propagates RLE decode errors (replaces a manual match)
            let keys_read = self
                .rle_decoder
                .get_batch(&mut self.keys_buffer[..keys_to_read])?;
            if keys_read == 0 {
                // key stream exhausted early: mark decoder empty
                self.num_values = 0;
                return Ok(values_read);
            }
            for i in 0..keys_read {
                let key = self.keys_buffer[i] as usize;
                read_bytes(
                    &input_value_bytes[key * self.value_byte_len..]
                        [..self.value_byte_len],
                    1,
                );
            }
            values_read += keys_read;
        }
        self.num_values -= values_read;
        Ok(values_read)
    }
}
/// RLE_DICTIONARY decoder for variable-length (BYTE_ARRAY) values: decodes
/// RLE keys and emits the matching cached dictionary entries.
pub(crate) struct VariableLenDictionaryDecoder {
    // shared chunk context holding the decoded dictionary values
    context_ref: Rc<RefCell<ColumnChunkContext>>,
    // raw key page data; retained so the backing buffer stays alive while
    // `rle_decoder` reads from it (field name typo `bufer` fixed)
    key_data_buffer: ByteBufferPtr,
    num_values: usize,
    rle_decoder: RleDecoder,
    // scratch buffer for decoded keys
    keys_buffer: Vec<i32>,
}
impl VariableLenDictionaryDecoder {
    /// Sets up the RLE key decoder over `key_data_buffer`.
    pub(crate) fn new(
        column_chunk_context: Rc<RefCell<ColumnChunkContext>>,
        key_data_buffer: ByteBufferPtr,
        num_values: usize,
    ) -> Self {
        // First byte in `data` is bit width; the RLE key data follows
        let bit_width = key_data_buffer.data()[0];
        let mut rle_decoder = RleDecoder::new(bit_width);
        rle_decoder.set_data(key_data_buffer.start_from(1));
        Self {
            context_ref: column_chunk_context,
            key_data_buffer,
            num_values,
            rle_decoder,
            keys_buffer: vec![0; 2048],
        }
    }
}
impl ValueDecoder for VariableLenDictionaryDecoder {
    /// Decodes up to `num_values` RLE keys in batches and emits each
    /// referenced dictionary entry's bytes via `read_bytes`, one value per
    /// call.
    fn read_value_bytes(
        &mut self,
        num_values: usize,
        read_bytes: &mut dyn FnMut(&[u8], usize),
    ) -> Result<usize> {
        if self.num_values == 0 {
            return Ok(0);
        }
        let context = self.context_ref.borrow();
        // variable-length dictionaries are cached as one buffer per entry
        let values = context.dictionary_values.as_ref().unwrap();
        let values_to_read = std::cmp::min(self.num_values, num_values);
        let mut values_read = 0;
        while values_read < values_to_read {
            // read keys in batches of up to self.keys_buffer.len()
            let keys_to_read =
                std::cmp::min(values_to_read - values_read, self.keys_buffer.len());
            // `?` propagates RLE decode errors (replaces a manual match)
            let keys_read = self
                .rle_decoder
                .get_batch(&mut self.keys_buffer[..keys_to_read])?;
            if keys_read == 0 {
                // key stream exhausted early: mark decoder empty
                self.num_values = 0;
                return Ok(values_read);
            }
            for i in 0..keys_read {
                let key = self.keys_buffer[i] as usize;
                read_bytes(values[key].data(), 1);
            }
            values_read += keys_read;
        }
        self.num_values -= values_read;
        Ok(values_read)
    }
}
/// Adapts the DELTA_BYTE_ARRAY decoder to the `ValueDecoder` interface.
pub(crate) struct DeltaByteArrayValueDecoder {
    decoder: DeltaByteArrayDecoder<ByteArrayType>,
}
impl DeltaByteArrayValueDecoder {
    /// Initializes the inner delta decoder with the page value bytes.
    pub fn new(data: ByteBufferPtr, num_values: usize) -> Result<Self> {
        let mut decoder = DeltaByteArrayDecoder::new();
        decoder.set_data(data, num_values)?;
        Ok(Self { decoder })
    }
}
impl ValueDecoder for DeltaByteArrayValueDecoder {
    /// Decodes up to `num_values` delta-encoded byte arrays, one `read_bytes`
    /// call per value.
    fn read_value_bytes(
        &mut self,
        mut num_values: usize,
        read_bytes: &mut dyn FnMut(&[u8], usize),
    ) -> Result<usize> {
        num_values = std::cmp::min(num_values, self.decoder.values_left());
        // reuse one scratch slot instead of allocating a fresh `ByteArray`
        // on every loop iteration; `get` overwrites it each time
        let mut buf = [ByteArray::new()];
        let mut values_read = 0;
        while values_read < num_values {
            let num_read = self.decoder.get(&mut buf)?;
            // `values_left()` guarantees a value is available here
            assert_eq!(num_read, 1);
            read_bytes(buf[0].data(), 1);
            values_read += 1;
        }
        Ok(values_read)
    }
}
use arrow::datatypes::ArrowPrimitiveType;
/// Converts fixed-width value bytes into a primitive Arrow array of type `T`.
pub struct PrimitiveArrayConverter<T: ArrowPrimitiveType> {
    // zero-sized marker tying the converter to its Arrow type
    _phantom_data: PhantomData<T>,
}
impl<T: ArrowPrimitiveType> PrimitiveArrayConverter<T> {
    pub fn new() -> Self {
        Self {
            _phantom_data: PhantomData,
        }
    }
}
impl<T: ArrowPrimitiveType> ArrayConverter for PrimitiveArrayConverter<T> {
    /// Copies raw value bytes into a single Arrow buffer and wraps it in
    /// `ArrayData` of `T::DATA_TYPE`; the result carries no null bitmap.
    fn convert_value_bytes(
        &self,
        value_decoder: &mut impl ValueDecoder,
        num_values: usize,
    ) -> Result<arrow::array::ArrayData> {
        let value_size = T::get_byte_width();
        let values_byte_capacity = num_values * value_size;
        let mut values_buffer = MutableBuffer::new(values_byte_capacity);
        value_decoder.read_value_bytes(num_values, &mut |value_bytes, _| {
            values_buffer.extend_from_slice(value_bytes);
        })?;
        // calculate actual data_len, which may be different from the iterator's upper bound
        let value_count = values_buffer.len() / value_size;
        let array_data = arrow::array::ArrayData::builder(T::DATA_TYPE)
            .len(value_count)
            .add_buffer(values_buffer.into())
            .build();
        Ok(array_data)
    }
}
/// Builds an Arrow `Utf8` array from per-value byte callbacks.
pub struct StringArrayConverter {}
impl StringArrayConverter {
    pub fn new() -> Self {
        Self {}
    }
}
impl ArrayConverter for StringArrayConverter {
    /// Assembles a `Utf8` array: one i32 offset per value plus a single
    /// concatenated value-bytes buffer. Requires the decoder to deliver
    /// exactly one value per `read_bytes` call (as variable-length decoders
    /// in this module do).
    fn convert_value_bytes(
        &self,
        value_decoder: &mut impl ValueDecoder,
        num_values: usize,
    ) -> Result<arrow::array::ArrayData> {
        use arrow::datatypes::ArrowNativeType;
        let offset_size = std::mem::size_of::<i32>();
        let mut offsets_buffer = MutableBuffer::new((num_values + 1) * offset_size);
        // allocate initial capacity of 1 byte for each item
        let values_byte_capacity = num_values;
        let mut values_buffer = MutableBuffer::new(values_byte_capacity);
        let mut length_so_far = i32::default();
        // leading offset of 0 so offsets[i+1] - offsets[i] is value i's length
        offsets_buffer.push(length_so_far);
        value_decoder.read_value_bytes(num_values, &mut |value_bytes, values_read| {
            debug_assert_eq!(
                values_read, 1,
                "offset length value buffers can only contain bytes for a single value"
            );
            length_so_far +=
                <i32 as ArrowNativeType>::from_usize(value_bytes.len()).unwrap();
            // this should be safe because a ValueDecoder should not read more than num_values
            unsafe {
                offsets_buffer.push_unchecked(length_so_far);
            }
            values_buffer.extend_from_slice(value_bytes);
        })?;
        // calculate actual data_len, which may be different from the iterator's upper bound
        let data_len = (offsets_buffer.len() / offset_size) - 1;
        let array_data = arrow::array::ArrayData::builder(ArrowType::Utf8)
            .len(data_len)
            .add_buffer(offsets_buffer.into())
            .add_buffer(values_buffer.into())
            .build();
        Ok(array_data)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::arrow::{ArrowReader, ParquetFileArrowReader};
use crate::basic::ConvertedType;
use crate::column::page::Page;
use crate::column::writer::ColumnWriter;
use crate::data_type::ByteArray;
use crate::data_type::ByteArrayType;
use crate::file::properties::WriterProperties;
use crate::file::reader::SerializedFileReader;
use crate::file::serialized_reader::SliceableCursor;
use crate::file::writer::{FileWriter, SerializedFileWriter, TryClone};
use crate::schema::parser::parse_message_type;
use crate::schema::types::SchemaDescriptor;
use crate::util::test_common::page_util::{
DataPageBuilder, DataPageBuilderImpl, InMemoryPageIterator,
};
use crate::{
basic::Encoding, column::page::PageReader, schema::types::SchemaDescPtr,
};
use arrow::array::{PrimitiveArray, StringArray};
use arrow::datatypes::Int32Type as ArrowInt32;
use rand::{distributions::uniform::SampleUniform, thread_rng, Rng};
use std::io::{Cursor, Seek, SeekFrom, Write};
use std::sync::{Arc, Mutex};
/// Iterator for testing reading empty columns
    struct EmptyPageIterator {
        schema: SchemaDescPtr,
    }
    impl EmptyPageIterator {
        /// Wraps the schema; the iterator itself never yields a page reader.
        fn new(schema: SchemaDescPtr) -> Self {
            EmptyPageIterator { schema }
        }
    }
    impl Iterator for EmptyPageIterator {
        type Item = Result<Box<dyn PageReader>>;
        /// Always `None`: simulates a column with no pages at all.
        fn next(&mut self) -> Option<Self::Item> {
            None
        }
    }
    // Schema accessors still work even though no pages are produced.
    impl PageIterator for EmptyPageIterator {
        fn schema(&mut self) -> Result<SchemaDescPtr> {
            Ok(self.schema.clone())
        }
        fn column_schema(&mut self) -> Result<ColumnDescPtr> {
            Ok(self.schema.column(0))
        }
    }
#[test]
fn test_array_reader_empty_pages() {
// Construct column schema
let message_type = "
message test_schema {
REQUIRED INT32 leaf;
}
";
let schema = parse_message_type(message_type)
.map(|t| Arc::new(SchemaDescriptor::new(Arc::new(t))))
.unwrap();
let column_desc = schema.column(0);
let page_iterator = EmptyPageIterator::new(schema);
let converter = PrimitiveArrayConverter::<arrow::datatypes::Int32Type>::new();
let mut array_reader =
ArrowArrayReader::try_new(page_iterator, column_desc, converter, None)
.unwrap();
// expect no values to be read
let array = array_reader.next_batch(50).unwrap();
assert!(array.is_empty());
}
    /// Generates `num_chunks` column chunks of random pages via `make_pages`,
    /// appending the produced def/rep levels and values to the supplied
    /// vectors so tests can compare reader output against them.
    fn make_column_chunks<T: crate::data_type::DataType>(
        column_desc: ColumnDescPtr,
        encoding: Encoding,
        num_levels: usize,
        min_value: T::T,
        max_value: T::T,
        def_levels: &mut Vec<i16>,
        rep_levels: &mut Vec<i16>,
        values: &mut Vec<T::T>,
        page_lists: &mut Vec<Vec<Page>>,
        use_v2: bool,
        num_chunks: usize,
    ) where
        T::T: PartialOrd + SampleUniform + Copy,
    {
        for _i in 0..num_chunks {
            let mut pages = VecDeque::new();
            let mut data = Vec::new();
            let mut page_def_levels = Vec::new();
            let mut page_rep_levels = Vec::new();
            crate::util::test_common::make_pages::<T>(
                column_desc.clone(),
                encoding,
                1,
                num_levels,
                min_value,
                max_value,
                &mut page_def_levels,
                &mut page_rep_levels,
                &mut data,
                &mut pages,
                use_v2,
            );
            // accumulate the expected output across all chunks
            def_levels.append(&mut page_def_levels);
            rep_levels.append(&mut page_rep_levels);
            values.append(&mut data);
            page_lists.push(Vec::from(pages));
        }
    }
#[test]
fn test_primitive_array_reader_data() {
// Construct column schema
let message_type = "
message test_schema {
REQUIRED INT32 leaf;
}
";
let schema = parse_message_type(message_type)
.map(|t| Arc::new(SchemaDescriptor::new(Arc::new(t))))
.unwrap();
let column_desc = schema.column(0);
// Construct page iterator
{
let mut data = Vec::new();
let mut page_lists = Vec::new();
make_column_chunks::<crate::data_type::Int32Type>(
column_desc.clone(),
Encoding::PLAIN,
100,
1,
200,
&mut Vec::new(),
&mut Vec::new(),
&mut data,
&mut page_lists,
true,
2,
);
let page_iterator =
InMemoryPageIterator::new(schema, column_desc.clone(), page_lists);
let converter = PrimitiveArrayConverter::<arrow::datatypes::Int32Type>::new();
let mut array_reader =
ArrowArrayReader::try_new(page_iterator, column_desc, converter, None)
.unwrap();
// Read first 50 values, which are all from the first column chunk
let array = array_reader.next_batch(50).unwrap();
let array = array
.as_any()
.downcast_ref::<PrimitiveArray<ArrowInt32>>()
.unwrap();
assert_eq!(
&PrimitiveArray::<ArrowInt32>::from(data[0..50].to_vec()),
array
);
// Read next 100 values, the first 50 ones are from the first column chunk,
// and the last 50 ones are from the second column chunk
let array = array_reader.next_batch(100).unwrap();
let array = array
.as_any()
.downcast_ref::<PrimitiveArray<ArrowInt32>>()
.unwrap();
assert_eq!(
&PrimitiveArray::<ArrowInt32>::from(data[50..150].to_vec()),
array
);
// Try to read 100 values, however there are only 50 values
let array = array_reader.next_batch(100).unwrap();
let array = array
.as_any()
.downcast_ref::<PrimitiveArray<ArrowInt32>>()
.unwrap();
assert_eq!(
&PrimitiveArray::<ArrowInt32>::from(data[150..200].to_vec()),
array
);
}
}
#[test]
fn test_primitive_array_reader_def_and_rep_levels() {
// Construct column schema
let message_type = "
message test_schema {
REPEATED Group test_mid {
OPTIONAL INT32 leaf;
}
}
";
let schema = parse_message_type(message_type)
.map(|t| Arc::new(SchemaDescriptor::new(Arc::new(t))))
.unwrap();
let column_desc = schema.column(0);
// Construct page iterator
{
let mut def_levels = Vec::new();
let mut rep_levels = Vec::new();
let mut page_lists = Vec::new();
make_column_chunks::<crate::data_type::Int32Type>(
column_desc.clone(),
Encoding::PLAIN,
100,
1,
200,
&mut def_levels,
&mut rep_levels,
&mut Vec::new(),
&mut page_lists,
true,
2,
);
let page_iterator =
InMemoryPageIterator::new(schema, column_desc.clone(), page_lists);
let converter = PrimitiveArrayConverter::<arrow::datatypes::Int32Type>::new();
let mut array_reader =
ArrowArrayReader::try_new(page_iterator, column_desc, converter, None)
.unwrap();
let mut accu_len: usize = 0;
// Read first 50 values, which are all from the first column chunk
let array = array_reader.next_batch(50).unwrap();
assert_eq!(
Some(&def_levels[accu_len..(accu_len + array.len())]),
array_reader.get_def_levels()
);
assert_eq!(
Some(&rep_levels[accu_len..(accu_len + array.len())]),
array_reader.get_rep_levels()
);
accu_len += array.len();
// Read next 100 values, the first 50 ones are from the first column chunk,
// and the last 50 ones are from the second column chunk
let array = array_reader.next_batch(100).unwrap();
assert_eq!(
Some(&def_levels[accu_len..(accu_len + array.len())]),
array_reader.get_def_levels()
);
assert_eq!(
Some(&rep_levels[accu_len..(accu_len + array.len())]),
array_reader.get_rep_levels()
);
accu_len += array.len();
// Try to read 100 values, however there are only 50 values
let array = array_reader.next_batch(100).unwrap();
assert_eq!(
Some(&def_levels[accu_len..(accu_len + array.len())]),
array_reader.get_def_levels()
);
assert_eq!(
Some(&rep_levels[accu_len..(accu_len + array.len())]),
array_reader.get_rep_levels()
);
assert_eq!(accu_len + array.len(), 200);
}
}
#[test]
fn test_arrow_array_reader_string() {
// Construct column schema
let message_type = "
message test_schema {
REPEATED Group test_mid {
OPTIONAL BYTE_ARRAY leaf (UTF8);
}
}
";
let num_pages = 2;
let values_per_page = 100;
let str_base = "Hello World";
let schema = parse_message_type(message_type)
.map(|t| Arc::new(SchemaDescriptor::new(Arc::new(t))))
.unwrap();
let column_desc = schema.column(0);
let max_def_level = column_desc.max_def_level();
let max_rep_level = column_desc.max_rep_level();
assert_eq!(max_def_level, 2);
assert_eq!(max_rep_level, 1);
let mut rng = thread_rng();
let mut pages: Vec<Vec<Page>> = Vec::new();
let mut rep_levels = Vec::with_capacity(num_pages * values_per_page);
let mut def_levels = Vec::with_capacity(num_pages * values_per_page);
let mut all_values = Vec::with_capacity(num_pages * values_per_page);
for i in 0..num_pages {
let mut values = Vec::with_capacity(values_per_page);
for _ in 0..values_per_page {
let def_level = rng.gen_range(0..max_def_level + 1);
let rep_level = rng.gen_range(0..max_rep_level + 1);
if def_level == max_def_level {
let len = rng.gen_range(1..str_base.len());
let slice = &str_base[..len];
values.push(ByteArray::from(slice));
all_values.push(Some(slice.to_string()));
} else {
all_values.push(None)
}
rep_levels.push(rep_level);
def_levels.push(def_level)
}
let range = i * values_per_page..(i + 1) * values_per_page;
let mut pb =
DataPageBuilderImpl::new(column_desc.clone(), values.len() as u32, true);
pb.add_rep_levels(max_rep_level, &rep_levels.as_slice()[range.clone()]);
pb.add_def_levels(max_def_level, &def_levels.as_slice()[range]);
pb.add_values::<ByteArrayType>(Encoding::PLAIN, values.as_slice());
let data_page = pb.consume();
pages.push(vec![data_page]);
}
let page_iterator = InMemoryPageIterator::new(schema, column_desc.clone(), pages);
let converter = StringArrayConverter::new();
let mut array_reader =
ArrowArrayReader::try_new(page_iterator, column_desc, converter, None)
.unwrap();
let mut accu_len: usize = 0;
let array = array_reader.next_batch(values_per_page / 2).unwrap();
assert_eq!(array.len(), values_per_page / 2);
assert_eq!(
Some(&def_levels[accu_len..(accu_len + array.len())]),
array_reader.get_def_levels()
);
assert_eq!(
Some(&rep_levels[accu_len..(accu_len + array.len())]),
array_reader.get_rep_levels()
);
accu_len += array.len();
// Read next values_per_page values, the first values_per_page/2 ones are from the first column chunk,
// and the last values_per_page/2 ones are from the second column chunk
let array = array_reader.next_batch(values_per_page).unwrap();
assert_eq!(array.len(), values_per_page);
assert_eq!(
Some(&def_levels[accu_len..(accu_len + array.len())]),
array_reader.get_def_levels()
);
assert_eq!(
Some(&rep_levels[accu_len..(accu_len + array.len())]),
array_reader.get_rep_levels()
);
let strings = array.as_any().downcast_ref::<StringArray>().unwrap();
for i in 0..array.len() {
if array.is_valid(i) {
assert_eq!(
all_values[i + accu_len].as_ref().unwrap().as_str(),
strings.value(i)
)
} else {
assert_eq!(all_values[i + accu_len], None)
}
}
accu_len += array.len();
// Try to read values_per_page values, however there are only values_per_page/2 values
let array = array_reader.next_batch(values_per_page).unwrap();
assert_eq!(array.len(), values_per_page / 2);
assert_eq!(
Some(&def_levels[accu_len..(accu_len + array.len())]),
array_reader.get_def_levels()
);
assert_eq!(
Some(&rep_levels[accu_len..(accu_len + array.len())]),
array_reader.get_rep_levels()
);
}
    /// Allows to write parquet into memory. Intended only for use in tests.
    #[derive(Clone)]
    struct VecWriter {
        // Shared, locked in-memory cursor: clones (needed to satisfy
        // `TryClone` for the parquet writer) all append to the same buffer.
        data: Arc<Mutex<Cursor<Vec<u8>>>>,
    }
impl VecWriter {
pub fn new() -> VecWriter {
VecWriter {
data: Arc::new(Mutex::new(Cursor::new(Vec::new()))),
}
}
pub fn consume(self) -> Vec<u8> {
Arc::try_unwrap(self.data)
.unwrap()
.into_inner()
.unwrap()
.into_inner()
}
}
    impl TryClone for VecWriter {
        // Cloning only bumps the Arc refcount on the shared buffer, so this
        // can never actually fail.
        fn try_clone(&self) -> std::io::Result<Self> {
            Ok(self.clone())
        }
    }
    // Both methods delegate to the shared in-memory cursor; the lock unwraps
    // are acceptable in test-only code.
    impl Seek for VecWriter {
        fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
            self.data.lock().unwrap().seek(pos)
        }
        fn stream_position(&mut self) -> std::io::Result<u64> {
            self.data.lock().unwrap().stream_position()
        }
    }
    // Writes go straight through to the shared in-memory cursor; `flush` is a
    // no-op for `Cursor` but is forwarded for completeness.
    impl Write for VecWriter {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            self.data.lock().unwrap().write(buf)
        }
        fn flush(&mut self) -> std::io::Result<()> {
            self.data.lock().unwrap().flush()
        }
    }
#[test]
fn test_string_delta_byte_array() {
use crate::basic;
use crate::schema::types::Type;
let data = VecWriter::new();
let schema = Arc::new(
Type::group_type_builder("string_test")
.with_fields(&mut vec![Arc::new(
Type::primitive_type_builder("c", basic::Type::BYTE_ARRAY)
.with_converted_type(ConvertedType::UTF8)
.build()
.unwrap(),
)])
.build()
.unwrap(),
);
// Disable dictionary and use the fallback encoding.
let p = Arc::new(
WriterProperties::builder()
.set_dictionary_enabled(false)
.set_encoding(Encoding::DELTA_BYTE_ARRAY)
.build(),
);
// Write a few strings.
let mut w = SerializedFileWriter::new(data.clone(), schema, p).unwrap();
let mut rg = w.next_row_group().unwrap();
let mut c = rg.next_column().unwrap().unwrap();
match &mut c {
ColumnWriter::ByteArrayColumnWriter(c) => {
c.write_batch(
&[ByteArray::from("foo"), ByteArray::from("bar")],
Some(&[1, 1]),
Some(&[0, 0]),
)
.unwrap();
}
_ => panic!("unexpected column"),
};
rg.close_column(c).unwrap();
w.close_row_group(rg).unwrap();
w.close().unwrap();
std::mem::drop(w);
// Check we can read them back.
let r = SerializedFileReader::new(SliceableCursor::new(Arc::new(data.consume())))
.unwrap();
let mut r = ParquetFileArrowReader::new(Arc::new(r));
let batch = r
.get_record_reader_by_columns([0], 1024)
.unwrap()
.next()
.unwrap()
.unwrap();
assert_eq!(batch.columns().len(), 1);
let strings = batch
.column(0)
.as_any()
.downcast_ref::<StringArray>()
.unwrap();
assert_eq!(
strings.into_iter().collect::<Vec<_>>(),
vec![Some("foo"), Some("bar")]
);
}
}
| 34.994206 | 110 | 0.5525 |
f4b39997627fb2dacfbc756b49d9d848c223791e | 12,183 | //! Utilities for fetching player data from the OSRS hiscores.
use crate::{
config::OsrsConfig,
error::OsrsError,
utils::{
http,
skill::{Skill, SKILLS},
},
};
use anyhow::Context;
use csv::ReaderBuilder;
use serde::Deserialize;
use std::{collections::HashMap, convert::TryInto};
/// URL of the hiscore. Must also provide a `?player=<username>` param.
const HISCORE_URL: &str =
    "https://secure.runescape.com/m=hiscore_oldschool/index_lite.ws";
/// The list of minigames tracked in the hiscore. This order is very important
/// because it corresponds to the order they are in the response — add new
/// entries in the position they appear in the API, never alphabetically.
const MINIGAMES: &[&str] = &[
    // I'm *pretty sure* these first 3 are just placeholders to delimit skills
    // vs minigames
    "1?",
    "2?",
    "3?",
    "Clue Scroll (All)",
    "Clue Scroll (Beginner)",
    "Clue Scroll (Easy)",
    "Clue Scroll (Medium)",
    "Clue Scroll (Hard)",
    "Clue Scroll (Elite)",
    "Clue Scroll (Master)",
    "LMS - Rank",
    "Soul Wars Zeal",
    "Abyssal Sire",
    "Alchemical Hydra",
    "Barrows Chests",
    "Bryophyta",
    "Callisto",
    "Cerberus",
    "Chambers of Xeric",
    "Chambers of Xeric: Challenge Mode",
    "Chaos Elemental",
    "Chaos Fanatic",
    "Commander Zilyana",
    // Fixed label typo (was "Corporeal Best")
    "Corporeal Beast",
    "Dagannoth Prime",
    "Dagannoth Rex",
    "Dagannoth Supreme",
    "Crazy Archaeologist",
    "Deranged Archaeologist",
    "General Graardor",
    "Giant Mole",
    "Grotesque Guardians",
    "Hespori",
    "Kalphite Queen",
    "King Black Dragon",
    "Kraken",
    "Kree'Arra",
    "K'ril Tsutsaroth",
    "Mimic",
    "Nightmare",
    "Phosani's Nightmare",
    "Obor",
    "Sarachnis",
    "Scorpia",
    "Skotizo",
    "Tempoross",
    // Fixed label typos (were "Guantlet")
    "The Gauntlet",
    "The Corrupted Gauntlet",
    "Theatre of Blood",
    "Theatre of Blood: Hard Mode",
    "Thermonuclear Smoke Devil",
    "TzKal-Zuk",
    "TzTok-Jad",
    "Venenatis",
    "Vet'ion",
    "Vorkath",
    "Wintertodt",
    "Zalcano",
    "Zulrah",
];
/// One row in the hiscores CSV response.
///
/// Skill rows are `rank,level,xp`; minigame/boss rows are `rank,score`,
/// which is why `xp` is optional and defaulted.
#[derive(Copy, Clone, Debug, Deserialize)]
struct HiscoreItem {
    // These are isize instead of usize because Jagex uses -1 for "missing"
    /// Player's rank in the category.
    rank: isize,
    /// For skills, the level. For everything else, the completion #.
    score: isize,
    /// Total experience points. Only present for skills.
    #[serde(default)]
    xp: Option<isize>,
}
/// One skill for a player in the hiscores.
#[derive(Clone, Debug)]
pub struct HiscoreSkill {
    /// The skill name.
    pub skill: Skill,
    /// The player's rank in this skill (rank 1 is the top-ranked player).
    pub rank: usize,
    /// The player's level in the skill.
    pub level: usize,
    /// The player's total xp in the skill.
    pub xp: usize,
}
/// A minigame/boss/other stat tracked on the hiscores. This captures everything
/// other than skills.
#[derive(Clone, Debug)]
pub struct HiscoreMinigame {
    /// The minigame/boss name (one of the entries in [`MINIGAMES`]).
    pub name: &'static str,
    /// The player's rank in this minigame.
    pub rank: usize,
    /// The minigame score/completion count/kill count.
    pub score: usize,
}
/// Hiscore results for a player.
///
/// Unranked skills/minigames (reported as -1 by the API) are omitted from
/// these maps entirely.
#[derive(Clone, Debug)]
pub struct HiscorePlayer {
    /// Data on all skills for the player, keyed by skill name
    skills: HashMap<Skill, HiscoreSkill>,
    /// Data on all minigames/bosses for the player, keyed by minigame/boss
    /// name
    minigames: HashMap<&'static str, HiscoreMinigame>,
}
impl HiscorePlayer {
    /// Load a player's data from the hiscore.
    ///
    /// The API returns rows in a fixed order — all skills first, then all
    /// minigames — so the two `zip`s below share a single iterator and each
    /// consume their portion of the rows in sequence.
    pub fn load(username: &str) -> anyhow::Result<Self> {
        // It's important that we convert to an iterator *now*, so that the
        // two blocks below use the same iterator, and each row will only be
        // consumed once
        let mut items = load_hiscore_items(username)?.into_iter();
        let skills: HashMap<Skill, HiscoreSkill> = SKILLS
            .iter()
            .zip(&mut items)
            .filter_map(|(&skill, item)| {
                Some((
                    skill,
                    HiscoreSkill {
                        skill,
                        // If any of these are -1, that means the player is
                        // unranked in this skill
                        rank: item.rank.try_into().ok()?,
                        level: item.score.try_into().ok()?,
                        xp: item.xp.unwrap().try_into().ok()?,
                    },
                ))
            })
            .collect();
        let minigames: HashMap<&'static str, HiscoreMinigame> = MINIGAMES
            .iter()
            .zip(&mut items)
            .filter_map(|(&name, item)| {
                Some((
                    name,
                    HiscoreMinigame {
                        name,
                        // If any of these are -1, that means the player is
                        // unranked in this minigame
                        rank: item.rank.try_into().ok()?,
                        score: item.score.try_into().ok()?,
                    },
                ))
            })
            .collect();
        Ok(Self { skills, minigames })
    }
    /// Load a player's stats from a combination of a command line argument
    /// and the config. If a name was supplied on the command line, use that,
    /// otherwise fall back to the config. If there's no username present there
    /// either, then return an error.
    ///
    /// This is useful for many commands that accept a `--player` argument.
    pub fn load_from_args(
        cfg: &OsrsConfig,
        username_override: &[String],
    ) -> anyhow::Result<Self> {
        let username: String = match (username_override, &cfg.default_player) {
            // No arg provided, empty default - error
            (&[], None) => Err(anyhow::Error::from(OsrsError::ArgsError(
                "No player given".into(),
            ))),
            // No arg provided, but we have a default - use the default
            (&[], Some(default_player)) => Ok(default_player.clone()),
            // Arg was provided, return that
            (&[_, ..], _) => Ok(username_override.join(" ")),
        }?;
        Self::load(&username)
    }
    /// Get data for a single skill from the player.
    ///
    /// Panics if the player is unranked in this skill, since unranked skills
    /// are omitted from the map during `load`.
    pub fn skill(&self, skill: Skill) -> &HiscoreSkill {
        self.skills.get(&skill).unwrap()
    }
    /// Get a list of all skills for this player, in the standard order (i.e.)
    /// the order shown in the hiscores/in-game skill panel). Any skill for
    /// which the player is not ranked will not be included here.
    pub fn skills(&self) -> Vec<&HiscoreSkill> {
        // We can't just use self.skills.values() because they have to be in
        // the correct order
        SKILLS
            .iter()
            .filter_map(|skill| self.skills.get(skill))
            .collect()
    }
    /// Get a list of minigame scores for the player. Any minigame for which
    /// the player has no score will not be included here.
    pub fn minigames(&self) -> Vec<&HiscoreMinigame> {
        MINIGAMES
            .iter()
            // Any minigame that the user has no entry for will be missing here
            .filter_map(|name| self.minigames.get(name))
            .collect()
    }
}
/// Load a list of hiscore entries for a player from the OSRS API. The API
/// response is a list of CSV entries formatted as `rank,level,xp` for skills
/// followed by `rank,score` for minigames/bosses. Entries are unlabeled so
/// each oen is identified only by its position in the list.
fn load_hiscore_items(username: &str) -> anyhow::Result<Vec<HiscoreItem>> {
// Fetch data from the API
let body = http::agent()
.get(HISCORE_URL)
.query("player", username)
.call()?
.into_string()?;
// Parse the response as a CSV
let mut rdr = ReaderBuilder::new()
.has_headers(false)
.flexible(true)
.from_reader(body.as_bytes());
rdr.deserialize()
// Iterator magic to convert Vec<Result> -> Result<Vec>
// If any item fails, this whole thing will fail
.collect::<Result<Vec<HiscoreItem>, csv::Error>>()
.context("Error parsing hiscore data")
}
/// Resolve the skill level a command should use, from user-supplied inputs.
///
/// Many commands rely on a single level (e.g. farming calculators rely on
/// farming level) and accept it several ways. Resolution order:
/// 1. A level passed directly on the command line
/// 2. A username passed on the command line, looked up on the hiscores
/// 3. The default username from the config, looked up on the hiscores
///
/// Returns an error if none of the three sources is available.
pub fn get_level_from_args(
    cfg: &OsrsConfig,
    skill: Skill,
    username_override: &[String],
    level_override: Option<usize>,
) -> anyhow::Result<usize> {
    match level_override {
        // An explicit level wins outright
        Some(level) => Ok(level),
        // Otherwise resolve a player (CLI arg first, then config default —
        // errors out if neither exists) and read the level off the hiscores
        None => {
            let player = HiscorePlayer::load_from_args(cfg, username_override)?;
            Ok(player.skill(skill).level)
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Make sure our parsing logic lines up with the current response format
    /// of the hiscores. We expect this test to break any time they add more
    /// lines to the hiscore response (which is typically when they release a
    /// new minigame/boss). Typically the fix is as easy as adding the new row
    /// to the `MINIGAMES` constant
    ///
    /// NOTE(review): this test hits the live hiscore API over the network, so
    /// it requires connectivity and the named account to exist.
    #[test]
    fn test_hiscore_response_parse() {
        let username = "Hey Jase"; // Sorry buddy you're the guinea pig
        // Load the raw CSV data
        let raw_response = load_hiscore_items(username).unwrap();
        // Also load via our parsing logic
        let player = HiscorePlayer::load("Hey Jase").unwrap();
        assert_eq!(
            SKILLS.len() + MINIGAMES.len(),
            raw_response.len(),
            "Unexpected number of rows in hiscore response. \
            Skill or minigame list needs to be updated."
        );
        // Make sure that the skill values all line up correctly
        for (i, skill) in player.skills().into_iter().enumerate() {
            let raw_row = raw_response[i];
            assert_eq!(
                skill.rank as isize, raw_row.rank,
                "Incorrect rank for skill {}",
                skill.skill
            );
            assert_eq!(
                skill.level as isize, raw_row.score,
                "Incorrect level for skill {}",
                skill.skill
            );
            assert_eq!(
                Some(skill.xp as isize),
                raw_row.xp,
                "Incorrect XP for skill {}",
                skill.skill
            );
        }
        // Make sure each minigame *that has data* appears in the player data.
        // Minigames with an insufficient score will appear as `-1` instead of
        // being populated, and we expect those to be excluded from the parsed
        // data. We want to skip over those in our check here.
        // `skipped` counts unranked raw rows so far, so `i - skipped` indexes
        // into the (denser) parsed list.
        let parsed_minigames = player.minigames();
        let mut skipped = 0;
        for (i, raw_row) in raw_response[SKILLS.len()..].iter().enumerate() {
            if raw_row.rank == -1 {
                skipped += 1;
            } else {
                let parsed_minigame = parsed_minigames[i - skipped];
                assert_eq!(
                    parsed_minigame.rank as isize, raw_row.rank,
                    "Incorrect rank for minigame {}",
                    parsed_minigame.name
                );
                assert_eq!(
                    parsed_minigame.score as isize, raw_row.score,
                    "Incorrect score for minigame {}",
                    parsed_minigame.name
                );
            }
        }
    }
}
| 34.31831 | 80 | 0.581466 |
8ac379348b035eccfe7681500d1b958efe641d69 | 280,399 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub mod application_gateways {
use crate::models::*;
use snafu::{ResultExt, Snafu};
    /// Gets the specified application gateway.
    ///
    /// Issues `GET {base}/subscriptions/{sub}/resourceGroups/{rg}/providers/
    /// Microsoft.Network/applicationGateways/{name}` and deserializes a 200
    /// response into [`ApplicationGateway`]; any other status becomes
    /// `get::Error::UnexpectedResponse`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        application_gateway_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<ApplicationGateway, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/applicationGateways/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            application_gateway_name
        );
        let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET carries no body
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(get::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: ApplicationGateway =
                    serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                get::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Error types for [`get`].
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// One variant per pipeline stage: URL parsing, auth token fetch,
        /// request build/execute, response deserialization, or a status code
        /// the operation does not model (`UnexpectedResponse`).
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
application_gateway_name: &str,
parameters: &ApplicationGateway,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/applicationGateways/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
application_gateway_name
);
let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: ApplicationGateway =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ApplicationGateway =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
create_or_update::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Response and error types for [`create_or_update`].
    pub mod create_or_update {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Successful outcomes: 201 when the gateway was created, 200 when an
        /// existing gateway was updated.
        #[derive(Debug)]
        pub enum Response {
            Created201(ApplicationGateway),
            Ok200(ApplicationGateway),
        }
        /// One variant per pipeline stage (URL parse, auth, build, execute,
        /// (de)serialization) plus `UnexpectedResponse` for unmodeled statuses.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Deletes the specified application gateway.
    ///
    /// Issues `DELETE .../applicationGateways/{name}`; 202/204/200 are all
    /// treated as success variants of [`delete::Response`] (202 indicates the
    /// long-running delete was accepted).
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        application_gateway_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/applicationGateways/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            application_gateway_name
        );
        let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        // Attach a bearer token only when a credential is configured
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // DELETE carries no body
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            status_code => {
                let rsp_body = rsp.body();
                delete::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Response and error types for [`delete`].
    pub mod delete {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Successful outcomes: 202 (delete accepted, in progress), 204 (no
        /// such resource / already gone), 200 (deleted).
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            NoContent204,
            Ok200,
        }
        /// One variant per pipeline stage plus `UnexpectedResponse` for
        /// statuses the operation does not model.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Lists all application gateways in a resource group.
    ///
    /// Issues `GET .../resourceGroups/{rg}/providers/Microsoft.Network/
    /// applicationGateways` and deserializes a 200 response into
    /// [`ApplicationGatewayListResult`].
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<ApplicationGatewayListResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/applicationGateways",
            operation_config.base_path(),
            subscription_id,
            resource_group_name
        );
        let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET carries no body
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(list::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: ApplicationGatewayListResult =
                    serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                list::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Error types for [`list`].
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// One variant per pipeline stage plus `UnexpectedResponse` for
        /// statuses the operation does not model.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list_all(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<ApplicationGatewayListResult, list_all::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Network/applicationGateways",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).context(list_all::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_all::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_all::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list_all::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ApplicationGatewayListResult =
serde_json::from_slice(rsp_body).context(list_all::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
list_all::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error types for [`list_all`].
    pub mod list_all {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// One variant per pipeline stage plus `UnexpectedResponse` for
        /// statuses the operation does not model.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Starts the specified application gateway.
    ///
    /// Issues `POST .../applicationGateways/{name}/start` with an empty body;
    /// 200/202 map to [`start::Response`] (202 means the long-running start
    /// was accepted).
    pub async fn start(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        application_gateway_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<start::Response, start::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/applicationGateways/{}/start",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            application_gateway_name
        );
        let mut url = url::Url::parse(url_str).context(start::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Attach a bearer token only when a credential is configured
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(start::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Body-less POST: advertise an explicit zero content length
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(start::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(start::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(start::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(start::Response::Accepted202),
            status_code => {
                let rsp_body = rsp.body();
                start::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    pub mod start {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Typed success responses for `start`: completed synchronously (200)
        /// or accepted for asynchronous processing (202).
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        /// Errors that `start` can return.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn stop(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
application_gateway_name: &str,
subscription_id: &str,
) -> std::result::Result<stop::Response, stop::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/applicationGateways/{}/stop",
operation_config.base_path(),
subscription_id,
resource_group_name,
application_gateway_name
);
let mut url = url::Url::parse(url_str).context(stop::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(stop::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(stop::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(stop::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(stop::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(stop::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
stop::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod stop {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Typed success responses for `stop`: completed synchronously (200)
        /// or accepted for asynchronous processing (202).
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        /// Errors that `stop` can return.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod express_route_circuit_authorizations {
    use crate::models::*;
    use snafu::{ResultExt, Snafu};
    /// Gets the named authorization of an ExpressRoute circuit.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        circuit_name: &str,
        authorization_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<ExpressRouteCircuitAuthorization, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}/authorizations/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            circuit_name,
            authorization_name
        );
        let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(get::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: ExpressRouteCircuitAuthorization =
                    serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                get::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Errors that `get` can return.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Creates or updates an authorization in the specified ExpressRoute circuit.
    ///
    /// Bug fix: `authorization_parameters` is now serialized as the JSON PUT
    /// body. Previously the request was sent with an empty body and the
    /// parameter was silently ignored, so the service never received the payload.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        circuit_name: &str,
        authorization_name: &str,
        authorization_parameters: &ExpressRouteCircuitAuthorization,
        subscription_id: &str,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}/authorizations/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            circuit_name,
            authorization_name
        );
        let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Serialize the authorization definition as the JSON request body.
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(authorization_parameters).context(create_or_update::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: ExpressRouteCircuitAuthorization =
                    serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: ExpressRouteCircuitAuthorization =
                    serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                create_or_update::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    pub mod create_or_update {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Typed success responses for `create_or_update`.
        #[derive(Debug)]
        pub enum Response {
            Created201(ExpressRouteCircuitAuthorization),
            Ok200(ExpressRouteCircuitAuthorization),
        }
        /// Errors that `create_or_update` can return.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Deletes the named authorization from an ExpressRoute circuit.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        circuit_name: &str,
        authorization_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}/authorizations/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            circuit_name,
            authorization_name
        );
        let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                delete::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    pub mod delete {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Typed success responses for `delete`.
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            Ok200,
            NoContent204,
        }
        /// Errors that `delete` can return.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Lists all authorizations of an ExpressRoute circuit.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        circuit_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<AuthorizationListResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}/authorizations",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            circuit_name
        );
        let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(list::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AuthorizationListResult =
                    serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                list::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Errors that `list` can return.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod express_route_circuit_peerings {
    use crate::models::*;
    use snafu::{ResultExt, Snafu};
    /// Gets the named peering of an ExpressRoute circuit.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        circuit_name: &str,
        peering_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<ExpressRouteCircuitPeering, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}/peerings/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            circuit_name,
            peering_name
        );
        let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(get::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: ExpressRouteCircuitPeering =
                    serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                get::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Errors that `get` can return.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Creates or updates a peering in the specified ExpressRoute circuit.
    ///
    /// Bug fix: `peering_parameters` is now serialized as the JSON PUT body.
    /// Previously the request was sent with an empty body and the parameter
    /// was silently ignored, so the service never received the payload.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        circuit_name: &str,
        peering_name: &str,
        peering_parameters: &ExpressRouteCircuitPeering,
        subscription_id: &str,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}/peerings/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            circuit_name,
            peering_name
        );
        let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Serialize the peering definition as the JSON request body.
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(peering_parameters).context(create_or_update::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: ExpressRouteCircuitPeering =
                    serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: ExpressRouteCircuitPeering =
                    serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                create_or_update::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    pub mod create_or_update {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Typed success responses for `create_or_update`.
        #[derive(Debug)]
        pub enum Response {
            Ok200(ExpressRouteCircuitPeering),
            Created201(ExpressRouteCircuitPeering),
        }
        /// Errors that `create_or_update` can return.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Deletes the named peering from an ExpressRoute circuit.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        circuit_name: &str,
        peering_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}/peerings/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            circuit_name,
            peering_name
        );
        let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                delete::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    pub mod delete {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Typed success responses for `delete`.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        /// Errors that `delete` can return.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Lists all peerings of an ExpressRoute circuit.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        circuit_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<ExpressRouteCircuitPeeringListResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}/peerings",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            circuit_name
        );
        let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(list::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: ExpressRouteCircuitPeeringListResult =
                    serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                list::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Errors that `list` can return.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod express_route_circuits {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
circuit_name: &str,
subscription_id: &str,
) -> std::result::Result<ExpressRouteCircuit, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
circuit_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExpressRouteCircuit =
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
get::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Errors that `get` can return.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// The service replied with a status code not covered by the typed responses.
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            /// The request URL could not be parsed.
            ParseUrlError { source: url::ParseError },
            /// The HTTP request could not be assembled.
            BuildRequestError { source: http::Error },
            /// The HTTP client failed while executing the request.
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            /// The request body could not be serialized.
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            /// The response body could not be deserialized; the raw body is kept for diagnostics.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            /// Acquiring an access token from the credential failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
circuit_name: &str,
parameters: &ExpressRouteCircuit,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
circuit_name
);
let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: ExpressRouteCircuit =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExpressRouteCircuit =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
create_or_update::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod create_or_update {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Typed success responses for `create_or_update`: the circuit was
        /// created (201) or updated in place (200); both carry the resulting resource.
        #[derive(Debug)]
        pub enum Response {
            Created201(ExpressRouteCircuit),
            Ok200(ExpressRouteCircuit),
        }
        /// Errors that `create_or_update` can return.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
circuit_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
circuit_name
);
let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::OK => Ok(delete::Response::Ok200),
status_code => {
let rsp_body = rsp.body();
delete::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod delete {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Typed success responses for `delete`.
        #[derive(Debug)]
        pub enum Response {
            NoContent204,
            Accepted202,
            Ok200,
        }
        /// Errors that `delete` can return.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list_arp_table(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
circuit_name: &str,
peering_name: &str,
device_path: &str,
subscription_id: &str,
) -> std::result::Result<list_arp_table::Response, list_arp_table::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}/peerings/{}/arpTables/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
circuit_name,
peering_name,
device_path
);
let mut url = url::Url::parse(url_str).context(list_arp_table::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_arp_table::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_arp_table::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(list_arp_table::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExpressRouteCircuitsArpTableListResult =
serde_json::from_slice(rsp_body).context(list_arp_table::DeserializeError { body: rsp_body.clone() })?;
Ok(list_arp_table::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(list_arp_table::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
list_arp_table::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Response and error types for the sibling `list_arp_table` operation.
    pub mod list_arp_table {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Success outcomes: 200 with the ARP table payload, or a body-less 202.
        #[derive(Debug)]
        pub enum Response {
            Ok200(ExpressRouteCircuitsArpTableListResult),
            Accepted202,
        }
        /// One variant per failure point in the request pipeline: URL parsing,
        /// token acquisition, request construction/execution, (de)serialization,
        /// and any HTTP status the operation does not recognize.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list_routes_table(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
circuit_name: &str,
peering_name: &str,
device_path: &str,
subscription_id: &str,
) -> std::result::Result<list_routes_table::Response, list_routes_table::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}/peerings/{}/routeTables/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
circuit_name,
peering_name,
device_path
);
let mut url = url::Url::parse(url_str).context(list_routes_table::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_routes_table::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_routes_table::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(list_routes_table::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExpressRouteCircuitsRoutesTableListResult =
serde_json::from_slice(rsp_body).context(list_routes_table::DeserializeError { body: rsp_body.clone() })?;
Ok(list_routes_table::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(list_routes_table::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
list_routes_table::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Response and error types for the sibling `list_routes_table` operation.
    pub mod list_routes_table {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Success outcomes: 200 with the route table payload, or a body-less 202.
        #[derive(Debug)]
        pub enum Response {
            Ok200(ExpressRouteCircuitsRoutesTableListResult),
            Accepted202,
        }
        /// One variant per failure point in the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list_routes_table_summary(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
circuit_name: &str,
peering_name: &str,
device_path: &str,
subscription_id: &str,
) -> std::result::Result<list_routes_table_summary::Response, list_routes_table_summary::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}/peerings/{}/routeTablesSummary/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
circuit_name,
peering_name,
device_path
);
let mut url = url::Url::parse(url_str).context(list_routes_table_summary::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_routes_table_summary::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_routes_table_summary::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(list_routes_table_summary::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExpressRouteCircuitsRoutesTableSummaryListResult =
serde_json::from_slice(rsp_body).context(list_routes_table_summary::DeserializeError { body: rsp_body.clone() })?;
Ok(list_routes_table_summary::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(list_routes_table_summary::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
list_routes_table_summary::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Response and error types for the sibling `list_routes_table_summary` operation.
    pub mod list_routes_table_summary {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Success outcomes: 200 with the summary payload, or a body-less 202.
        #[derive(Debug)]
        pub enum Response {
            Ok200(ExpressRouteCircuitsRoutesTableSummaryListResult),
            Accepted202,
        }
        /// One variant per failure point in the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn get_stats(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
circuit_name: &str,
subscription_id: &str,
) -> std::result::Result<ExpressRouteCircuitStats, get_stats::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}/stats",
operation_config.base_path(),
subscription_id,
resource_group_name,
circuit_name
);
let mut url = url::Url::parse(url_str).context(get_stats::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get_stats::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get_stats::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get_stats::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExpressRouteCircuitStats =
serde_json::from_slice(rsp_body).context(get_stats::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
get_stats::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error types for the sibling `get_stats` operation.
    pub mod get_stats {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// One variant per failure point in the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn get_peering_stats(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
circuit_name: &str,
peering_name: &str,
subscription_id: &str,
) -> std::result::Result<ExpressRouteCircuitStats, get_peering_stats::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits/{}/peerings/{}/stats",
operation_config.base_path(),
subscription_id,
resource_group_name,
circuit_name,
peering_name
);
let mut url = url::Url::parse(url_str).context(get_peering_stats::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get_peering_stats::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get_peering_stats::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(get_peering_stats::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExpressRouteCircuitStats =
serde_json::from_slice(rsp_body).context(get_peering_stats::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
get_peering_stats::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error types for the sibling `get_peering_stats` operation.
    pub mod get_peering_stats {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// One variant per failure point in the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<ExpressRouteCircuitListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/expressRouteCircuits",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExpressRouteCircuitListResult =
serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
list::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error types for the sibling `list` operation.
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// One variant per failure point in the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list_all(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<ExpressRouteCircuitListResult, list_all::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Network/expressRouteCircuits",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).context(list_all::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_all::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_all::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list_all::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExpressRouteCircuitListResult =
serde_json::from_slice(rsp_body).context(list_all::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
list_all::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error types for the sibling `list_all` operation.
    pub mod list_all {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// One variant per failure point in the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
/// Operations on the `Microsoft.Network/expressRouteServiceProviders` collection.
pub mod express_route_service_providers {
    use crate::models::*;
    use snafu::{ResultExt, Snafu};
    /// GETs every ExpressRoute service provider visible to the subscription.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
    ) -> std::result::Result<ExpressRouteServiceProviderListResult, list::Error> {
        // Assemble the provider collection URL with the configured api-version.
        let mut url = url::Url::parse(&format!(
            "{}/subscriptions/{}/providers/Microsoft.Network/expressRouteServiceProviders",
            operation_config.base_path(),
            subscription_id
        ))
        .context(list::ParseUrlError)?;
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let mut builder = http::request::Builder::new().method(http::Method::GET);
        // Bearer authentication is optional; only added when configured.
        if let Some(credential) = operation_config.token_credential() {
            let token = credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list::GetTokenError)?;
            builder = builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token.token.secret()));
        }
        let req = builder
            .uri(url.as_str())
            .body(bytes::Bytes::from_static(azure_core::EMPTY_BODY))
            .context(list::BuildRequestError)?;
        let rsp = operation_config
            .http_client()
            .execute_request(req)
            .await
            .context(list::ExecuteRequestError)?;
        // Only 200 is expected; every other status is surfaced as an error.
        match rsp.status() {
            http::StatusCode::OK => {
                let parsed: ExpressRouteServiceProviderListResult = serde_json::from_slice(rsp.body())
                    .context(list::DeserializeError { body: rsp.body().clone() })?;
                Ok(parsed)
            }
            status_code => list::UnexpectedResponse {
                status_code,
                body: rsp.body().clone(),
            }
            .fail(),
        }
    }
    /// Error types for the sibling `list` operation.
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// One variant per failure point in the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod load_balancers {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
load_balancer_name: &str,
subscription_id: &str,
expand: Option<&str>,
) -> std::result::Result<LoadBalancer, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
load_balancer_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(expand) = expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LoadBalancer = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
get::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error types for the sibling `get` operation.
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// One variant per failure point in the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
load_balancer_name: &str,
parameters: &LoadBalancer,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
load_balancer_name
);
let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: LoadBalancer =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LoadBalancer =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
create_or_update::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Response and error types for the sibling `create_or_update` operation.
    pub mod create_or_update {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Success outcomes: 201 (created) or 200 (updated), each carrying the
        /// resulting load balancer.
        #[derive(Debug)]
        pub enum Response {
            Created201(LoadBalancer),
            Ok200(LoadBalancer),
        }
        /// One variant per failure point in the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
load_balancer_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
load_balancer_name
);
let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::OK => Ok(delete::Response::Ok200),
status_code => {
let rsp_body = rsp.body();
delete::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Response and error types for the sibling `delete` operation.
    pub mod delete {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Success outcomes: 204, 202 or 200, all without a body.
        #[derive(Debug)]
        pub enum Response {
            NoContent204,
            Accepted202,
            Ok200,
        }
        /// One variant per failure point in the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list_all(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<LoadBalancerListResult, list_all::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Network/loadBalancers",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).context(list_all::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_all::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_all::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list_all::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LoadBalancerListResult =
serde_json::from_slice(rsp_body).context(list_all::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
list_all::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error types for the sibling `list_all` operation.
    pub mod list_all {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// One variant per failure point in the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<LoadBalancerListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LoadBalancerListResult =
serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
list::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error types for the sibling `list` operation.
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// One variant per failure point in the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod local_network_gateways {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
local_network_gateway_name: &str,
subscription_id: &str,
) -> std::result::Result<LocalNetworkGateway, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/localNetworkGateways/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
local_network_gateway_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LocalNetworkGateway =
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
get::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error types for the sibling `get` operation.
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// One variant per failure point in the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
local_network_gateway_name: &str,
parameters: &LocalNetworkGateway,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/localNetworkGateways/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
local_network_gateway_name
);
let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: LocalNetworkGateway =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LocalNetworkGateway =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
create_or_update::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
pub mod create_or_update {
    use crate::{models, models::*};
    use snafu::Snafu;
    /// Successful outcomes of the `create_or_update` operation.
    #[derive(Debug)]
    pub enum Response {
        /// 201 Created — a new gateway was created.
        Created201(LocalNetworkGateway),
        /// 200 OK — an existing gateway was updated.
        Ok200(LocalNetworkGateway),
    }
    /// Error type for the `create_or_update` operation.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with a status code other than 200 or 201.
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        /// The assembled request URL could not be parsed.
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be deserialized from JSON.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// An access token could not be obtained from the credential.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
/// Deletes the specified local network gateway.
///
/// The service may answer 204 (nothing to delete), 200 (deleted) or 202
/// (deletion accepted, still in progress); each maps to its own
/// `delete::Response` variant.
pub async fn delete(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    local_network_gateway_name: &str,
    subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
    let http_client = operation_config.http_client();
    // Assemble the resource URL from the configured base path and path segments.
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/localNetworkGateways/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        local_network_gateway_name
    );
    let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
    let mut builder = http::request::Builder::new().method(http::Method::DELETE);
    // Attach a bearer token only when a credential is configured.
    if let Some(credential) = operation_config.token_credential() {
        let token = credential
            .get_token(operation_config.token_credential_resource())
            .await
            .context(delete::GetTokenError)?;
        builder = builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let request = builder
        .uri(url.as_str())
        .body(bytes::Bytes::from_static(azure_core::EMPTY_BODY))
        .context(delete::BuildRequestError)?;
    let response = http_client.execute_request(request).await.context(delete::ExecuteRequestError)?;
    match response.status() {
        http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
        http::StatusCode::OK => Ok(delete::Response::Ok200),
        http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
        status_code => delete::UnexpectedResponse {
            status_code,
            body: response.body().clone(),
        }
        .fail(),
    }
}
pub mod delete {
    use crate::{models, models::*};
    use snafu::Snafu;
    /// Successful outcomes of the `delete` operation.
    #[derive(Debug)]
    pub enum Response {
        /// 204 No Content — the resource did not exist.
        NoContent204,
        /// 200 OK — the resource was deleted.
        Ok200,
        /// 202 Accepted — deletion is in progress.
        Accepted202,
    }
    /// Error type for the `delete` operation.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with a status code other than 200, 202 or 204.
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        /// The assembled request URL could not be parsed.
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be deserialized from JSON.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// An access token could not be obtained from the credential.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
/// Lists all local network gateways in a resource group.
pub async fn list(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    subscription_id: &str,
) -> std::result::Result<LocalNetworkGatewayListResult, list::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/localNetworkGateways",
        operation_config.base_path(),
        subscription_id,
        resource_group_name
    );
    let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
    let mut builder = http::request::Builder::new().method(http::Method::GET);
    // Attach a bearer token only when a credential is configured.
    if let Some(credential) = operation_config.token_credential() {
        let token = credential
            .get_token(operation_config.token_credential_resource())
            .await
            .context(list::GetTokenError)?;
        builder = builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let request = builder
        .uri(url.as_str())
        .body(bytes::Bytes::from_static(azure_core::EMPTY_BODY))
        .context(list::BuildRequestError)?;
    let response = http_client.execute_request(request).await.context(list::ExecuteRequestError)?;
    if response.status() == http::StatusCode::OK {
        let body = response.body();
        serde_json::from_slice::<LocalNetworkGatewayListResult>(body).context(list::DeserializeError { body: body.clone() })
    } else {
        list::UnexpectedResponse {
            status_code: response.status(),
            body: response.body().clone(),
        }
        .fail()
    }
}
pub mod list {
    use crate::{models, models::*};
    use snafu::Snafu;
    /// Error type for the `list` operation.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with a status code other than 200.
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        /// The assembled request URL could not be parsed.
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be deserialized from JSON.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// An access token could not be obtained from the credential.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
}
pub mod network_interfaces {
use crate::models::*;
use snafu::{ResultExt, Snafu};
/// Fetches a single network interface, optionally expanding referenced resources.
pub async fn get(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    network_interface_name: &str,
    subscription_id: &str,
    expand: Option<&str>,
) -> std::result::Result<NetworkInterface, get::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/networkInterfaces/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        network_interface_name
    );
    let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
    let mut builder = http::request::Builder::new().method(http::Method::GET);
    // Attach a bearer token only when a credential is configured.
    if let Some(credential) = operation_config.token_credential() {
        let token = credential
            .get_token(operation_config.token_credential_resource())
            .await
            .context(get::GetTokenError)?;
        builder = builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    if let Some(expand) = expand {
        url.query_pairs_mut().append_pair("$expand", expand);
    }
    let request = builder
        .uri(url.as_str())
        .body(bytes::Bytes::from_static(azure_core::EMPTY_BODY))
        .context(get::BuildRequestError)?;
    let response = http_client.execute_request(request).await.context(get::ExecuteRequestError)?;
    if response.status() == http::StatusCode::OK {
        let body = response.body();
        serde_json::from_slice::<NetworkInterface>(body).context(get::DeserializeError { body: body.clone() })
    } else {
        get::UnexpectedResponse {
            status_code: response.status(),
            body: response.body().clone(),
        }
        .fail()
    }
}
pub mod get {
    use crate::{models, models::*};
    use snafu::Snafu;
    /// Error type for the `get` operation.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with a status code other than 200.
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        /// The assembled request URL could not be parsed.
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be deserialized from JSON.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// An access token could not be obtained from the credential.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
network_interface_name: &str,
parameters: &NetworkInterface,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/networkInterfaces/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
network_interface_name
);
let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: NetworkInterface =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: NetworkInterface =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
create_or_update::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
pub mod create_or_update {
    use crate::{models, models::*};
    use snafu::Snafu;
    /// Successful outcomes of the `create_or_update` operation.
    #[derive(Debug)]
    pub enum Response {
        /// 201 Created — a new network interface was created.
        Created201(NetworkInterface),
        /// 200 OK — an existing network interface was updated.
        Ok200(NetworkInterface),
    }
    /// Error type for the `create_or_update` operation.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with a status code other than 200 or 201.
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        /// The assembled request URL could not be parsed.
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be deserialized from JSON.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// An access token could not be obtained from the credential.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
/// Deletes the specified network interface.
///
/// The service may answer 204 (nothing to delete), 202 (deletion accepted,
/// still in progress) or 200 (deleted); each maps to its own
/// `delete::Response` variant.
pub async fn delete(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    network_interface_name: &str,
    subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
    let http_client = operation_config.http_client();
    // Assemble the resource URL from the configured base path and path segments.
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/networkInterfaces/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        network_interface_name
    );
    let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
    let mut builder = http::request::Builder::new().method(http::Method::DELETE);
    // Attach a bearer token only when a credential is configured.
    if let Some(credential) = operation_config.token_credential() {
        let token = credential
            .get_token(operation_config.token_credential_resource())
            .await
            .context(delete::GetTokenError)?;
        builder = builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let request = builder
        .uri(url.as_str())
        .body(bytes::Bytes::from_static(azure_core::EMPTY_BODY))
        .context(delete::BuildRequestError)?;
    let response = http_client.execute_request(request).await.context(delete::ExecuteRequestError)?;
    match response.status() {
        http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
        http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
        http::StatusCode::OK => Ok(delete::Response::Ok200),
        status_code => delete::UnexpectedResponse {
            status_code,
            body: response.body().clone(),
        }
        .fail(),
    }
}
pub mod delete {
    use crate::{models, models::*};
    use snafu::Snafu;
    /// Successful outcomes of the `delete` operation.
    #[derive(Debug)]
    pub enum Response {
        /// 204 No Content — the resource did not exist.
        NoContent204,
        /// 202 Accepted — deletion is in progress.
        Accepted202,
        /// 200 OK — the resource was deleted.
        Ok200,
    }
    /// Error type for the `delete` operation.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with a status code other than 200, 202 or 204.
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        /// The assembled request URL could not be parsed.
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be deserialized from JSON.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// An access token could not be obtained from the credential.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
/// Lists the network interfaces of one virtual machine inside a VM scale set.
pub async fn list_virtual_machine_scale_set_vm_network_interfaces(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    virtual_machine_scale_set_name: &str,
    virtualmachine_index: &str,
    subscription_id: &str,
) -> std::result::Result<NetworkInterfaceListResult, list_virtual_machine_scale_set_vm_network_interfaces::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines/{}/networkInterfaces",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        virtual_machine_scale_set_name,
        virtualmachine_index
    );
    let mut url = url::Url::parse(url_str).context(list_virtual_machine_scale_set_vm_network_interfaces::ParseUrlError)?;
    let mut builder = http::request::Builder::new().method(http::Method::GET);
    // Attach a bearer token only when a credential is configured.
    if let Some(credential) = operation_config.token_credential() {
        let token = credential
            .get_token(operation_config.token_credential_resource())
            .await
            .context(list_virtual_machine_scale_set_vm_network_interfaces::GetTokenError)?;
        builder = builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let request = builder
        .uri(url.as_str())
        .body(bytes::Bytes::from_static(azure_core::EMPTY_BODY))
        .context(list_virtual_machine_scale_set_vm_network_interfaces::BuildRequestError)?;
    let response = http_client
        .execute_request(request)
        .await
        .context(list_virtual_machine_scale_set_vm_network_interfaces::ExecuteRequestError)?;
    if response.status() == http::StatusCode::OK {
        let body = response.body();
        serde_json::from_slice::<NetworkInterfaceListResult>(body)
            .context(list_virtual_machine_scale_set_vm_network_interfaces::DeserializeError { body: body.clone() })
    } else {
        list_virtual_machine_scale_set_vm_network_interfaces::UnexpectedResponse {
            status_code: response.status(),
            body: response.body().clone(),
        }
        .fail()
    }
}
pub mod list_virtual_machine_scale_set_vm_network_interfaces {
    use crate::{models, models::*};
    use snafu::Snafu;
    /// Error type for the `list_virtual_machine_scale_set_vm_network_interfaces` operation.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with a status code other than 200.
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        /// The assembled request URL could not be parsed.
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be deserialized from JSON.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// An access token could not be obtained from the credential.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
/// Lists all network interfaces attached to a virtual machine scale set.
pub async fn list_virtual_machine_scale_set_network_interfaces(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    virtual_machine_scale_set_name: &str,
    subscription_id: &str,
) -> std::result::Result<NetworkInterfaceListResult, list_virtual_machine_scale_set_network_interfaces::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.Compute/virtualMachineScaleSets/{}/networkInterfaces",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        virtual_machine_scale_set_name
    );
    let mut url = url::Url::parse(url_str).context(list_virtual_machine_scale_set_network_interfaces::ParseUrlError)?;
    let mut builder = http::request::Builder::new().method(http::Method::GET);
    // Attach a bearer token only when a credential is configured.
    if let Some(credential) = operation_config.token_credential() {
        let token = credential
            .get_token(operation_config.token_credential_resource())
            .await
            .context(list_virtual_machine_scale_set_network_interfaces::GetTokenError)?;
        builder = builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let request = builder
        .uri(url.as_str())
        .body(bytes::Bytes::from_static(azure_core::EMPTY_BODY))
        .context(list_virtual_machine_scale_set_network_interfaces::BuildRequestError)?;
    let response = http_client
        .execute_request(request)
        .await
        .context(list_virtual_machine_scale_set_network_interfaces::ExecuteRequestError)?;
    if response.status() == http::StatusCode::OK {
        let body = response.body();
        serde_json::from_slice::<NetworkInterfaceListResult>(body)
            .context(list_virtual_machine_scale_set_network_interfaces::DeserializeError { body: body.clone() })
    } else {
        list_virtual_machine_scale_set_network_interfaces::UnexpectedResponse {
            status_code: response.status(),
            body: response.body().clone(),
        }
        .fail()
    }
}
pub mod list_virtual_machine_scale_set_network_interfaces {
    use crate::{models, models::*};
    use snafu::Snafu;
    /// Error type for the `list_virtual_machine_scale_set_network_interfaces` operation.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with a status code other than 200.
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        /// The assembled request URL could not be parsed.
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be deserialized from JSON.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// An access token could not be obtained from the credential.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
/// Fetches one network interface of a virtual machine inside a VM scale set,
/// optionally expanding referenced resources.
pub async fn get_virtual_machine_scale_set_network_interface(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    virtual_machine_scale_set_name: &str,
    virtualmachine_index: &str,
    network_interface_name: &str,
    subscription_id: &str,
    expand: Option<&str>,
) -> std::result::Result<NetworkInterface, get_virtual_machine_scale_set_network_interface::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines/{}/networkInterfaces/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        virtual_machine_scale_set_name,
        virtualmachine_index,
        network_interface_name
    );
    let mut url = url::Url::parse(url_str).context(get_virtual_machine_scale_set_network_interface::ParseUrlError)?;
    let mut builder = http::request::Builder::new().method(http::Method::GET);
    // Attach a bearer token only when a credential is configured.
    if let Some(credential) = operation_config.token_credential() {
        let token = credential
            .get_token(operation_config.token_credential_resource())
            .await
            .context(get_virtual_machine_scale_set_network_interface::GetTokenError)?;
        builder = builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    if let Some(expand) = expand {
        url.query_pairs_mut().append_pair("$expand", expand);
    }
    let request = builder
        .uri(url.as_str())
        .body(bytes::Bytes::from_static(azure_core::EMPTY_BODY))
        .context(get_virtual_machine_scale_set_network_interface::BuildRequestError)?;
    let response = http_client
        .execute_request(request)
        .await
        .context(get_virtual_machine_scale_set_network_interface::ExecuteRequestError)?;
    if response.status() == http::StatusCode::OK {
        let body = response.body();
        serde_json::from_slice::<NetworkInterface>(body)
            .context(get_virtual_machine_scale_set_network_interface::DeserializeError { body: body.clone() })
    } else {
        get_virtual_machine_scale_set_network_interface::UnexpectedResponse {
            status_code: response.status(),
            body: response.body().clone(),
        }
        .fail()
    }
}
pub mod get_virtual_machine_scale_set_network_interface {
    use crate::{models, models::*};
    use snafu::Snafu;
    /// Error type for the `get_virtual_machine_scale_set_network_interface` operation.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with a status code other than 200.
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        /// The assembled request URL could not be parsed.
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be deserialized from JSON.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// An access token could not be obtained from the credential.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
/// Lists every network interface in the subscription, across all resource groups.
pub async fn list_all(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
) -> std::result::Result<NetworkInterfaceListResult, list_all::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/providers/Microsoft.Network/networkInterfaces",
        operation_config.base_path(),
        subscription_id
    );
    let mut url = url::Url::parse(url_str).context(list_all::ParseUrlError)?;
    let mut builder = http::request::Builder::new().method(http::Method::GET);
    // Attach a bearer token only when a credential is configured.
    if let Some(credential) = operation_config.token_credential() {
        let token = credential
            .get_token(operation_config.token_credential_resource())
            .await
            .context(list_all::GetTokenError)?;
        builder = builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let request = builder
        .uri(url.as_str())
        .body(bytes::Bytes::from_static(azure_core::EMPTY_BODY))
        .context(list_all::BuildRequestError)?;
    let response = http_client.execute_request(request).await.context(list_all::ExecuteRequestError)?;
    if response.status() == http::StatusCode::OK {
        let body = response.body();
        serde_json::from_slice::<NetworkInterfaceListResult>(body).context(list_all::DeserializeError { body: body.clone() })
    } else {
        list_all::UnexpectedResponse {
            status_code: response.status(),
            body: response.body().clone(),
        }
        .fail()
    }
}
pub mod list_all {
    use crate::{models, models::*};
    use snafu::Snafu;
    /// Error type for the `list_all` operation.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with a status code other than 200.
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        /// The assembled request URL could not be parsed.
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be deserialized from JSON.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// An access token could not be obtained from the credential.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
/// Lists all network interfaces in a resource group.
pub async fn list(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    subscription_id: &str,
) -> std::result::Result<NetworkInterfaceListResult, list::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/networkInterfaces",
        operation_config.base_path(),
        subscription_id,
        resource_group_name
    );
    let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
    let mut builder = http::request::Builder::new().method(http::Method::GET);
    // Attach a bearer token only when a credential is configured.
    if let Some(credential) = operation_config.token_credential() {
        let token = credential
            .get_token(operation_config.token_credential_resource())
            .await
            .context(list::GetTokenError)?;
        builder = builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let request = builder
        .uri(url.as_str())
        .body(bytes::Bytes::from_static(azure_core::EMPTY_BODY))
        .context(list::BuildRequestError)?;
    let response = http_client.execute_request(request).await.context(list::ExecuteRequestError)?;
    if response.status() == http::StatusCode::OK {
        let body = response.body();
        serde_json::from_slice::<NetworkInterfaceListResult>(body).context(list::DeserializeError { body: body.clone() })
    } else {
        list::UnexpectedResponse {
            status_code: response.status(),
            body: response.body().clone(),
        }
        .fail()
    }
}
pub mod list {
    use crate::{models, models::*};
    use snafu::Snafu;
    /// Error type for the `list` operation.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with a status code other than 200.
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        /// The assembled request URL could not be parsed.
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be deserialized from JSON.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// An access token could not be obtained from the credential.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
}
pub mod network_security_groups {
use crate::models::*;
use snafu::{ResultExt, Snafu};
/// Fetches a single network security group, optionally expanding referenced resources.
pub async fn get(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    network_security_group_name: &str,
    subscription_id: &str,
    expand: Option<&str>,
) -> std::result::Result<NetworkSecurityGroup, get::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/networkSecurityGroups/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        network_security_group_name
    );
    let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
    let mut builder = http::request::Builder::new().method(http::Method::GET);
    // Attach a bearer token only when a credential is configured.
    if let Some(credential) = operation_config.token_credential() {
        let token = credential
            .get_token(operation_config.token_credential_resource())
            .await
            .context(get::GetTokenError)?;
        builder = builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    if let Some(expand) = expand {
        url.query_pairs_mut().append_pair("$expand", expand);
    }
    let request = builder
        .uri(url.as_str())
        .body(bytes::Bytes::from_static(azure_core::EMPTY_BODY))
        .context(get::BuildRequestError)?;
    let response = http_client.execute_request(request).await.context(get::ExecuteRequestError)?;
    if response.status() == http::StatusCode::OK {
        let body = response.body();
        serde_json::from_slice::<NetworkSecurityGroup>(body).context(get::DeserializeError { body: body.clone() })
    } else {
        get::UnexpectedResponse {
            status_code: response.status(),
            body: response.body().clone(),
        }
        .fail()
    }
}
pub mod get {
    use crate::{models, models::*};
    use snafu::Snafu;
    /// Error type for the `get` operation.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with a status code other than 200.
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        /// The assembled request URL could not be parsed.
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be deserialized from JSON.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// An access token could not be obtained from the credential.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
network_security_group_name: &str,
parameters: &NetworkSecurityGroup,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/networkSecurityGroups/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
network_security_group_name
);
let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: NetworkSecurityGroup =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: NetworkSecurityGroup =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
create_or_update::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
pub mod create_or_update {
    use crate::{models, models::*};
    use snafu::Snafu;
    /// Successful outcomes of the `create_or_update` operation.
    #[derive(Debug)]
    pub enum Response {
        /// 201 Created — a new network security group was created.
        Created201(NetworkSecurityGroup),
        /// 200 OK — an existing network security group was updated.
        Ok200(NetworkSecurityGroup),
    }
    /// Error type for the `create_or_update` operation.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with a status code other than 200 or 201.
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        /// The assembled request URL could not be parsed.
        ParseUrlError { source: url::ParseError },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: http::Error },
        /// Sending the request or receiving the response failed.
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The request body could not be serialized to JSON.
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        /// The response body could not be deserialized from JSON.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// An access token could not be obtained from the credential.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
network_security_group_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/networkSecurityGroups/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
network_security_group_name
);
let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
delete::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Response and error types for the `delete` operation on network
    /// security groups.
    pub mod delete {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Success responses: 202 (deletion accepted, in progress),
        /// 200 (deleted), 204 (resource did not exist).
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            Ok200,
            NoContent204,
        }
        /// Failure modes covering every phase of the call; undocumented
        /// status codes retain the raw response body for diagnosis.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list_all(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<NetworkSecurityGroupListResult, list_all::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Network/networkSecurityGroups",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).context(list_all::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_all::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_all::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list_all::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: NetworkSecurityGroupListResult =
serde_json::from_slice(rsp_body).context(list_all::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
list_all::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error type for the `list_all` operation on network security groups.
    pub mod list_all {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Failure modes covering every phase of the call: URL construction,
        /// token acquisition, request building/execution, body (de)serialization,
        /// and undocumented status codes (raw body kept for diagnosis).
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<NetworkSecurityGroupListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/networkSecurityGroups",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: NetworkSecurityGroupListResult =
serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
list::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error type for the `list` operation on network security groups.
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Failure modes covering every phase of the call; undocumented
        /// status codes retain the raw response body for diagnosis.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod public_ip_addresses {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
public_ip_address_name: &str,
subscription_id: &str,
expand: Option<&str>,
) -> std::result::Result<PublicIpAddress, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/publicIPAddresses/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
public_ip_address_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(expand) = expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PublicIpAddress =
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
get::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error type for the `get` operation on public IP addresses.
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Failure modes covering every phase of the call; undocumented
        /// status codes retain the raw response body for diagnosis.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
public_ip_address_name: &str,
parameters: &PublicIpAddress,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/publicIPAddresses/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
public_ip_address_name
);
let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: PublicIpAddress =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PublicIpAddress =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
create_or_update::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Response and error types for the `create_or_update` operation on
    /// public IP addresses.
    pub mod create_or_update {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Success responses documented for the PUT: 201 on creation and
        /// 200 on update; both carry the resulting resource.
        #[derive(Debug)]
        pub enum Response {
            Created201(PublicIpAddress),
            Ok200(PublicIpAddress),
        }
        /// Failure modes covering every phase of the call; undocumented
        /// status codes retain the raw response body for diagnosis.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
public_ip_address_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/publicIPAddresses/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
public_ip_address_name
);
let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::OK => Ok(delete::Response::Ok200),
status_code => {
let rsp_body = rsp.body();
delete::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Response and error types for the `delete` operation on public IP
    /// addresses.
    pub mod delete {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Success responses: 204 (resource did not exist), 202 (deletion
        /// accepted, in progress), 200 (deleted).
        #[derive(Debug)]
        pub enum Response {
            NoContent204,
            Accepted202,
            Ok200,
        }
        /// Failure modes covering every phase of the call; undocumented
        /// status codes retain the raw response body for diagnosis.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list_all(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<PublicIpAddressListResult, list_all::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Network/publicIPAddresses",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).context(list_all::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_all::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_all::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list_all::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PublicIpAddressListResult =
serde_json::from_slice(rsp_body).context(list_all::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
list_all::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error type for the `list_all` operation on public IP addresses.
    pub mod list_all {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Failure modes covering every phase of the call; undocumented
        /// status codes retain the raw response body for diagnosis.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<PublicIpAddressListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/publicIPAddresses",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PublicIpAddressListResult =
serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
list::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error type for the `list` operation on public IP addresses.
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Failure modes covering every phase of the call; undocumented
        /// status codes retain the raw response body for diagnosis.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod route_tables {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
route_table_name: &str,
subscription_id: &str,
expand: Option<&str>,
) -> std::result::Result<RouteTable, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/routeTables/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
route_table_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(expand) = expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RouteTable = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
get::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error type for the `get` operation on route tables.
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Failure modes covering every phase of the call; undocumented
        /// status codes retain the raw response body for diagnosis.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
route_table_name: &str,
parameters: &RouteTable,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/routeTables/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
route_table_name
);
let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RouteTable =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: RouteTable =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
create_or_update::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Response and error types for the `create_or_update` operation on
    /// route tables.
    pub mod create_or_update {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Success responses documented for the PUT: 200 on update and
        /// 201 on creation; both carry the resulting resource.
        #[derive(Debug)]
        pub enum Response {
            Ok200(RouteTable),
            Created201(RouteTable),
        }
        /// Failure modes covering every phase of the call; undocumented
        /// status codes retain the raw response body for diagnosis.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
route_table_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/routeTables/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
route_table_name
);
let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
delete::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Response and error types for the `delete` operation on route tables.
    pub mod delete {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Success responses: 204 (resource did not exist), 200 (deleted),
        /// 202 (deletion accepted, in progress).
        #[derive(Debug)]
        pub enum Response {
            NoContent204,
            Ok200,
            Accepted202,
        }
        /// Failure modes covering every phase of the call; undocumented
        /// status codes retain the raw response body for diagnosis.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<RouteTableListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/routeTables",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RouteTableListResult =
serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
list::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error type for the `list` operation on route tables.
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Failure modes covering every phase of the call; undocumented
        /// status codes retain the raw response body for diagnosis.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list_all(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<RouteTableListResult, list_all::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Network/routeTables",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).context(list_all::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_all::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_all::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list_all::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RouteTableListResult =
serde_json::from_slice(rsp_body).context(list_all::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
list_all::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error type for the `list_all` operation on route tables.
    pub mod list_all {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Failure modes covering every phase of the call; undocumented
        /// status codes retain the raw response body for diagnosis.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod routes {
    use crate::models::*;
    use snafu::{ResultExt, Snafu};
    /// Gets the specified route from a route table.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        route_table_name: &str,
        route_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<Route, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/routeTables/{}/routes/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            route_table_name,
            route_name
        );
        let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(get::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Route = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                get::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Error type for [`get`].
    pub mod get {
        use crate::models::*;
        use snafu::Snafu;
        /// One variant per failure stage of the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Creates or updates a route in the specified route table.
    ///
    /// Returns `Ok200` when an existing route was updated and `Created201`
    /// when a new route was created.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        route_table_name: &str,
        route_name: &str,
        route_parameters: &Route,
        subscription_id: &str,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/routeTables/{}/routes/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            route_table_name,
            route_name
        );
        let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // BUGFIX: the PUT body must carry the serialized route. The previous code
        // sent `azure_core::EMPTY_BODY` and left `route_parameters` unused.
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(route_parameters).context(create_or_update::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Route =
                    serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: Route =
                    serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                create_or_update::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Response and error types for [`create_or_update`].
    pub mod create_or_update {
        use crate::models::*;
        use snafu::Snafu;
        /// Successful outcomes: 200 (updated) or 201 (created), each carrying the resulting route.
        #[derive(Debug)]
        pub enum Response {
            Ok200(Route),
            Created201(Route),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Deletes the specified route from a route table.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        route_table_name: &str,
        route_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/routeTables/{}/routes/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            route_table_name,
            route_name
        );
        let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                delete::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Response and error types for [`delete`].
    pub mod delete {
        use crate::models::*;
        use snafu::Snafu;
        /// Successful outcomes: 202 (delete accepted, in progress), 200, or 204 (already gone).
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            Ok200,
            NoContent204,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Lists all routes in a route table.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        route_table_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<RouteListResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/routeTables/{}/routes",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            route_table_name
        );
        let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(list::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: RouteListResult =
                    serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                list::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use crate::models::*;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod security_rules {
    use crate::models::*;
    use snafu::{ResultExt, Snafu};
    /// Gets the specified security rule from a network security group.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        network_security_group_name: &str,
        security_rule_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<SecurityRule, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/networkSecurityGroups/{}/securityRules/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            network_security_group_name,
            security_rule_name
        );
        let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(get::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: SecurityRule = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                get::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Error type for [`get`].
    pub mod get {
        use crate::models::*;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Creates or updates a security rule in the specified network security group.
    ///
    /// Returns `Ok200` when an existing rule was updated and `Created201` when
    /// a new rule was created.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        network_security_group_name: &str,
        security_rule_name: &str,
        security_rule_parameters: &SecurityRule,
        subscription_id: &str,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/networkSecurityGroups/{}/securityRules/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            network_security_group_name,
            security_rule_name
        );
        let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // BUGFIX: the PUT body must carry the serialized rule. The previous code
        // sent `azure_core::EMPTY_BODY` and left `security_rule_parameters` unused.
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(security_rule_parameters).context(create_or_update::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: SecurityRule =
                    serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: SecurityRule =
                    serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                create_or_update::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Response and error types for [`create_or_update`].
    pub mod create_or_update {
        use crate::models::*;
        use snafu::Snafu;
        /// Successful outcomes: 200 (updated) or 201 (created), each carrying the resulting rule.
        #[derive(Debug)]
        pub enum Response {
            Ok200(SecurityRule),
            Created201(SecurityRule),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Deletes the specified security rule from a network security group.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        network_security_group_name: &str,
        security_rule_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/networkSecurityGroups/{}/securityRules/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            network_security_group_name,
            security_rule_name
        );
        let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            status_code => {
                let rsp_body = rsp.body();
                delete::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Response and error types for [`delete`].
    pub mod delete {
        use crate::models::*;
        use snafu::Snafu;
        /// Successful outcomes: 204 (already gone), 202 (delete accepted, in progress), or 200.
        #[derive(Debug)]
        pub enum Response {
            NoContent204,
            Accepted202,
            Ok200,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Lists all security rules in a network security group.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        network_security_group_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<SecurityRuleListResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/networkSecurityGroups/{}/securityRules",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            network_security_group_name
        );
        let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(list::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: SecurityRuleListResult =
                    serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                list::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use crate::models::*;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod subnets {
    use crate::models::*;
    use snafu::{ResultExt, Snafu};
    /// Gets the specified subnet of a virtual network.
    ///
    /// `expand` is forwarded as the `$expand` OData query parameter when given.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        virtual_network_name: &str,
        subnet_name: &str,
        subscription_id: &str,
        expand: Option<&str>,
    ) -> std::result::Result<Subnet, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            virtual_network_name,
            subnet_name
        );
        let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        if let Some(expand) = expand {
            url.query_pairs_mut().append_pair("$expand", expand);
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(get::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Subnet = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                get::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Error type for [`get`].
    pub mod get {
        use crate::models::*;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Creates or updates a subnet in the specified virtual network.
    ///
    /// Returns `Ok200` when an existing subnet was updated and `Created201`
    /// when a new subnet was created.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        virtual_network_name: &str,
        subnet_name: &str,
        subnet_parameters: &Subnet,
        subscription_id: &str,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            virtual_network_name,
            subnet_name
        );
        let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // BUGFIX: the PUT body must carry the serialized subnet. The previous code
        // sent `azure_core::EMPTY_BODY` and left `subnet_parameters` unused.
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(subnet_parameters).context(create_or_update::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Subnet =
                    serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: Subnet =
                    serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                create_or_update::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Response and error types for [`create_or_update`].
    pub mod create_or_update {
        use crate::models::*;
        use snafu::Snafu;
        /// Successful outcomes: 200 (updated) or 201 (created), each carrying the resulting subnet.
        #[derive(Debug)]
        pub enum Response {
            Ok200(Subnet),
            Created201(Subnet),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Deletes the specified subnet from a virtual network.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        virtual_network_name: &str,
        subnet_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            virtual_network_name,
            subnet_name
        );
        let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            status_code => {
                let rsp_body = rsp.body();
                delete::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Response and error types for [`delete`].
    pub mod delete {
        use crate::models::*;
        use snafu::Snafu;
        /// Successful outcomes: 200, 204 (already gone), or 202 (delete accepted, in progress).
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            NoContent204,
            Accepted202,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Lists all subnets in a virtual network.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        virtual_network_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<SubnetListResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            virtual_network_name
        );
        let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(list::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: SubnetListResult =
                    serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                list::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use crate::models::*;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod usages {
    use crate::models::*;
    use snafu::{ResultExt, Snafu};
    /// Lists network resource usage counters for the subscription in the given location.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        location: &str,
        subscription_id: &str,
    ) -> std::result::Result<UsagesListResult, list::Error> {
        let client = operation_config.http_client();
        // Assemble the request URL, then attach the api-version query parameter.
        let raw_url = format!(
            "{}/subscriptions/{}/providers/Microsoft.Network/locations/{}/usages",
            operation_config.base_path(),
            subscription_id,
            location
        );
        let mut url = url::Url::parse(&raw_url).context(list::ParseUrlError)?;
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Build the GET request, adding a bearer token when credentials are configured.
        let mut builder = http::request::Builder::new().method(http::Method::GET);
        if let Some(credential) = operation_config.token_credential() {
            let token_response = credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list::GetTokenError)?;
            builder = builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        builder = builder.uri(url.as_str());
        let request = builder
            .body(bytes::Bytes::from_static(azure_core::EMPTY_BODY))
            .context(list::BuildRequestError)?;
        let response = client.execute_request(request).await.context(list::ExecuteRequestError)?;
        // Only 200 OK carries a usable payload; anything else is surfaced verbatim.
        match response.status() {
            http::StatusCode::OK => {
                let payload = response.body();
                serde_json::from_slice::<UsagesListResult>(payload)
                    .context(list::DeserializeError { body: payload.clone() })
            }
            other => {
                let payload = response.body();
                list::UnexpectedResponse {
                    status_code: other,
                    body: payload.clone(),
                }
                .fail()
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod virtual_network_gateway_connections {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
virtual_network_gateway_connection_name: &str,
subscription_id: &str,
) -> std::result::Result<VirtualNetworkGatewayConnection, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/connections/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
virtual_network_gateway_connection_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualNetworkGatewayConnection =
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
get::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Error type for the sibling `get` operation.
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// One variant per failure stage of the request pipeline.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// The service replied with a status other than 200 OK; `body` holds the raw payload.
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            /// The request URL could not be parsed.
            ParseUrlError { source: url::ParseError },
            /// Assembling the HTTP request failed.
            BuildRequestError { source: http::Error },
            /// Sending the request or receiving the response failed.
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            /// Serializing a request body failed (not produced by `get`, which
            /// sends an empty body; kept for generator uniformity).
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            /// The 200 response body was not valid JSON for the expected model.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            /// Acquiring an Azure AD token failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
virtual_network_gateway_connection_name: &str,
parameters: &VirtualNetworkGatewayConnection,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/connections/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
virtual_network_gateway_connection_name
);
let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualNetworkGatewayConnection =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: VirtualNetworkGatewayConnection =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
create_or_update::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod create_or_update {
        use crate::{models, models::*};
        use snafu::Snafu;
        // Success payloads: 200 = updated existing connection, 201 = created new one.
        #[derive(Debug)]
        pub enum Response {
            Ok200(VirtualNetworkGatewayConnection),
            Created201(VirtualNetworkGatewayConnection),
        }
        // Error type for the `create_or_update` operation; snafu generates the
        // context selectors used via `.context(..)` in the operation body.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            // Service replied with a status other than 200/201; raw body kept for diagnostics.
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            // The composed request URL failed to parse.
            ParseUrlError { source: url::ParseError },
            // Building the HTTP request failed.
            BuildRequestError { source: http::Error },
            // Sending the request / receiving the response failed.
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            // Serializing the JSON request body failed.
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            // Response-body deserialization failed; raw body kept for diagnostics.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            // Acquiring an OAuth token from the configured credential failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
virtual_network_gateway_connection_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/connections/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
virtual_network_gateway_connection_name
);
let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
delete::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod delete {
        use crate::{models, models::*};
        use snafu::Snafu;
        // Success statuses for the long-running delete: 200, 202 (accepted,
        // still running) and 204 (already gone). No payload in any case.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        // Error type for the `delete` operation; snafu generates the context
        // selectors used via `.context(..)` in the operation body.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            // Service replied with a status other than 200/202/204; raw body kept for diagnostics.
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            // The composed request URL failed to parse.
            ParseUrlError { source: url::ParseError },
            // Building the HTTP request failed.
            BuildRequestError { source: http::Error },
            // Sending the request / receiving the response failed.
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            // Request-body serialization failed (unused by this body-less DELETE; kept for parity with sibling operations).
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            // Response-body deserialization failed; raw body kept for diagnostics.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            // Acquiring an OAuth token from the configured credential failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<VirtualNetworkGatewayConnectionListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/connections",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualNetworkGatewayConnectionListResult =
serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
list::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        // Error type for the `list` operation; snafu generates the context
        // selectors used via `.context(..)` in the operation body.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            // Service replied with a non-200 status; raw body kept for diagnostics.
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            // The composed request URL failed to parse.
            ParseUrlError { source: url::ParseError },
            // Building the HTTP request failed.
            BuildRequestError { source: http::Error },
            // Sending the request / receiving the response failed.
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            // Request-body serialization failed (unused by this body-less GET; kept for parity with sibling operations).
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            // Response-body deserialization failed; raw body kept for diagnostics.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            // Acquiring an OAuth token from the configured credential failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn reset_shared_key(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
virtual_network_gateway_connection_name: &str,
parameters: &ConnectionResetSharedKey,
subscription_id: &str,
) -> std::result::Result<reset_shared_key::Response, reset_shared_key::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/connections/{}/sharedkey/reset",
operation_config.base_path(),
subscription_id,
resource_group_name,
virtual_network_gateway_connection_name
);
let mut url = url::Url::parse(url_str).context(reset_shared_key::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(reset_shared_key::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(reset_shared_key::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(reset_shared_key::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ConnectionResetSharedKey =
serde_json::from_slice(rsp_body).context(reset_shared_key::DeserializeError { body: rsp_body.clone() })?;
Ok(reset_shared_key::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(reset_shared_key::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
reset_shared_key::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod reset_shared_key {
        use crate::{models, models::*};
        use snafu::Snafu;
        // Success payloads: 200 carries the reset key details, 202 means the
        // long-running reset was accepted with no payload.
        #[derive(Debug)]
        pub enum Response {
            Ok200(ConnectionResetSharedKey),
            Accepted202,
        }
        // Error type for the `reset_shared_key` operation; snafu generates the
        // context selectors used via `.context(..)` in the operation body.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            // Service replied with a status other than 200/202; raw body kept for diagnostics.
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            // The composed request URL failed to parse.
            ParseUrlError { source: url::ParseError },
            // Building the HTTP request failed.
            BuildRequestError { source: http::Error },
            // Sending the request / receiving the response failed.
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            // Serializing the JSON request body failed.
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            // Response-body deserialization failed; raw body kept for diagnostics.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            // Acquiring an OAuth token from the configured credential failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn get_shared_key(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
virtual_network_gateway_connection_name: &str,
subscription_id: &str,
) -> std::result::Result<ConnectionSharedKeyResult, get_shared_key::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/connections/{}/sharedkey",
operation_config.base_path(),
subscription_id,
resource_group_name,
virtual_network_gateway_connection_name
);
let mut url = url::Url::parse(url_str).context(get_shared_key::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get_shared_key::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get_shared_key::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(get_shared_key::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ConnectionSharedKeyResult =
serde_json::from_slice(rsp_body).context(get_shared_key::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
get_shared_key::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod get_shared_key {
        use crate::{models, models::*};
        use snafu::Snafu;
        // Error type for the `get_shared_key` operation; snafu generates the
        // context selectors used via `.context(..)` in the operation body.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            // Service replied with a non-200 status; raw body kept for diagnostics.
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            // The composed request URL failed to parse.
            ParseUrlError { source: url::ParseError },
            // Building the HTTP request failed.
            BuildRequestError { source: http::Error },
            // Sending the request / receiving the response failed.
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            // Request-body serialization failed (unused by this body-less GET; kept for parity with sibling operations).
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            // Response-body deserialization failed; raw body kept for diagnostics.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            // Acquiring an OAuth token from the configured credential failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn set_shared_key(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
virtual_network_gateway_connection_name: &str,
parameters: &ConnectionSharedKey,
subscription_id: &str,
) -> std::result::Result<set_shared_key::Response, set_shared_key::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/connections/{}/sharedkey",
operation_config.base_path(),
subscription_id,
resource_group_name,
virtual_network_gateway_connection_name
);
let mut url = url::Url::parse(url_str).context(set_shared_key::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(set_shared_key::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(set_shared_key::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(set_shared_key::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: ConnectionSharedKey =
serde_json::from_slice(rsp_body).context(set_shared_key::DeserializeError { body: rsp_body.clone() })?;
Ok(set_shared_key::Response::Created201(rsp_value))
}
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ConnectionSharedKey =
serde_json::from_slice(rsp_body).context(set_shared_key::DeserializeError { body: rsp_body.clone() })?;
Ok(set_shared_key::Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
set_shared_key::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod set_shared_key {
        use crate::{models, models::*};
        use snafu::Snafu;
        // Success payloads: 201 = key created, 200 = key updated; both carry
        // the resulting shared key.
        #[derive(Debug)]
        pub enum Response {
            Created201(ConnectionSharedKey),
            Ok200(ConnectionSharedKey),
        }
        // Error type for the `set_shared_key` operation; snafu generates the
        // context selectors used via `.context(..)` in the operation body.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            // Service replied with a status other than 200/201; raw body kept for diagnostics.
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            // The composed request URL failed to parse.
            ParseUrlError { source: url::ParseError },
            // Building the HTTP request failed.
            BuildRequestError { source: http::Error },
            // Sending the request / receiving the response failed.
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            // Serializing the JSON request body failed.
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            // Response-body deserialization failed; raw body kept for diagnostics.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            // Acquiring an OAuth token from the configured credential failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod virtual_network_gateways {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
virtual_network_gateway_name: &str,
subscription_id: &str,
) -> std::result::Result<VirtualNetworkGateway, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworkGateways/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
virtual_network_gateway_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualNetworkGateway =
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
get::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        // Error type for the `get` operation; snafu generates the context
        // selectors used via `.context(..)` in the operation body.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            // Service replied with a non-200 status; raw body kept for diagnostics.
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            // The composed request URL failed to parse.
            ParseUrlError { source: url::ParseError },
            // Building the HTTP request failed.
            BuildRequestError { source: http::Error },
            // Sending the request / receiving the response failed.
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            // Request-body serialization failed (unused by this body-less GET; kept for parity with sibling operations).
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            // Response-body deserialization failed; raw body kept for diagnostics.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            // Acquiring an OAuth token from the configured credential failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
virtual_network_gateway_name: &str,
parameters: &VirtualNetworkGateway,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworkGateways/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
virtual_network_gateway_name
);
let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualNetworkGateway =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: VirtualNetworkGateway =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
create_or_update::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod create_or_update {
        use crate::{models, models::*};
        use snafu::Snafu;
        // Success payloads: 200 = updated existing gateway, 201 = created new one.
        #[derive(Debug)]
        pub enum Response {
            Ok200(VirtualNetworkGateway),
            Created201(VirtualNetworkGateway),
        }
        // Error type for the `create_or_update` operation; snafu generates the
        // context selectors used via `.context(..)` in the operation body.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            // Service replied with a status other than 200/201; raw body kept for diagnostics.
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            // The composed request URL failed to parse.
            ParseUrlError { source: url::ParseError },
            // Building the HTTP request failed.
            BuildRequestError { source: http::Error },
            // Sending the request / receiving the response failed.
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            // Serializing the JSON request body failed.
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            // Response-body deserialization failed; raw body kept for diagnostics.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            // Acquiring an OAuth token from the configured credential failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
virtual_network_gateway_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworkGateways/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
virtual_network_gateway_name
);
let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::OK => Ok(delete::Response::Ok200),
status_code => {
let rsp_body = rsp.body();
delete::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod delete {
        use crate::{models, models::*};
        use snafu::Snafu;
        // Success statuses for the long-running delete: 204 (already gone),
        // 202 (accepted, still running) and 200. No payload in any case.
        #[derive(Debug)]
        pub enum Response {
            NoContent204,
            Accepted202,
            Ok200,
        }
        // Error type for the `delete` operation; snafu generates the context
        // selectors used via `.context(..)` in the operation body.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            // Service replied with a status other than 200/202/204; raw body kept for diagnostics.
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            // The composed request URL failed to parse.
            ParseUrlError { source: url::ParseError },
            // Building the HTTP request failed.
            BuildRequestError { source: http::Error },
            // Sending the request / receiving the response failed.
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            // Request-body serialization failed (unused by this body-less DELETE; kept for parity with sibling operations).
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            // Response-body deserialization failed; raw body kept for diagnostics.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            // Acquiring an OAuth token from the configured credential failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<VirtualNetworkGatewayListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworkGateways",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualNetworkGatewayListResult =
serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
list::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        // Error type for the `list` operation; snafu generates the context
        // selectors used via `.context(..)` in the operation body.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            // Service replied with a non-200 status; raw body kept for diagnostics.
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            // The composed request URL failed to parse.
            ParseUrlError { source: url::ParseError },
            // Building the HTTP request failed.
            BuildRequestError { source: http::Error },
            // Sending the request / receiving the response failed.
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            // Request-body serialization failed (unused by this body-less GET; kept for parity with sibling operations).
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            // Response-body deserialization failed; raw body kept for diagnostics.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            // Acquiring an OAuth token from the configured credential failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn reset(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
virtual_network_gateway_name: &str,
parameters: &VirtualNetworkGateway,
subscription_id: &str,
) -> std::result::Result<reset::Response, reset::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworkGateways/{}/reset",
operation_config.base_path(),
subscription_id,
resource_group_name,
virtual_network_gateway_name
);
let mut url = url::Url::parse(url_str).context(reset::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(reset::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(reset::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(reset::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(reset::Response::Accepted202),
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualNetworkGateway =
serde_json::from_slice(rsp_body).context(reset::DeserializeError { body: rsp_body.clone() })?;
Ok(reset::Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
reset::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod reset {
        use crate::{models, models::*};
        use snafu::Snafu;
        // Success payloads: 202 = long-running reset accepted (no payload),
        // 200 carries the gateway state.
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            Ok200(VirtualNetworkGateway),
        }
        // Error type for the `reset` operation; snafu generates the context
        // selectors used via `.context(..)` in the operation body.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            // Service replied with a status other than 200/202; raw body kept for diagnostics.
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            // The composed request URL failed to parse.
            ParseUrlError { source: url::ParseError },
            // Building the HTTP request failed.
            BuildRequestError { source: http::Error },
            // Sending the request / receiving the response failed.
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            // Serializing the JSON request body failed.
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            // Response-body deserialization failed; raw body kept for diagnostics.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            // Acquiring an OAuth token from the configured credential failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn generatevpnclientpackage(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
virtual_network_gateway_name: &str,
parameters: &VpnClientParameters,
subscription_id: &str,
) -> std::result::Result<String, generatevpnclientpackage::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworkGateways/{}/generatevpnclientpackage",
operation_config.base_path(),
subscription_id,
resource_group_name,
virtual_network_gateway_name
);
let mut url = url::Url::parse(url_str).context(generatevpnclientpackage::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(generatevpnclientpackage::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(generatevpnclientpackage::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(generatevpnclientpackage::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: String =
serde_json::from_slice(rsp_body).context(generatevpnclientpackage::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
generatevpnclientpackage::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    pub mod generatevpnclientpackage {
        use crate::{models, models::*};
        use snafu::Snafu;
        // Error type for the `generatevpnclientpackage` operation; snafu
        // generates the context selectors used via `.context(..)` in the body.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            // Service replied with a non-202 status; raw body kept for diagnostics.
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            // The composed request URL failed to parse.
            ParseUrlError { source: url::ParseError },
            // Building the HTTP request failed.
            BuildRequestError { source: http::Error },
            // Sending the request / receiving the response failed.
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            // Serializing the JSON request body failed.
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            // Response-body deserialization failed; raw body kept for diagnostics.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            // Acquiring an OAuth token from the configured credential failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod virtual_networks {
use crate::models::*;
use snafu::{ResultExt, Snafu};
    /// Gets the specified virtual network in the given resource group.
    ///
    /// `expand` is forwarded as the `$expand` query parameter when present.
    /// On HTTP 200 the response body is deserialized into [`VirtualNetwork`];
    /// any other status code is returned as `get::Error::UnexpectedResponse`
    /// together with the raw response body.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        virtual_network_name: &str,
        subscription_id: &str,
        expand: Option<&str>,
    ) -> std::result::Result<VirtualNetwork, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            virtual_network_name
        );
        let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        if let Some(expand) = expand {
            url.query_pairs_mut().append_pair("$expand", expand);
        }
        // GET carries no request payload.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(get::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualNetwork =
                    serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                // Preserve the raw body so callers can inspect the service error payload.
                let rsp_body = rsp.body();
                get::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Error types for the [`get`] operation.
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Every failure mode of `get`, from URL construction through
        /// token acquisition, transport, and response deserialization.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
virtual_network_name: &str,
parameters: &VirtualNetwork,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
virtual_network_name
);
let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: VirtualNetwork =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: VirtualNetwork =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
create_or_update::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
}
.fail()
}
}
}
    /// Response and error types for the [`create_or_update`] operation.
    pub mod create_or_update {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Success outcomes: 200 (updated) and 201 (created), each carrying
        /// the resulting virtual network.
        #[derive(Debug)]
        pub enum Response {
            Ok200(VirtualNetwork),
            Created201(VirtualNetwork),
        }
        /// Every failure mode of `create_or_update`.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Deletes the specified virtual network.
    ///
    /// The service may answer 202 (deletion accepted, in progress),
    /// 204 (nothing to delete) or 200 (deleted); each maps to a distinct
    /// [`delete::Response`] variant. Any other status code is returned as
    /// `delete::Error::UnexpectedResponse`.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        virtual_network_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            virtual_network_name
        );
        let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // DELETE carries no request payload.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            status_code => {
                let rsp_body = rsp.body();
                delete::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Response and error types for the [`delete`] operation.
    pub mod delete {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Success outcomes of the DELETE call; none carry a body.
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            NoContent204,
            Ok200,
        }
        /// Every failure mode of `delete`.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Lists all virtual networks in the subscription (across all resource
    /// groups).
    ///
    /// On HTTP 200 the body is deserialized into [`VirtualNetworkListResult`];
    /// any other status code is returned as
    /// `list_all::Error::UnexpectedResponse`.
    pub async fn list_all(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
    ) -> std::result::Result<VirtualNetworkListResult, list_all::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Network/virtualNetworks",
            operation_config.base_path(),
            subscription_id
        );
        let mut url = url::Url::parse(url_str).context(list_all::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list_all::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET carries no request payload.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(list_all::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(list_all::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualNetworkListResult =
                    serde_json::from_slice(rsp_body).context(list_all::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                list_all::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Error types for the [`list_all`] operation.
    pub mod list_all {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Every failure mode of `list_all`.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Lists the virtual networks in the given resource group.
    ///
    /// On HTTP 200 the body is deserialized into [`VirtualNetworkListResult`];
    /// any other status code is returned as `list::Error::UnexpectedResponse`.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<VirtualNetworkListResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks",
            operation_config.base_path(),
            subscription_id,
            resource_group_name
        );
        let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET carries no request payload.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(list::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VirtualNetworkListResult =
                    serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                list::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                }
                .fail()
            }
        }
    }
    /// Error types for the [`list`] operation.
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        /// Every failure mode of `list`.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
/// Checks whether a domain name in the cloudapp.azure.com zone is available
/// for use in the given location.
///
/// `domain_name_label` is forwarded as the `domainNameLabel` query parameter
/// when present. On HTTP 200 the body is deserialized into
/// [`DnsNameAvailabilityResult`]; any other status code is returned as
/// `check_dns_name_availability::Error::UnexpectedResponse`.
pub async fn check_dns_name_availability(
    operation_config: &crate::OperationConfig,
    location: &str,
    domain_name_label: Option<&str>,
    subscription_id: &str,
) -> std::result::Result<DnsNameAvailabilityResult, check_dns_name_availability::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/providers/Microsoft.Network/locations/{}/CheckDnsNameAvailability",
        operation_config.base_path(),
        subscription_id,
        location
    );
    let mut url = url::Url::parse(url_str).context(check_dns_name_availability::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::GET);
    // Attach a bearer token only when a credential is configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .context(check_dns_name_availability::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    if let Some(domain_name_label) = domain_name_label {
        url.query_pairs_mut().append_pair("domainNameLabel", domain_name_label);
    }
    // GET carries no request payload.
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).context(check_dns_name_availability::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .context(check_dns_name_availability::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: DnsNameAvailabilityResult =
                serde_json::from_slice(rsp_body).context(check_dns_name_availability::DeserializeError { body: rsp_body.clone() })?;
            Ok(rsp_value)
        }
        status_code => {
            // Preserve the raw body so callers can inspect the service error payload.
            let rsp_body = rsp.body();
            check_dns_name_availability::UnexpectedResponse {
                status_code,
                body: rsp_body.clone(),
            }
            .fail()
        }
    }
}
/// Error types for the [`check_dns_name_availability`] operation.
pub mod check_dns_name_availability {
    use crate::{models, models::*};
    use snafu::Snafu;
    /// Every failure mode of `check_dns_name_availability`.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
        ParseUrlError { source: url::ParseError },
        BuildRequestError { source: http::Error },
        ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
        SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
| 47.086314 | 326 | 0.591586 |
28115c3659be0ec8a639a974b6b3e11d041a4bca | 110,133 | // Copyright © 2016-2017 winapi-rs developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
//! This module defines the 32-Bit Windows types and constants that are defined by NT, but exposed
//! through the Win32 API.
use ctypes::{__int64, __uint64, c_char, c_int, c_long, c_short, c_ulong, c_void, wchar_t};
use shared::basetsd::{DWORD64, KAFFINITY, LONG_PTR, PDWORD64, SIZE_T, ULONG_PTR};
use shared::guiddef::{CLSID, GUID};
use shared::minwindef::{BYTE, DWORD, PDWORD, ULONG, WORD};
use vc::excpt::EXCEPTION_DISPOSITION;
pub const ANYSIZE_ARRAY: usize = 1;
#[cfg(target_arch = "x86")]
IFDEF!{
pub const MAX_NATURAL_ALIGNMENT: usize = 4;
pub const MEMORY_ALLOCATION_ALIGNMENT: usize = 8;
}
#[cfg(target_arch = "x86_64")]
IFDEF!{
pub const MAX_NATURAL_ALIGNMENT: usize = 8;
pub const MEMORY_ALLOCATION_ALIGNMENT: usize = 16;
}
pub const SYSTEM_CACHE_ALIGNMENT_SIZE: usize = 64;
pub type PVOID = *mut c_void;
pub type PVOID64 = u64; // This is a 64-bit pointer, even when in 32-bit
pub type VOID = c_void;
pub type CHAR = c_char;
pub type SHORT = c_short;
pub type LONG = c_long;
pub type INT = c_int;
pub type WCHAR = wchar_t;
pub type PWCHAR = *mut WCHAR;
pub type LPWCH = *mut WCHAR;
pub type PWCH = *mut WCHAR;
pub type LPCWCH = *const WCHAR;
pub type PCWCH = *const WCHAR;
pub type NWPSTR = *mut WCHAR;
pub type LPWSTR = *mut WCHAR;
pub type PWSTR = *mut WCHAR;
pub type PZPWSTR = *mut PWSTR;
pub type PCZPWSTR = *const PWSTR;
pub type LPUWSTR = *mut WCHAR; // Unaligned pointer
pub type PUWSTR = *mut WCHAR; // Unaligned pointer
pub type LPCWSTR = *const WCHAR;
pub type PCWSTR = *const WCHAR;
pub type PZPCWSTR = *mut PCWSTR;
pub type PCZPCWSTR = *const PCWSTR;
pub type LPCUWSTR = *const WCHAR; // Unaligned pointer
pub type PCUWSTR = *const WCHAR; // Unaligned pointer
pub type PZZWSTR = *mut WCHAR;
pub type PCZZWSTR = *const WCHAR;
pub type PUZZWSTR = *mut WCHAR; // Unaligned pointer
pub type PCUZZWSTR = *const WCHAR; // Unaligned pointer
pub type PNZWCH = *mut WCHAR;
pub type PCNZWCH = *const WCHAR;
pub type PUNZWCH = *mut WCHAR; // Unaligned pointer
pub type PCUNZWCH = *const WCHAR; // Unaligned pointer
pub type LPCWCHAR = *const WCHAR;
pub type PCWCHAR = *const WCHAR;
pub type LPCUWCHAR = *const WCHAR; // Unaligned pointer
pub type PCUWCHAR = *const WCHAR; // Unaligned pointer
pub type UCSCHAR = c_ulong;
pub const UCSCHAR_INVALID_CHARACTER: UCSCHAR = 0xffffffff;
pub const MIN_UCSCHAR: UCSCHAR = 0;
pub const MAX_UCSCHAR: UCSCHAR = 0x0010FFFF;
pub type PUCSCHAR = *mut UCSCHAR;
pub type PCUCSCHAR = *const UCSCHAR;
pub type PUCSSTR = *mut UCSCHAR;
pub type PUUCSSTR = *mut UCSCHAR; // Unaligned pointer
pub type PCUCSSTR = *const UCSCHAR;
pub type PCUUCSSTR = *const UCSCHAR; // Unaligned pointer
pub type PUUCSCHAR = *mut UCSCHAR; // Unaligned pointer
pub type PCUUCSCHAR = *const UCSCHAR; // Unaligned pointer
pub type PCHAR = *mut CHAR;
pub type LPCH = *mut CHAR;
pub type PCH = *mut CHAR;
pub type LPCCH = *const CHAR;
pub type PCCH = *const CHAR;
pub type NPSTR = *mut CHAR;
pub type LPSTR = *mut CHAR;
pub type PSTR = *mut CHAR;
pub type PZPSTR = *mut PSTR;
pub type PCZPSTR = *const PSTR;
pub type LPCSTR = *const CHAR;
pub type PCSTR = *const CHAR;
pub type PZPCSTR = *mut PCSTR;
pub type PCZPCSTR = *const PCSTR;
pub type PZZSTR = *mut CHAR;
pub type PCZZSTR = *const CHAR;
pub type PNZCH = *mut CHAR;
pub type PCNZCH = *const CHAR;
// Skipping TCHAR things
pub type PSHORT = *mut SHORT;
pub type PLONG = *mut LONG;
pub const ALL_PROCESSOR_GROUPS: WORD = 0xffff;
// Identifies a logical processor by its processor group and its index
// within that group (mirrors the Win32 PROCESSOR_NUMBER structure).
STRUCT!{struct PROCESSOR_NUMBER {
    Group: WORD,
    Number: BYTE,
    Reserved: BYTE,
}}
pub type PPROCESSOR_NUMBER = *mut PROCESSOR_NUMBER;
// Processor affinity mask scoped to a single processor group
// (mirrors the Win32 GROUP_AFFINITY structure).
STRUCT!{struct GROUP_AFFINITY {
    Mask: KAFFINITY,
    Group: WORD,
    Reserved: [WORD; 3],
}}
pub type PGROUP_AFFINITY = *mut GROUP_AFFINITY;
#[cfg(target_arch = "x86")]
pub const MAXIMUM_PROC_PER_GROUP: BYTE = 32;
#[cfg(target_arch = "x86_64")]
pub const MAXIMUM_PROC_PER_GROUP: BYTE = 64;
pub const MAXIMUM_PROCESSORS: BYTE = MAXIMUM_PROC_PER_GROUP;
pub type HANDLE = *mut c_void;
pub type PHANDLE = *mut HANDLE;
pub type FCHAR = BYTE;
pub type FSHORT = WORD;
pub type FLONG = DWORD;
pub type HRESULT = c_long;
pub type CCHAR = c_char;
pub type LCID = DWORD;
pub type PLCID = PDWORD;
pub type LANGID = WORD;
ENUM!{enum COMPARTMENT_ID {
UNSPECIFIED_COMPARTMENT_ID = 0,
DEFAULT_COMPARTMENT_ID,
}}
pub type PCOMPARTMENT_ID = *mut COMPARTMENT_ID;
pub const APPLICATION_ERROR_MASK: DWORD = 0x20000000;
pub const ERROR_SEVERITY_SUCCESS: DWORD = 0x00000000;
pub const ERROR_SEVERITY_INFORMATIONAL: DWORD = 0x40000000;
pub const ERROR_SEVERITY_WARNING: DWORD = 0x80000000;
pub const ERROR_SEVERITY_ERROR: DWORD = 0xC0000000;
STRUCT!{struct FLOAT128 {
LowPart: __int64,
HighPart: __int64,
}}
pub type PFLOAT128 = *mut FLOAT128;
pub type LONGLONG = __int64;
pub type ULONGLONG = __uint64;
pub const MAXLONGLONG: LONGLONG = 0x7fffffffffffffff;
pub type PLONGLONG = *mut LONGLONG;
pub type PULONGLONG = *mut ULONGLONG;
pub type USN = LONGLONG;
pub type LARGE_INTEGER = LONGLONG;
pub type PLARGE_INTEGER = *mut LARGE_INTEGER;
pub type ULARGE_INTEGER = ULONGLONG;
pub type PULARGE_INTEGER = *mut ULARGE_INTEGER;
pub type RTL_REFERENCE_COUNT = LONG_PTR;
pub type PRTL_REFERENCE_COUNT = *mut LONG_PTR;
STRUCT!{struct LUID {
LowPart: DWORD,
HighPart: LONG,
}}
pub type PLUID = *mut LUID;
pub type DWORDLONG = ULONGLONG;
pub type PDWORDLONG = *mut DWORDLONG;
pub const ANSI_NULL: CHAR = 0;
pub const UNICODE_NULL: WCHAR = 0;
pub const UNICODE_STRING_MAX_BYTES: WORD = 65534;
pub const UNICODE_STRING_MAX_CHARS: WORD = 32767;
pub type BOOLEAN = BYTE;
pub type PBOOLEAN = *mut BOOLEAN;
// Doubly-linked list node (forward/backward links), as defined in winnt.h.
STRUCT!{struct LIST_ENTRY {
    Flink: *mut LIST_ENTRY,
    Blink: *mut LIST_ENTRY,
}}
pub type PLIST_ENTRY = *mut LIST_ENTRY;
pub type PRLIST_ENTRY = *mut LIST_ENTRY; // Restricted pointer
// Singly-linked list node.
STRUCT!{struct SINGLE_LIST_ENTRY {
    Next: *mut SINGLE_LIST_ENTRY,
}}
pub type PSINGLE_LIST_ENTRY = *mut SINGLE_LIST_ENTRY;
// Variant with explicit 32-bit links, independent of target pointer width.
STRUCT!{struct LIST_ENTRY32 {
    Flink: DWORD,
    Blink: DWORD,
}}
pub type PLIST_ENTRY32 = *mut LIST_ENTRY32;
// Variant with explicit 64-bit links, independent of target pointer width.
STRUCT!{struct LIST_ENTRY64 {
    Flink: ULONGLONG,
    Blink: ULONGLONG,
}}
pub type PLIST_ENTRY64 = *mut LIST_ENTRY64;
STRUCT!{struct OBJECTID {
Lineage: GUID,
Uniquifier: DWORD,
}}
pub const MINCHAR: CHAR = 0x80u8 as CHAR;
pub const MAXCHAR: CHAR = 0x7f;
pub const MINSHORT: SHORT = 0x8000u16 as SHORT;
pub const MAXSHORT: SHORT = 0x7fff;
pub const MINLONG: LONG = 0x80000000u32 as LONG;
pub const MAXLONG: LONG = 0x7fffffff;
pub const MAXBYTE: BYTE = 0xff;
pub const MAXWORD: WORD = 0xffff;
pub const MAXDWORD: DWORD = 0xffffffff;
// stdcall function-pointer type for an exception handler routine.
// EXCEPTION_RECORD and CONTEXT are declared elsewhere in this file.
FN!{stdcall PEXCEPTION_ROUTINE(
    ExceptionRecord: *mut EXCEPTION_RECORD,
    EstablisherFrame: PVOID,
    ContextRecord: *mut CONTEXT,
    DispatcherContext: PVOID,
) -> EXCEPTION_DISPOSITION}
pub const VER_SERVER_NT: DWORD = 0x80000000;
pub const VER_WORKSTATION_NT: DWORD = 0x40000000;
pub const VER_SUITE_SMALLBUSINESS: DWORD = 0x00000001;
pub const VER_SUITE_ENTERPRISE: DWORD = 0x00000002;
pub const VER_SUITE_BACKOFFICE: DWORD = 0x00000004;
pub const VER_SUITE_COMMUNICATIONS: DWORD = 0x00000008;
pub const VER_SUITE_TERMINAL: DWORD = 0x00000010;
pub const VER_SUITE_SMALLBUSINESS_RESTRICTED: DWORD = 0x00000020;
pub const VER_SUITE_EMBEDDEDNT: DWORD = 0x00000040;
pub const VER_SUITE_DATACENTER: DWORD = 0x00000080;
pub const VER_SUITE_SINGLEUSERTS: DWORD = 0x00000100;
pub const VER_SUITE_PERSONAL: DWORD = 0x00000200;
pub const VER_SUITE_BLADE: DWORD = 0x00000400;
pub const VER_SUITE_EMBEDDED_RESTRICTED: DWORD = 0x00000800;
pub const VER_SUITE_SECURITY_APPLIANCE: DWORD = 0x00001000;
pub const VER_SUITE_STORAGE_SERVER: DWORD = 0x00002000;
pub const VER_SUITE_COMPUTE_SERVER: DWORD = 0x00004000;
pub const VER_SUITE_WH_SERVER: DWORD = 0x00008000;
pub const PRODUCT_UNDEFINED: DWORD = 0x00000000;
pub const PRODUCT_ULTIMATE: DWORD = 0x00000001;
pub const PRODUCT_HOME_BASIC: DWORD = 0x00000002;
pub const PRODUCT_HOME_PREMIUM: DWORD = 0x00000003;
pub const PRODUCT_ENTERPRISE: DWORD = 0x00000004;
pub const PRODUCT_HOME_BASIC_N: DWORD = 0x00000005;
pub const PRODUCT_BUSINESS: DWORD = 0x00000006;
pub const PRODUCT_STANDARD_SERVER: DWORD = 0x00000007;
pub const PRODUCT_DATACENTER_SERVER: DWORD = 0x00000008;
pub const PRODUCT_SMALLBUSINESS_SERVER: DWORD = 0x00000009;
pub const PRODUCT_ENTERPRISE_SERVER: DWORD = 0x0000000A;
pub const PRODUCT_STARTER: DWORD = 0x0000000B;
pub const PRODUCT_DATACENTER_SERVER_CORE: DWORD = 0x0000000C;
pub const PRODUCT_STANDARD_SERVER_CORE: DWORD = 0x0000000D;
pub const PRODUCT_ENTERPRISE_SERVER_CORE: DWORD = 0x0000000E;
pub const PRODUCT_ENTERPRISE_SERVER_IA64: DWORD = 0x0000000F;
pub const PRODUCT_BUSINESS_N: DWORD = 0x00000010;
pub const PRODUCT_WEB_SERVER: DWORD = 0x00000011;
pub const PRODUCT_CLUSTER_SERVER: DWORD = 0x00000012;
pub const PRODUCT_HOME_SERVER: DWORD = 0x00000013;
pub const PRODUCT_STORAGE_EXPRESS_SERVER: DWORD = 0x00000014;
pub const PRODUCT_STORAGE_STANDARD_SERVER: DWORD = 0x00000015;
pub const PRODUCT_STORAGE_WORKGROUP_SERVER: DWORD = 0x00000016;
pub const PRODUCT_STORAGE_ENTERPRISE_SERVER: DWORD = 0x00000017;
pub const PRODUCT_SERVER_FOR_SMALLBUSINESS: DWORD = 0x00000018;
pub const PRODUCT_SMALLBUSINESS_SERVER_PREMIUM: DWORD = 0x00000019;
pub const PRODUCT_HOME_PREMIUM_N: DWORD = 0x0000001A;
pub const PRODUCT_ENTERPRISE_N: DWORD = 0x0000001B;
pub const PRODUCT_ULTIMATE_N: DWORD = 0x0000001C;
pub const PRODUCT_WEB_SERVER_CORE: DWORD = 0x0000001D;
pub const PRODUCT_MEDIUMBUSINESS_SERVER_MANAGEMENT: DWORD = 0x0000001E;
pub const PRODUCT_MEDIUMBUSINESS_SERVER_SECURITY: DWORD = 0x0000001F;
pub const PRODUCT_MEDIUMBUSINESS_SERVER_MESSAGING: DWORD = 0x00000020;
pub const PRODUCT_SERVER_FOUNDATION: DWORD = 0x00000021;
pub const PRODUCT_HOME_PREMIUM_SERVER: DWORD = 0x00000022;
pub const PRODUCT_SERVER_FOR_SMALLBUSINESS_V: DWORD = 0x00000023;
pub const PRODUCT_STANDARD_SERVER_V: DWORD = 0x00000024;
pub const PRODUCT_DATACENTER_SERVER_V: DWORD = 0x00000025;
pub const PRODUCT_ENTERPRISE_SERVER_V: DWORD = 0x00000026;
pub const PRODUCT_DATACENTER_SERVER_CORE_V: DWORD = 0x00000027;
pub const PRODUCT_STANDARD_SERVER_CORE_V: DWORD = 0x00000028;
pub const PRODUCT_ENTERPRISE_SERVER_CORE_V: DWORD = 0x00000029;
pub const PRODUCT_HYPERV: DWORD = 0x0000002A;
pub const PRODUCT_STORAGE_EXPRESS_SERVER_CORE: DWORD = 0x0000002B;
pub const PRODUCT_STORAGE_STANDARD_SERVER_CORE: DWORD = 0x0000002C;
pub const PRODUCT_STORAGE_WORKGROUP_SERVER_CORE: DWORD = 0x0000002D;
pub const PRODUCT_STORAGE_ENTERPRISE_SERVER_CORE: DWORD = 0x0000002E;
pub const PRODUCT_STARTER_N: DWORD = 0x0000002F;
pub const PRODUCT_PROFESSIONAL: DWORD = 0x00000030;
pub const PRODUCT_PROFESSIONAL_N: DWORD = 0x00000031;
pub const PRODUCT_SB_SOLUTION_SERVER: DWORD = 0x00000032;
pub const PRODUCT_SERVER_FOR_SB_SOLUTIONS: DWORD = 0x00000033;
pub const PRODUCT_STANDARD_SERVER_SOLUTIONS: DWORD = 0x00000034;
pub const PRODUCT_STANDARD_SERVER_SOLUTIONS_CORE: DWORD = 0x00000035;
pub const PRODUCT_SB_SOLUTION_SERVER_EM: DWORD = 0x00000036;
pub const PRODUCT_SERVER_FOR_SB_SOLUTIONS_EM: DWORD = 0x00000037;
pub const PRODUCT_SOLUTION_EMBEDDEDSERVER: DWORD = 0x00000038;
pub const PRODUCT_SOLUTION_EMBEDDEDSERVER_CORE: DWORD = 0x00000039;
pub const PRODUCT_PROFESSIONAL_EMBEDDED: DWORD = 0x0000003A;
pub const PRODUCT_ESSENTIALBUSINESS_SERVER_MGMT: DWORD = 0x0000003B;
pub const PRODUCT_ESSENTIALBUSINESS_SERVER_ADDL: DWORD = 0x0000003C;
pub const PRODUCT_ESSENTIALBUSINESS_SERVER_MGMTSVC: DWORD = 0x0000003D;
pub const PRODUCT_ESSENTIALBUSINESS_SERVER_ADDLSVC: DWORD = 0x0000003E;
pub const PRODUCT_SMALLBUSINESS_SERVER_PREMIUM_CORE: DWORD = 0x0000003F;
pub const PRODUCT_CLUSTER_SERVER_V: DWORD = 0x00000040;
pub const PRODUCT_EMBEDDED: DWORD = 0x00000041;
pub const PRODUCT_STARTER_E: DWORD = 0x00000042;
pub const PRODUCT_HOME_BASIC_E: DWORD = 0x00000043;
pub const PRODUCT_HOME_PREMIUM_E: DWORD = 0x00000044;
pub const PRODUCT_PROFESSIONAL_E: DWORD = 0x00000045;
pub const PRODUCT_ENTERPRISE_E: DWORD = 0x00000046;
pub const PRODUCT_ULTIMATE_E: DWORD = 0x00000047;
pub const PRODUCT_ENTERPRISE_EVALUATION: DWORD = 0x00000048;
pub const PRODUCT_MULTIPOINT_STANDARD_SERVER: DWORD = 0x0000004C;
pub const PRODUCT_MULTIPOINT_PREMIUM_SERVER: DWORD = 0x0000004D;
pub const PRODUCT_STANDARD_EVALUATION_SERVER: DWORD = 0x0000004F;
pub const PRODUCT_DATACENTER_EVALUATION_SERVER: DWORD = 0x00000050;
pub const PRODUCT_ENTERPRISE_N_EVALUATION: DWORD = 0x00000054;
pub const PRODUCT_EMBEDDED_AUTOMOTIVE: DWORD = 0x00000055;
pub const PRODUCT_EMBEDDED_INDUSTRY_A: DWORD = 0x00000056;
pub const PRODUCT_THINPC: DWORD = 0x00000057;
pub const PRODUCT_EMBEDDED_A: DWORD = 0x00000058;
pub const PRODUCT_EMBEDDED_INDUSTRY: DWORD = 0x00000059;
pub const PRODUCT_EMBEDDED_E: DWORD = 0x0000005A;
pub const PRODUCT_EMBEDDED_INDUSTRY_E: DWORD = 0x0000005B;
pub const PRODUCT_EMBEDDED_INDUSTRY_A_E: DWORD = 0x0000005C;
pub const PRODUCT_STORAGE_WORKGROUP_EVALUATION_SERVER: DWORD = 0x0000005F;
pub const PRODUCT_STORAGE_STANDARD_EVALUATION_SERVER: DWORD = 0x00000060;
pub const PRODUCT_CORE_ARM: DWORD = 0x00000061;
pub const PRODUCT_CORE_N: DWORD = 0x00000062;
pub const PRODUCT_CORE_COUNTRYSPECIFIC: DWORD = 0x00000063;
pub const PRODUCT_CORE_SINGLELANGUAGE: DWORD = 0x00000064;
pub const PRODUCT_CORE: DWORD = 0x00000065;
pub const PRODUCT_PROFESSIONAL_WMC: DWORD = 0x00000067;
pub const PRODUCT_MOBILE_CORE: DWORD = 0x00000068;
pub const PRODUCT_EMBEDDED_INDUSTRY_EVAL: DWORD = 0x00000069;
pub const PRODUCT_EMBEDDED_INDUSTRY_E_EVAL: DWORD = 0x0000006A;
pub const PRODUCT_EMBEDDED_EVAL: DWORD = 0x0000006B;
pub const PRODUCT_EMBEDDED_E_EVAL: DWORD = 0x0000006C;
pub const PRODUCT_NANO_SERVER: DWORD = 0x0000006D;
pub const PRODUCT_CLOUD_STORAGE_SERVER: DWORD = 0x0000006E;
pub const PRODUCT_CORE_CONNECTED: DWORD = 0x0000006F;
pub const PRODUCT_PROFESSIONAL_STUDENT: DWORD = 0x00000070;
pub const PRODUCT_CORE_CONNECTED_N: DWORD = 0x00000071;
pub const PRODUCT_PROFESSIONAL_STUDENT_N: DWORD = 0x00000072;
pub const PRODUCT_CORE_CONNECTED_SINGLELANGUAGE: DWORD = 0x00000073;
pub const PRODUCT_CORE_CONNECTED_COUNTRYSPECIFIC: DWORD = 0x00000074;
pub const PRODUCT_CONNECTED_CAR: DWORD = 0x00000075;
pub const PRODUCT_INDUSTRY_HANDHELD: DWORD = 0x00000076;
pub const PRODUCT_PPI_PRO: DWORD = 0x00000077;
pub const PRODUCT_ARM64_SERVER: DWORD = 0x00000078;
pub const PRODUCT_EDUCATION: DWORD = 0x00000079;
pub const PRODUCT_EDUCATION_N: DWORD = 0x0000007A;
pub const PRODUCT_IOTUAP: DWORD = 0x0000007B;
pub const PRODUCT_CLOUD_HOST_INFRASTRUCTURE_SERVER: DWORD = 0x0000007C;
pub const PRODUCT_ENTERPRISE_S: DWORD = 0x0000007D;
pub const PRODUCT_ENTERPRISE_S_N: DWORD = 0x0000007E;
pub const PRODUCT_PROFESSIONAL_S: DWORD = 0x0000007F;
pub const PRODUCT_PROFESSIONAL_S_N: DWORD = 0x00000080;
pub const PRODUCT_ENTERPRISE_S_EVALUATION: DWORD = 0x00000081;
pub const PRODUCT_ENTERPRISE_S_N_EVALUATION: DWORD = 0x00000082;
pub const PRODUCT_HOLOGRAPHIC: DWORD = 0x00000087;
pub const PRODUCT_PRO_SINGLE_LANGUAGE: DWORD = 0x0000008A;
pub const PRODUCT_PRO_CHINA: DWORD = 0x0000008B;
pub const PRODUCT_ENTERPRISE_SUBSCRIPTION: DWORD = 0x0000008C;
pub const PRODUCT_ENTERPRISE_SUBSCRIPTION_N: DWORD = 0x0000008D;
pub const PRODUCT_DATACENTER_NANO_SERVER: DWORD = 0x0000008F;
pub const PRODUCT_STANDARD_NANO_SERVER: DWORD = 0x00000090;
pub const PRODUCT_DATACENTER_A_SERVER_CORE: DWORD = 0x00000091;
pub const PRODUCT_STANDARD_A_SERVER_CORE: DWORD = 0x00000092;
pub const PRODUCT_DATACENTER_WS_SERVER_CORE: DWORD = 0x00000093;
pub const PRODUCT_STANDARD_WS_SERVER_CORE: DWORD = 0x00000094;
pub const PRODUCT_UTILITY_VM: DWORD = 0x00000095;
pub const PRODUCT_DATACENTER_EVALUATION_SERVER_CORE: DWORD = 0x0000009F;
pub const PRODUCT_STANDARD_EVALUATION_SERVER_CORE: DWORD = 0x000000A0;
pub const PRODUCT_PRO_WORKSTATION: DWORD = 0x000000A1;
pub const PRODUCT_PRO_WORKSTATION_N: DWORD = 0x000000A2;
pub const PRODUCT_PRO_FOR_EDUCATION: DWORD = 0x000000A4;
pub const PRODUCT_PRO_FOR_EDUCATION_N: DWORD = 0x000000A5;
pub const PRODUCT_AZURE_SERVER_CORE: DWORD = 0x000000A8;
pub const PRODUCT_AZURE_NANO_SERVER: DWORD = 0x000000A9;
pub const PRODUCT_UNLICENSED: DWORD = 0xABCDABCD;
pub const LANG_NEUTRAL: WORD = 0x00;
pub const LANG_INVARIANT: WORD = 0x7f;
pub const LANG_AFRIKAANS: WORD = 0x36;
pub const LANG_ALBANIAN: WORD = 0x1c;
pub const LANG_ALSATIAN: WORD = 0x84;
pub const LANG_AMHARIC: WORD = 0x5e;
pub const LANG_ARABIC: WORD = 0x01;
pub const LANG_ARMENIAN: WORD = 0x2b;
pub const LANG_ASSAMESE: WORD = 0x4d;
pub const LANG_AZERI: WORD = 0x2c;
pub const LANG_AZERBAIJANI: WORD = 0x2c;
pub const LANG_BANGLA: WORD = 0x45;
pub const LANG_BASHKIR: WORD = 0x6d;
pub const LANG_BASQUE: WORD = 0x2d;
pub const LANG_BELARUSIAN: WORD = 0x23;
pub const LANG_BENGALI: WORD = 0x45;
pub const LANG_BRETON: WORD = 0x7e;
pub const LANG_BOSNIAN: WORD = 0x1a;
pub const LANG_BOSNIAN_NEUTRAL: WORD = 0x781a;
pub const LANG_BULGARIAN: WORD = 0x02;
pub const LANG_CATALAN: WORD = 0x03;
pub const LANG_CENTRAL_KURDISH: WORD = 0x92;
pub const LANG_CHEROKEE: WORD = 0x5c;
pub const LANG_CHINESE: WORD = 0x04;
pub const LANG_CHINESE_SIMPLIFIED: WORD = 0x04;
pub const LANG_CHINESE_TRADITIONAL: WORD = 0x7c04;
pub const LANG_CORSICAN: WORD = 0x83;
pub const LANG_CROATIAN: WORD = 0x1a;
pub const LANG_CZECH: WORD = 0x05;
pub const LANG_DANISH: WORD = 0x06;
pub const LANG_DARI: WORD = 0x8c;
pub const LANG_DIVEHI: WORD = 0x65;
pub const LANG_DUTCH: WORD = 0x13;
pub const LANG_ENGLISH: WORD = 0x09;
pub const LANG_ESTONIAN: WORD = 0x25;
pub const LANG_FAEROESE: WORD = 0x38;
pub const LANG_FARSI: WORD = 0x29;
pub const LANG_FILIPINO: WORD = 0x64;
pub const LANG_FINNISH: WORD = 0x0b;
pub const LANG_FRENCH: WORD = 0x0c;
pub const LANG_FRISIAN: WORD = 0x62;
pub const LANG_FULAH: WORD = 0x67;
pub const LANG_GALICIAN: WORD = 0x56;
pub const LANG_GEORGIAN: WORD = 0x37;
pub const LANG_GERMAN: WORD = 0x07;
pub const LANG_GREEK: WORD = 0x08;
pub const LANG_GREENLANDIC: WORD = 0x6f;
pub const LANG_GUJARATI: WORD = 0x47;
pub const LANG_HAUSA: WORD = 0x68;
pub const LANG_HAWAIIAN: WORD = 0x75;
pub const LANG_HEBREW: WORD = 0x0d;
pub const LANG_HINDI: WORD = 0x39;
pub const LANG_HUNGARIAN: WORD = 0x0e;
pub const LANG_ICELANDIC: WORD = 0x0f;
pub const LANG_IGBO: WORD = 0x70;
pub const LANG_INDONESIAN: WORD = 0x21;
pub const LANG_INUKTITUT: WORD = 0x5d;
pub const LANG_IRISH: WORD = 0x3c;
pub const LANG_ITALIAN: WORD = 0x10;
pub const LANG_JAPANESE: WORD = 0x11;
pub const LANG_KANNADA: WORD = 0x4b;
pub const LANG_KASHMIRI: WORD = 0x60;
pub const LANG_KAZAK: WORD = 0x3f;
pub const LANG_KHMER: WORD = 0x53;
pub const LANG_KICHE: WORD = 0x86;
pub const LANG_KINYARWANDA: WORD = 0x87;
pub const LANG_KONKANI: WORD = 0x57;
pub const LANG_KOREAN: WORD = 0x12;
pub const LANG_KYRGYZ: WORD = 0x40;
pub const LANG_LAO: WORD = 0x54;
pub const LANG_LATVIAN: WORD = 0x26;
pub const LANG_LITHUANIAN: WORD = 0x27;
pub const LANG_LOWER_SORBIAN: WORD = 0x2e;
pub const LANG_LUXEMBOURGISH: WORD = 0x6e;
pub const LANG_MACEDONIAN: WORD = 0x2f;
pub const LANG_MALAY: WORD = 0x3e;
pub const LANG_MALAYALAM: WORD = 0x4c;
pub const LANG_MALTESE: WORD = 0x3a;
pub const LANG_MANIPURI: WORD = 0x58;
pub const LANG_MAORI: WORD = 0x81;
pub const LANG_MAPUDUNGUN: WORD = 0x7a;
pub const LANG_MARATHI: WORD = 0x4e;
pub const LANG_MOHAWK: WORD = 0x7c;
pub const LANG_MONGOLIAN: WORD = 0x50;
pub const LANG_NEPALI: WORD = 0x61;
pub const LANG_NORWEGIAN: WORD = 0x14;
pub const LANG_OCCITAN: WORD = 0x82;
pub const LANG_ODIA: WORD = 0x48;
pub const LANG_ORIYA: WORD = 0x48;
pub const LANG_PASHTO: WORD = 0x63;
pub const LANG_PERSIAN: WORD = 0x29;
pub const LANG_POLISH: WORD = 0x15;
pub const LANG_PORTUGUESE: WORD = 0x16;
pub const LANG_PULAR: WORD = 0x67;
pub const LANG_PUNJABI: WORD = 0x46;
pub const LANG_QUECHUA: WORD = 0x6b;
pub const LANG_ROMANIAN: WORD = 0x18;
pub const LANG_ROMANSH: WORD = 0x17;
pub const LANG_RUSSIAN: WORD = 0x19;
pub const LANG_SAKHA: WORD = 0x85;
pub const LANG_SAMI: WORD = 0x3b;
pub const LANG_SANSKRIT: WORD = 0x4f;
pub const LANG_SCOTTISH_GAELIC: WORD = 0x91;
pub const LANG_SERBIAN: WORD = 0x1a;
pub const LANG_SERBIAN_NEUTRAL: WORD = 0x7c1a;
pub const LANG_SINDHI: WORD = 0x59;
pub const LANG_SINHALESE: WORD = 0x5b;
pub const LANG_SLOVAK: WORD = 0x1b;
pub const LANG_SLOVENIAN: WORD = 0x24;
pub const LANG_SOTHO: WORD = 0x6c;
pub const LANG_SPANISH: WORD = 0x0a;
pub const LANG_SWAHILI: WORD = 0x41;
pub const LANG_SWEDISH: WORD = 0x1d;
pub const LANG_SYRIAC: WORD = 0x5a;
pub const LANG_TAJIK: WORD = 0x28;
pub const LANG_TAMAZIGHT: WORD = 0x5f;
pub const LANG_TAMIL: WORD = 0x49;
pub const LANG_TATAR: WORD = 0x44;
pub const LANG_TELUGU: WORD = 0x4a;
pub const LANG_THAI: WORD = 0x1e;
pub const LANG_TIBETAN: WORD = 0x51;
pub const LANG_TIGRIGNA: WORD = 0x73;
pub const LANG_TIGRINYA: WORD = 0x73;
pub const LANG_TSWANA: WORD = 0x32;
pub const LANG_TURKISH: WORD = 0x1f;
pub const LANG_TURKMEN: WORD = 0x42;
pub const LANG_UIGHUR: WORD = 0x80;
pub const LANG_UKRAINIAN: WORD = 0x22;
pub const LANG_UPPER_SORBIAN: WORD = 0x2e;
pub const LANG_URDU: WORD = 0x20;
pub const LANG_UZBEK: WORD = 0x43;
pub const LANG_VALENCIAN: WORD = 0x03;
pub const LANG_VIETNAMESE: WORD = 0x2a;
pub const LANG_WELSH: WORD = 0x52;
pub const LANG_WOLOF: WORD = 0x88;
pub const LANG_XHOSA: WORD = 0x34;
pub const LANG_YAKUT: WORD = 0x85;
pub const LANG_YI: WORD = 0x78;
pub const LANG_YORUBA: WORD = 0x6a;
pub const LANG_ZULU: WORD = 0x35;
// Sublanguage identifiers (winnt.h SUBLANG_*). Combined with a LANG_* primary
// language via MAKELANGID; values are only meaningful relative to their primary.
pub const SUBLANG_NEUTRAL: WORD = 0x00;
pub const SUBLANG_DEFAULT: WORD = 0x01;
pub const SUBLANG_SYS_DEFAULT: WORD = 0x02;
pub const SUBLANG_CUSTOM_DEFAULT: WORD = 0x03;
pub const SUBLANG_CUSTOM_UNSPECIFIED: WORD = 0x04;
pub const SUBLANG_UI_CUSTOM_DEFAULT: WORD = 0x05;
pub const SUBLANG_AFRIKAANS_SOUTH_AFRICA: WORD = 0x01;
pub const SUBLANG_ALBANIAN_ALBANIA: WORD = 0x01;
pub const SUBLANG_ALSATIAN_FRANCE: WORD = 0x01;
pub const SUBLANG_AMHARIC_ETHIOPIA: WORD = 0x01;
pub const SUBLANG_ARABIC_SAUDI_ARABIA: WORD = 0x01;
pub const SUBLANG_ARABIC_IRAQ: WORD = 0x02;
pub const SUBLANG_ARABIC_EGYPT: WORD = 0x03;
pub const SUBLANG_ARABIC_LIBYA: WORD = 0x04;
pub const SUBLANG_ARABIC_ALGERIA: WORD = 0x05;
pub const SUBLANG_ARABIC_MOROCCO: WORD = 0x06;
pub const SUBLANG_ARABIC_TUNISIA: WORD = 0x07;
pub const SUBLANG_ARABIC_OMAN: WORD = 0x08;
pub const SUBLANG_ARABIC_YEMEN: WORD = 0x09;
pub const SUBLANG_ARABIC_SYRIA: WORD = 0x0a;
pub const SUBLANG_ARABIC_JORDAN: WORD = 0x0b;
pub const SUBLANG_ARABIC_LEBANON: WORD = 0x0c;
pub const SUBLANG_ARABIC_KUWAIT: WORD = 0x0d;
pub const SUBLANG_ARABIC_UAE: WORD = 0x0e;
pub const SUBLANG_ARABIC_BAHRAIN: WORD = 0x0f;
pub const SUBLANG_ARABIC_QATAR: WORD = 0x10;
pub const SUBLANG_ARMENIAN_ARMENIA: WORD = 0x01;
pub const SUBLANG_ASSAMESE_INDIA: WORD = 0x01;
pub const SUBLANG_AZERI_LATIN: WORD = 0x01;
pub const SUBLANG_AZERI_CYRILLIC: WORD = 0x02;
pub const SUBLANG_AZERBAIJANI_AZERBAIJAN_LATIN: WORD = 0x01;
pub const SUBLANG_AZERBAIJANI_AZERBAIJAN_CYRILLIC: WORD = 0x02;
pub const SUBLANG_BANGLA_INDIA: WORD = 0x01;
pub const SUBLANG_BANGLA_BANGLADESH: WORD = 0x02;
pub const SUBLANG_BASHKIR_RUSSIA: WORD = 0x01;
pub const SUBLANG_BASQUE_BASQUE: WORD = 0x01;
pub const SUBLANG_BELARUSIAN_BELARUS: WORD = 0x01;
pub const SUBLANG_BENGALI_INDIA: WORD = 0x01;
pub const SUBLANG_BENGALI_BANGLADESH: WORD = 0x02;
pub const SUBLANG_BOSNIAN_BOSNIA_HERZEGOVINA_LATIN: WORD = 0x05;
pub const SUBLANG_BOSNIAN_BOSNIA_HERZEGOVINA_CYRILLIC: WORD = 0x08;
pub const SUBLANG_BRETON_FRANCE: WORD = 0x01;
pub const SUBLANG_BULGARIAN_BULGARIA: WORD = 0x01;
pub const SUBLANG_CATALAN_CATALAN: WORD = 0x01;
pub const SUBLANG_CENTRAL_KURDISH_IRAQ: WORD = 0x01;
pub const SUBLANG_CHEROKEE_CHEROKEE: WORD = 0x01;
pub const SUBLANG_CHINESE_TRADITIONAL: WORD = 0x01;
pub const SUBLANG_CHINESE_SIMPLIFIED: WORD = 0x02;
pub const SUBLANG_CHINESE_HONGKONG: WORD = 0x03;
pub const SUBLANG_CHINESE_SINGAPORE: WORD = 0x04;
pub const SUBLANG_CHINESE_MACAU: WORD = 0x05;
pub const SUBLANG_CORSICAN_FRANCE: WORD = 0x01;
pub const SUBLANG_CZECH_CZECH_REPUBLIC: WORD = 0x01;
pub const SUBLANG_CROATIAN_CROATIA: WORD = 0x01;
pub const SUBLANG_CROATIAN_BOSNIA_HERZEGOVINA_LATIN: WORD = 0x04;
pub const SUBLANG_DANISH_DENMARK: WORD = 0x01;
pub const SUBLANG_DARI_AFGHANISTAN: WORD = 0x01;
pub const SUBLANG_DIVEHI_MALDIVES: WORD = 0x01;
pub const SUBLANG_DUTCH: WORD = 0x01;
pub const SUBLANG_DUTCH_BELGIAN: WORD = 0x02;
pub const SUBLANG_ENGLISH_US: WORD = 0x01;
pub const SUBLANG_ENGLISH_UK: WORD = 0x02;
pub const SUBLANG_ENGLISH_AUS: WORD = 0x03;
pub const SUBLANG_ENGLISH_CAN: WORD = 0x04;
pub const SUBLANG_ENGLISH_NZ: WORD = 0x05;
pub const SUBLANG_ENGLISH_EIRE: WORD = 0x06;
pub const SUBLANG_ENGLISH_SOUTH_AFRICA: WORD = 0x07;
pub const SUBLANG_ENGLISH_JAMAICA: WORD = 0x08;
pub const SUBLANG_ENGLISH_CARIBBEAN: WORD = 0x09;
pub const SUBLANG_ENGLISH_BELIZE: WORD = 0x0a;
pub const SUBLANG_ENGLISH_TRINIDAD: WORD = 0x0b;
pub const SUBLANG_ENGLISH_ZIMBABWE: WORD = 0x0c;
pub const SUBLANG_ENGLISH_PHILIPPINES: WORD = 0x0d;
pub const SUBLANG_ENGLISH_INDIA: WORD = 0x10;
pub const SUBLANG_ENGLISH_MALAYSIA: WORD = 0x11;
pub const SUBLANG_ENGLISH_SINGAPORE: WORD = 0x12;
pub const SUBLANG_ESTONIAN_ESTONIA: WORD = 0x01;
pub const SUBLANG_FAEROESE_FAROE_ISLANDS: WORD = 0x01;
pub const SUBLANG_FILIPINO_PHILIPPINES: WORD = 0x01;
pub const SUBLANG_FINNISH_FINLAND: WORD = 0x01;
pub const SUBLANG_FRENCH: WORD = 0x01;
pub const SUBLANG_FRENCH_BELGIAN: WORD = 0x02;
pub const SUBLANG_FRENCH_CANADIAN: WORD = 0x03;
pub const SUBLANG_FRENCH_SWISS: WORD = 0x04;
pub const SUBLANG_FRENCH_LUXEMBOURG: WORD = 0x05;
pub const SUBLANG_FRENCH_MONACO: WORD = 0x06;
pub const SUBLANG_FRISIAN_NETHERLANDS: WORD = 0x01;
pub const SUBLANG_FULAH_SENEGAL: WORD = 0x02;
pub const SUBLANG_GALICIAN_GALICIAN: WORD = 0x01;
pub const SUBLANG_GEORGIAN_GEORGIA: WORD = 0x01;
pub const SUBLANG_GERMAN: WORD = 0x01;
pub const SUBLANG_GERMAN_SWISS: WORD = 0x02;
pub const SUBLANG_GERMAN_AUSTRIAN: WORD = 0x03;
pub const SUBLANG_GERMAN_LUXEMBOURG: WORD = 0x04;
pub const SUBLANG_GERMAN_LIECHTENSTEIN: WORD = 0x05;
pub const SUBLANG_GREEK_GREECE: WORD = 0x01;
pub const SUBLANG_GREENLANDIC_GREENLAND: WORD = 0x01;
pub const SUBLANG_GUJARATI_INDIA: WORD = 0x01;
pub const SUBLANG_HAUSA_NIGERIA_LATIN: WORD = 0x01;
pub const SUBLANG_HAWAIIAN_US: WORD = 0x01;
pub const SUBLANG_HEBREW_ISRAEL: WORD = 0x01;
pub const SUBLANG_HINDI_INDIA: WORD = 0x01;
pub const SUBLANG_HUNGARIAN_HUNGARY: WORD = 0x01;
pub const SUBLANG_ICELANDIC_ICELAND: WORD = 0x01;
pub const SUBLANG_IGBO_NIGERIA: WORD = 0x01;
pub const SUBLANG_INDONESIAN_INDONESIA: WORD = 0x01;
pub const SUBLANG_INUKTITUT_CANADA: WORD = 0x01;
pub const SUBLANG_INUKTITUT_CANADA_LATIN: WORD = 0x02;
pub const SUBLANG_IRISH_IRELAND: WORD = 0x02;
pub const SUBLANG_ITALIAN: WORD = 0x01;
pub const SUBLANG_ITALIAN_SWISS: WORD = 0x02;
pub const SUBLANG_JAPANESE_JAPAN: WORD = 0x01;
pub const SUBLANG_KANNADA_INDIA: WORD = 0x01;
pub const SUBLANG_KASHMIRI_SASIA: WORD = 0x02;
pub const SUBLANG_KASHMIRI_INDIA: WORD = 0x02;
pub const SUBLANG_KAZAK_KAZAKHSTAN: WORD = 0x01;
pub const SUBLANG_KHMER_CAMBODIA: WORD = 0x01;
pub const SUBLANG_KICHE_GUATEMALA: WORD = 0x01;
pub const SUBLANG_KINYARWANDA_RWANDA: WORD = 0x01;
pub const SUBLANG_KONKANI_INDIA: WORD = 0x01;
pub const SUBLANG_KOREAN: WORD = 0x01;
pub const SUBLANG_KYRGYZ_KYRGYZSTAN: WORD = 0x01;
pub const SUBLANG_LAO_LAO: WORD = 0x01;
pub const SUBLANG_LATVIAN_LATVIA: WORD = 0x01;
pub const SUBLANG_LITHUANIAN: WORD = 0x01;
pub const SUBLANG_LOWER_SORBIAN_GERMANY: WORD = 0x02;
pub const SUBLANG_LUXEMBOURGISH_LUXEMBOURG: WORD = 0x01;
pub const SUBLANG_MACEDONIAN_MACEDONIA: WORD = 0x01;
pub const SUBLANG_MALAY_MALAYSIA: WORD = 0x01;
pub const SUBLANG_MALAY_BRUNEI_DARUSSALAM: WORD = 0x02;
pub const SUBLANG_MALAYALAM_INDIA: WORD = 0x01;
pub const SUBLANG_MALTESE_MALTA: WORD = 0x01;
pub const SUBLANG_MAORI_NEW_ZEALAND: WORD = 0x01;
pub const SUBLANG_MAPUDUNGUN_CHILE: WORD = 0x01;
pub const SUBLANG_MARATHI_INDIA: WORD = 0x01;
pub const SUBLANG_MOHAWK_MOHAWK: WORD = 0x01;
pub const SUBLANG_MONGOLIAN_CYRILLIC_MONGOLIA: WORD = 0x01;
pub const SUBLANG_MONGOLIAN_PRC: WORD = 0x02;
pub const SUBLANG_NEPALI_INDIA: WORD = 0x02;
pub const SUBLANG_NEPALI_NEPAL: WORD = 0x01;
pub const SUBLANG_NORWEGIAN_BOKMAL: WORD = 0x01;
pub const SUBLANG_NORWEGIAN_NYNORSK: WORD = 0x02;
pub const SUBLANG_OCCITAN_FRANCE: WORD = 0x01;
pub const SUBLANG_ODIA_INDIA: WORD = 0x01;
pub const SUBLANG_ORIYA_INDIA: WORD = 0x01;
pub const SUBLANG_PASHTO_AFGHANISTAN: WORD = 0x01;
pub const SUBLANG_PERSIAN_IRAN: WORD = 0x01;
pub const SUBLANG_POLISH_POLAND: WORD = 0x01;
pub const SUBLANG_PORTUGUESE: WORD = 0x02;
pub const SUBLANG_PORTUGUESE_BRAZILIAN: WORD = 0x01;
pub const SUBLANG_PULAR_SENEGAL: WORD = 0x02;
pub const SUBLANG_PUNJABI_INDIA: WORD = 0x01;
pub const SUBLANG_PUNJABI_PAKISTAN: WORD = 0x02;
pub const SUBLANG_QUECHUA_BOLIVIA: WORD = 0x01;
pub const SUBLANG_QUECHUA_ECUADOR: WORD = 0x02;
pub const SUBLANG_QUECHUA_PERU: WORD = 0x03;
pub const SUBLANG_ROMANIAN_ROMANIA: WORD = 0x01;
pub const SUBLANG_ROMANSH_SWITZERLAND: WORD = 0x01;
pub const SUBLANG_RUSSIAN_RUSSIA: WORD = 0x01;
pub const SUBLANG_SAKHA_RUSSIA: WORD = 0x01;
pub const SUBLANG_SAMI_NORTHERN_NORWAY: WORD = 0x01;
pub const SUBLANG_SAMI_NORTHERN_SWEDEN: WORD = 0x02;
pub const SUBLANG_SAMI_NORTHERN_FINLAND: WORD = 0x03;
pub const SUBLANG_SAMI_LULE_NORWAY: WORD = 0x04;
pub const SUBLANG_SAMI_LULE_SWEDEN: WORD = 0x05;
pub const SUBLANG_SAMI_SOUTHERN_NORWAY: WORD = 0x06;
pub const SUBLANG_SAMI_SOUTHERN_SWEDEN: WORD = 0x07;
pub const SUBLANG_SAMI_SKOLT_FINLAND: WORD = 0x08;
pub const SUBLANG_SAMI_INARI_FINLAND: WORD = 0x09;
pub const SUBLANG_SANSKRIT_INDIA: WORD = 0x01;
pub const SUBLANG_SCOTTISH_GAELIC: WORD = 0x01;
pub const SUBLANG_SERBIAN_BOSNIA_HERZEGOVINA_LATIN: WORD = 0x06;
pub const SUBLANG_SERBIAN_BOSNIA_HERZEGOVINA_CYRILLIC: WORD = 0x07;
pub const SUBLANG_SERBIAN_MONTENEGRO_LATIN: WORD = 0x0b;
pub const SUBLANG_SERBIAN_MONTENEGRO_CYRILLIC: WORD = 0x0c;
pub const SUBLANG_SERBIAN_SERBIA_LATIN: WORD = 0x09;
pub const SUBLANG_SERBIAN_SERBIA_CYRILLIC: WORD = 0x0a;
pub const SUBLANG_SERBIAN_CROATIA: WORD = 0x01;
pub const SUBLANG_SERBIAN_LATIN: WORD = 0x02;
pub const SUBLANG_SERBIAN_CYRILLIC: WORD = 0x03;
pub const SUBLANG_SINDHI_INDIA: WORD = 0x01;
pub const SUBLANG_SINDHI_PAKISTAN: WORD = 0x02;
pub const SUBLANG_SINDHI_AFGHANISTAN: WORD = 0x02;
pub const SUBLANG_SINHALESE_SRI_LANKA: WORD = 0x01;
pub const SUBLANG_SOTHO_NORTHERN_SOUTH_AFRICA: WORD = 0x01;
pub const SUBLANG_SLOVAK_SLOVAKIA: WORD = 0x01;
pub const SUBLANG_SLOVENIAN_SLOVENIA: WORD = 0x01;
pub const SUBLANG_SPANISH: WORD = 0x01;
pub const SUBLANG_SPANISH_MEXICAN: WORD = 0x02;
pub const SUBLANG_SPANISH_MODERN: WORD = 0x03;
pub const SUBLANG_SPANISH_GUATEMALA: WORD = 0x04;
pub const SUBLANG_SPANISH_COSTA_RICA: WORD = 0x05;
pub const SUBLANG_SPANISH_PANAMA: WORD = 0x06;
pub const SUBLANG_SPANISH_DOMINICAN_REPUBLIC: WORD = 0x07;
pub const SUBLANG_SPANISH_VENEZUELA: WORD = 0x08;
pub const SUBLANG_SPANISH_COLOMBIA: WORD = 0x09;
pub const SUBLANG_SPANISH_PERU: WORD = 0x0a;
pub const SUBLANG_SPANISH_ARGENTINA: WORD = 0x0b;
pub const SUBLANG_SPANISH_ECUADOR: WORD = 0x0c;
pub const SUBLANG_SPANISH_CHILE: WORD = 0x0d;
pub const SUBLANG_SPANISH_URUGUAY: WORD = 0x0e;
pub const SUBLANG_SPANISH_PARAGUAY: WORD = 0x0f;
pub const SUBLANG_SPANISH_BOLIVIA: WORD = 0x10;
pub const SUBLANG_SPANISH_EL_SALVADOR: WORD = 0x11;
pub const SUBLANG_SPANISH_HONDURAS: WORD = 0x12;
pub const SUBLANG_SPANISH_NICARAGUA: WORD = 0x13;
pub const SUBLANG_SPANISH_PUERTO_RICO: WORD = 0x14;
pub const SUBLANG_SPANISH_US: WORD = 0x15;
pub const SUBLANG_SWAHILI_KENYA: WORD = 0x01;
pub const SUBLANG_SWEDISH: WORD = 0x01;
pub const SUBLANG_SWEDISH_FINLAND: WORD = 0x02;
pub const SUBLANG_SYRIAC_SYRIA: WORD = 0x01;
pub const SUBLANG_TAJIK_TAJIKISTAN: WORD = 0x01;
pub const SUBLANG_TAMAZIGHT_ALGERIA_LATIN: WORD = 0x02;
pub const SUBLANG_TAMAZIGHT_MOROCCO_TIFINAGH: WORD = 0x04;
pub const SUBLANG_TAMIL_INDIA: WORD = 0x01;
pub const SUBLANG_TAMIL_SRI_LANKA: WORD = 0x02;
pub const SUBLANG_TATAR_RUSSIA: WORD = 0x01;
pub const SUBLANG_TELUGU_INDIA: WORD = 0x01;
pub const SUBLANG_THAI_THAILAND: WORD = 0x01;
pub const SUBLANG_TIBETAN_PRC: WORD = 0x01;
pub const SUBLANG_TIGRIGNA_ERITREA: WORD = 0x02;
pub const SUBLANG_TIGRINYA_ERITREA: WORD = 0x02;
pub const SUBLANG_TIGRINYA_ETHIOPIA: WORD = 0x01;
pub const SUBLANG_TSWANA_BOTSWANA: WORD = 0x02;
pub const SUBLANG_TSWANA_SOUTH_AFRICA: WORD = 0x01;
pub const SUBLANG_TURKISH_TURKEY: WORD = 0x01;
pub const SUBLANG_TURKMEN_TURKMENISTAN: WORD = 0x01;
pub const SUBLANG_UIGHUR_PRC: WORD = 0x01;
pub const SUBLANG_UKRAINIAN_UKRAINE: WORD = 0x01;
pub const SUBLANG_UPPER_SORBIAN_GERMANY: WORD = 0x01;
pub const SUBLANG_URDU_PAKISTAN: WORD = 0x01;
pub const SUBLANG_URDU_INDIA: WORD = 0x02;
pub const SUBLANG_UZBEK_LATIN: WORD = 0x01;
pub const SUBLANG_UZBEK_CYRILLIC: WORD = 0x02;
pub const SUBLANG_VALENCIAN_VALENCIA: WORD = 0x02;
pub const SUBLANG_VIETNAMESE_VIETNAM: WORD = 0x01;
pub const SUBLANG_WELSH_UNITED_KINGDOM: WORD = 0x01;
pub const SUBLANG_WOLOF_SENEGAL: WORD = 0x01;
pub const SUBLANG_XHOSA_SOUTH_AFRICA: WORD = 0x01;
pub const SUBLANG_YAKUT_RUSSIA: WORD = 0x01;
pub const SUBLANG_YI_PRC: WORD = 0x01;
pub const SUBLANG_YORUBA_NIGERIA: WORD = 0x01;
pub const SUBLANG_ZULU_SOUTH_AFRICA: WORD = 0x01;
// Sort order identifiers (winnt.h SORT_*), combined with a LANGID via MAKELCID.
pub const SORT_DEFAULT: WORD = 0x0;
pub const SORT_INVARIANT_MATH: WORD = 0x1;
pub const SORT_JAPANESE_XJIS: WORD = 0x0;
pub const SORT_JAPANESE_UNICODE: WORD = 0x1;
pub const SORT_JAPANESE_RADICALSTROKE: WORD = 0x4;
pub const SORT_CHINESE_BIG5: WORD = 0x0;
pub const SORT_CHINESE_PRCP: WORD = 0x0;
pub const SORT_CHINESE_UNICODE: WORD = 0x1;
pub const SORT_CHINESE_PRC: WORD = 0x2;
pub const SORT_CHINESE_BOPOMOFO: WORD = 0x3;
pub const SORT_CHINESE_RADICALSTROKE: WORD = 0x4;
pub const SORT_KOREAN_KSC: WORD = 0x0;
pub const SORT_KOREAN_UNICODE: WORD = 0x1;
pub const SORT_GERMAN_PHONE_BOOK: WORD = 0x1;
pub const SORT_HUNGARIAN_DEFAULT: WORD = 0x0;
pub const SORT_HUNGARIAN_TECHNICAL: WORD = 0x1;
pub const SORT_GEORGIAN_TRADITIONAL: WORD = 0x0;
pub const SORT_GEORGIAN_MODERN: WORD = 0x1;
macro_rules! MAKELANGID { ($p:expr, $s:expr) => (($s << 10) | $p) }
/// Builds a `LANGID` from a primary language ID `p` (low 10 bits) and a
/// sublanguage ID `s` (bits 10-15). Equivalent to the winnt.h `MAKELANGID` macro.
#[inline]
pub fn MAKELANGID(p: WORD, s: WORD) -> LANGID {
    (s << 10) | p
}
/// Extracts the primary language ID (low 10 bits) from a `LANGID`.
#[inline]
pub fn PRIMARYLANGID(lgid: LANGID) -> WORD {
    lgid & 0x3ff
}
/// Extracts the sublanguage ID (bits 10-15) from a `LANGID`.
#[inline]
pub fn SUBLANGID(lgid: LANGID) -> WORD {
    lgid >> 10
}
pub const NLS_VALID_LOCALE_MASK: DWORD = 0x000fffff;
// Const-context form of MAKELCID; keep in sync with the `MAKELCID` function below.
macro_rules! MAKELCID {
    ($lgid:expr, $srtid:expr) => ((($srtid as DWORD) << 16) | ($lgid as DWORD))
}
/// Builds an `LCID` from a `LANGID` (low 16 bits) and a sort order ID
/// (bits 16-19). Equivalent to the winnt.h `MAKELCID` macro.
#[inline]
pub fn MAKELCID(lgid: LANGID, srtid: WORD) -> LCID {
    ((srtid as DWORD) << 16) | (lgid as DWORD)
}
/// Builds an `LCID` from a `LANGID`, a sort order ID (bits 16-19) and a sort
/// version (bits 20-23). Equivalent to the winnt.h `MAKESORTLCID` macro.
#[inline]
pub fn MAKESORTLCID(lgid: LANGID, srtid: WORD, ver: WORD) -> LCID {
    MAKELCID(lgid, srtid) | ((ver as DWORD) << 20)
}
/// Extracts the `LANGID` (low 16 bits) from an `LCID` by truncation.
#[inline]
pub fn LANGIDFROMLCID(lcid: LCID) -> LANGID {
    lcid as LANGID
}
/// Extracts the sort order ID (bits 16-19) from an `LCID`.
#[inline]
pub fn SORTIDFROMLCID(lcid: LCID) -> WORD {
    ((lcid >> 16) & 0xf) as WORD
}
/// Extracts the sort version (bits 20-23) from an `LCID`.
///
/// winnt.h defines `SORTVERSIONFROMLCID(lcid)` as `(((DWORD)(lcid)) >> 20) & 0xf`;
/// the previous `>> 16` here duplicated `SORTIDFROMLCID` and returned the sort
/// order ID instead of the sort version.
#[inline]
pub fn SORTVERSIONFROMLCID(lcid: LCID) -> WORD {
    ((lcid >> 20) & 0xf) as WORD
}
pub const LOCALE_NAME_MAX_LENGTH: usize = 85;
pub const LANG_SYSTEM_DEFAULT: LANGID = MAKELANGID!(LANG_NEUTRAL, SUBLANG_SYS_DEFAULT);
pub const LANG_USER_DEFAULT: LANGID = MAKELANGID!(LANG_NEUTRAL, SUBLANG_DEFAULT);
pub const LOCALE_SYSTEM_DEFAULT: LCID = MAKELCID!(LANG_SYSTEM_DEFAULT, SORT_DEFAULT);
pub const LOCALE_USER_DEFAULT: LCID = MAKELCID!(LANG_USER_DEFAULT, SORT_DEFAULT);
pub const LOCALE_CUSTOM_DEFAULT: LCID
= MAKELCID!(MAKELANGID!(LANG_NEUTRAL, SUBLANG_CUSTOM_DEFAULT), SORT_DEFAULT);
pub const LOCALE_CUSTOM_UNSPECIFIED: LCID
= MAKELCID!(MAKELANGID!(LANG_NEUTRAL, SUBLANG_CUSTOM_UNSPECIFIED), SORT_DEFAULT);
pub const LOCALE_CUSTOM_UI_DEFAULT: LCID
= MAKELCID!(MAKELANGID!(LANG_NEUTRAL, SUBLANG_UI_CUSTOM_DEFAULT), SORT_DEFAULT);
pub const LOCALE_NEUTRAL: LCID
= MAKELCID!(MAKELANGID!(LANG_NEUTRAL, SUBLANG_NEUTRAL), SORT_DEFAULT);
pub const LOCALE_INVARIANT: LCID
= MAKELCID!(MAKELANGID!(LANG_INVARIANT, SUBLANG_NEUTRAL), SORT_DEFAULT);
pub const LOCALE_TRANSIENT_KEYBOARD1: LCID = 0x2000;
pub const LOCALE_TRANSIENT_KEYBOARD2: LCID = 0x2400;
pub const LOCALE_TRANSIENT_KEYBOARD3: LCID = 0x2800;
pub const LOCALE_TRANSIENT_KEYBOARD4: LCID = 0x2c00;
pub const LOCALE_UNASSIGNED_LCID: LCID = LOCALE_CUSTOM_UNSPECIFIED;
// NTSTATUS values surfaced through winnt.h (exception/wait/debug codes).
pub const STATUS_WAIT_0: DWORD = 0x00000000;
pub const STATUS_ABANDONED_WAIT_0: DWORD = 0x00000080;
pub const STATUS_USER_APC: DWORD = 0x000000C0;
pub const STATUS_TIMEOUT: DWORD = 0x00000102;
pub const STATUS_PENDING: DWORD = 0x00000103;
pub const DBG_EXCEPTION_HANDLED: DWORD = 0x00010001;
pub const DBG_CONTINUE: DWORD = 0x00010002;
pub const STATUS_SEGMENT_NOTIFICATION: DWORD = 0x40000005;
pub const STATUS_FATAL_APP_EXIT: DWORD = 0x40000015;
pub const DBG_REPLY_LATER: DWORD = 0x40010001;
pub const DBG_TERMINATE_THREAD: DWORD = 0x40010003;
pub const DBG_TERMINATE_PROCESS: DWORD = 0x40010004;
pub const DBG_CONTROL_C: DWORD = 0x40010005;
pub const DBG_PRINTEXCEPTION_C: DWORD = 0x40010006;
pub const DBG_RIPEXCEPTION: DWORD = 0x40010007;
pub const DBG_CONTROL_BREAK: DWORD = 0x40010008;
pub const DBG_COMMAND_EXCEPTION: DWORD = 0x40010009;
pub const DBG_PRINTEXCEPTION_WIDE_C: DWORD = 0x4001000A;
pub const STATUS_GUARD_PAGE_VIOLATION: DWORD = 0x80000001;
pub const STATUS_DATATYPE_MISALIGNMENT: DWORD = 0x80000002;
pub const STATUS_BREAKPOINT: DWORD = 0x80000003;
pub const STATUS_SINGLE_STEP: DWORD = 0x80000004;
pub const STATUS_LONGJUMP: DWORD = 0x80000026;
pub const STATUS_UNWIND_CONSOLIDATE: DWORD = 0x80000029;
pub const DBG_EXCEPTION_NOT_HANDLED: DWORD = 0x80010001;
pub const STATUS_ACCESS_VIOLATION: DWORD = 0xC0000005;
pub const STATUS_IN_PAGE_ERROR: DWORD = 0xC0000006;
pub const STATUS_INVALID_HANDLE: DWORD = 0xC0000008;
pub const STATUS_INVALID_PARAMETER: DWORD = 0xC000000D;
pub const STATUS_NO_MEMORY: DWORD = 0xC0000017;
pub const STATUS_ILLEGAL_INSTRUCTION: DWORD = 0xC000001D;
pub const STATUS_NONCONTINUABLE_EXCEPTION: DWORD = 0xC0000025;
pub const STATUS_INVALID_DISPOSITION: DWORD = 0xC0000026;
pub const STATUS_ARRAY_BOUNDS_EXCEEDED: DWORD = 0xC000008C;
pub const STATUS_FLOAT_DENORMAL_OPERAND: DWORD = 0xC000008D;
pub const STATUS_FLOAT_DIVIDE_BY_ZERO: DWORD = 0xC000008E;
pub const STATUS_FLOAT_INEXACT_RESULT: DWORD = 0xC000008F;
pub const STATUS_FLOAT_INVALID_OPERATION: DWORD = 0xC0000090;
pub const STATUS_FLOAT_OVERFLOW: DWORD = 0xC0000091;
pub const STATUS_FLOAT_STACK_CHECK: DWORD = 0xC0000092;
pub const STATUS_FLOAT_UNDERFLOW: DWORD = 0xC0000093;
pub const STATUS_INTEGER_DIVIDE_BY_ZERO: DWORD = 0xC0000094;
pub const STATUS_INTEGER_OVERFLOW: DWORD = 0xC0000095;
pub const STATUS_PRIVILEGED_INSTRUCTION: DWORD = 0xC0000096;
pub const STATUS_STACK_OVERFLOW: DWORD = 0xC00000FD;
pub const STATUS_DLL_NOT_FOUND: DWORD = 0xC0000135;
pub const STATUS_ORDINAL_NOT_FOUND: DWORD = 0xC0000138;
pub const STATUS_ENTRYPOINT_NOT_FOUND: DWORD = 0xC0000139;
pub const STATUS_CONTROL_C_EXIT: DWORD = 0xC000013A;
pub const STATUS_DLL_INIT_FAILED: DWORD = 0xC0000142;
pub const STATUS_FLOAT_MULTIPLE_FAULTS: DWORD = 0xC00002B4;
pub const STATUS_FLOAT_MULTIPLE_TRAPS: DWORD = 0xC00002B5;
pub const STATUS_REG_NAT_CONSUMPTION: DWORD = 0xC00002C9;
pub const STATUS_HEAP_CORRUPTION: DWORD = 0xC0000374;
pub const STATUS_STACK_BUFFER_OVERRUN: DWORD = 0xC0000409;
pub const STATUS_INVALID_CRUNTIME_PARAMETER: DWORD = 0xC0000417;
pub const STATUS_ASSERTION_FAILURE: DWORD = 0xC0000420;
pub const STATUS_SXS_EARLY_DEACTIVATION: DWORD = 0xC015000F;
pub const STATUS_SXS_INVALID_DEACTIVATION: DWORD = 0xC0150010;
pub const MAXIMUM_WAIT_OBJECTS: DWORD = 64;
pub const MAXIMUM_SUSPEND_COUNT: CHAR = MAXCHAR;
pub type KSPIN_LOCK = ULONG_PTR;
pub type PKSPIN_LOCK = *mut KSPIN_LOCK;
STRUCT!{struct M128A { // FIXME align 16
Low: ULONGLONG,
High: LONGLONG,
}}
pub type PM128A = *mut M128A;
#[cfg(target_arch = "x86")]
STRUCT!{struct XSAVE_FORMAT { // FIXME align 16
ControlWord: WORD,
StatusWord: WORD,
TagWord: BYTE,
Reserved1: BYTE,
ErrorOpcode: WORD,
ErrorOffset: DWORD,
ErrorSelector: WORD,
Reserved2: WORD,
DataOffset: DWORD,
DataSelector: WORD,
Reserved3: WORD,
MxCsr: DWORD,
MxCsr_Mask: DWORD,
FloatRegisters: [M128A; 8],
XmmRegisters: [M128A; 8],
Reserved4: [BYTE; 224],
}}
#[cfg(target_arch = "x86_64")]
STRUCT!{struct XSAVE_FORMAT { // FIXME align 16
ControlWord: WORD,
StatusWord: WORD,
TagWord: BYTE,
Reserved1: BYTE,
ErrorOpcode: WORD,
ErrorOffset: DWORD,
ErrorSelector: WORD,
Reserved2: WORD,
DataOffset: DWORD,
DataSelector: WORD,
Reserved3: WORD,
MxCsr: DWORD,
MxCsr_Mask: DWORD,
FloatRegisters: [M128A; 8],
XmmRegisters: [M128A; 16],
Reserved4: [BYTE; 96],
}}
pub type PXSAVE_FORMAT = *mut XSAVE_FORMAT;
STRUCT!{struct XSAVE_AREA_HEADER { // FIXME align 8
Mask: DWORD64,
CompactionMask: DWORD64,
Reserved2: [DWORD64; 6],
}}
pub type PXSAVE_AREA_HEADER = *mut XSAVE_AREA_HEADER;
STRUCT!{struct XSAVE_AREA { // FIXME align 16
LegacyState: XSAVE_FORMAT,
Header: XSAVE_AREA_HEADER,
}}
pub type PXSAVE_AREA = *mut XSAVE_AREA;
#[cfg(target_arch = "x86")]
STRUCT!{struct XSTATE_CONTEXT {
Mask: DWORD64,
Length: DWORD,
Reserved1: DWORD,
Area: PXSAVE_AREA,
Reserved2: DWORD,
Buffer: PVOID,
Reserved3: DWORD,
}}
#[cfg(target_arch = "x86_64")]
STRUCT!{struct XSTATE_CONTEXT {
Mask: DWORD64,
Length: DWORD,
Reserved1: DWORD,
Area: PXSAVE_AREA,
Buffer: PVOID,
}}
pub type PXSTATE_CONTEXT = *mut XSTATE_CONTEXT;
STRUCT!{struct SCOPE_TABLE_AMD64 {
Count: DWORD,
ScopeRecord: [SCOPE_TABLE_AMD64_ScopeRecord; 1],
}}
STRUCT!{struct SCOPE_TABLE_AMD64_ScopeRecord {
BeginAddress: DWORD,
EndAddress: DWORD,
HandlerAddress: DWORD,
JumpTarget: DWORD,
}}
pub type PSCOPE_TABLE_AMD64 = *mut SCOPE_TABLE_AMD64;
// Skip interlocked and bit manipulation stuff because it is all intrinsics
// Use the native Rust equivalents instead
#[cfg(target_arch = "x86_64")]
IFDEF!{
pub const EXCEPTION_READ_FAULT: DWORD = 0;
pub const EXCEPTION_WRITE_FAULT: DWORD = 1;
pub const EXCEPTION_EXECUTE_FAULT: DWORD = 8;
pub const CONTEXT_AMD64: DWORD = 0x00100000;
pub const CONTEXT_CONTROL: DWORD = CONTEXT_AMD64 | 0x00000001;
pub const CONTEXT_INTEGER: DWORD = CONTEXT_AMD64 | 0x00000002;
pub const CONTEXT_SEGMENTS: DWORD = CONTEXT_AMD64 | 0x00000004;
pub const CONTEXT_FLOATING_POINT: DWORD = CONTEXT_AMD64 | 0x00000008;
pub const CONTEXT_DEBUG_REGISTERS: DWORD = CONTEXT_AMD64 | 0x00000010;
pub const CONTEXT_FULL: DWORD = CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT;
pub const CONTEXT_ALL: DWORD = CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS
| CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS;
pub const CONTEXT_XSTATE: DWORD = CONTEXT_AMD64 | 0x00000040;
pub const CONTEXT_EXCEPTION_ACTIVE: DWORD = 0x08000000;
pub const CONTEXT_SERVICE_ACTIVE: DWORD = 0x10000000;
pub const CONTEXT_EXCEPTION_REQUEST: DWORD = 0x40000000;
pub const CONTEXT_EXCEPTION_REPORTING: DWORD = 0x80000000;
pub const INITIAL_MXCSR: DWORD = 0x1f80;
pub const INITIAL_FPCSR: DWORD = 0x027f;
pub type XMM_SAVE_AREA32 = XSAVE_FORMAT;
pub type PXMM_SAVE_AREA32 = *mut XSAVE_FORMAT;
STRUCT!{struct CONTEXT { // FIXME align 16
P1Home: DWORD64,
P2Home: DWORD64,
P3Home: DWORD64,
P4Home: DWORD64,
P5Home: DWORD64,
P6Home: DWORD64,
ContextFlags: DWORD,
MxCsr: DWORD,
SegCs: WORD,
SegDs: WORD,
SegEs: WORD,
SegFs: WORD,
SegGs: WORD,
SegSs: WORD,
EFlags: DWORD,
Dr0: DWORD64,
Dr1: DWORD64,
Dr2: DWORD64,
Dr3: DWORD64,
Dr6: DWORD64,
Dr7: DWORD64,
Rax: DWORD64,
Rcx: DWORD64,
Rdx: DWORD64,
Rbx: DWORD64,
Rsp: DWORD64,
Rbp: DWORD64,
Rsi: DWORD64,
Rdi: DWORD64,
R8: DWORD64,
R9: DWORD64,
R10: DWORD64,
R11: DWORD64,
R12: DWORD64,
R13: DWORD64,
R14: DWORD64,
R15: DWORD64,
Rip: DWORD64,
FltSave: XMM_SAVE_AREA32, // FIXME union
VectorRegister: [M128A; 26],
VectorControl: DWORD64,
DebugControl: DWORD64,
LastBranchToRip: DWORD64,
LastBranchFromRip: DWORD64,
LastExceptionToRip: DWORD64,
LastExceptionFromRip: DWORD64,
}}
pub type PCONTEXT = *mut CONTEXT;
pub type RUNTIME_FUNCTION = IMAGE_RUNTIME_FUNCTION_ENTRY;
pub type PRUNTIME_FUNCTION = *mut IMAGE_RUNTIME_FUNCTION_ENTRY;
pub type SCOPE_TABLE = SCOPE_TABLE_AMD64;
pub type PSCOPE_TABLE = *mut SCOPE_TABLE_AMD64;
pub const RUNTIME_FUNCTION_INDIRECT: DWORD = 0x1;
pub const UNW_FLAG_NHANDLER: DWORD = 0x0;
pub const UNW_FLAG_EHANDLER: DWORD = 0x1;
pub const UNW_FLAG_UHANDLER: DWORD = 0x2;
pub const UNW_FLAG_CHAININFO: DWORD = 0x4;
pub const UNW_FLAG_NO_EPILOGUE: DWORD = 0x80000000;
pub const UNWIND_HISTORY_TABLE_SIZE: usize = 12;
STRUCT!{struct UNWIND_HISTORY_TABLE_ENTRY {
ImageBase: DWORD64,
FunctionEntry: PRUNTIME_FUNCTION,
}}
pub type PUNWIND_HISTORY_TABLE_ENTRY = *mut UNWIND_HISTORY_TABLE_ENTRY;
STRUCT!{struct UNWIND_HISTORY_TABLE {
Count: DWORD,
LocalHint: BYTE,
GlobalHint: BYTE,
Search: BYTE,
Once: BYTE,
LowAddress: DWORD64,
HighAddress: DWORD64,
Entry: [UNWIND_HISTORY_TABLE_ENTRY; UNWIND_HISTORY_TABLE_SIZE],
}}
pub type PUNWIND_HISTORY_TABLE = *mut UNWIND_HISTORY_TABLE;
FN!{cdecl PGET_RUNTIME_FUNCTION_CALLBACK(
ControlPc: DWORD64,
Context: PVOID,
) -> PRUNTIME_FUNCTION}
FN!{cdecl POUT_OF_PROCESS_FUNCTION_TABLE_CALLBACK(
Process: HANDLE,
TableAddress: PVOID,
Entries: PDWORD,
Functions: *mut PRUNTIME_FUNCTION,
) -> DWORD}
pub const OUT_OF_PROCESS_FUNCTION_TABLE_CALLBACK_EXPORT_NAME: &'static str
= "OutOfProcessFunctionTableCallback";
STRUCT!{struct DISPATCHER_CONTEXT {
ControlPc: DWORD64,
ImageBase: DWORD64,
FunctionEntry: PRUNTIME_FUNCTION,
EstablisherFrame: DWORD64,
ContextRecord: PCONTEXT,
LanguageHandler: PEXCEPTION_ROUTINE,
HandlerData: PVOID,
HistoryTable: PUNWIND_HISTORY_TABLE,
ScopeIndex: DWORD,
Fill0: DWORD,
}}
pub type PDISPATCHER_CONTEXT = *mut DISPATCHER_CONTEXT;
}
// 3966
#[cfg(target_arch = "x86")]
pub const SIZE_OF_80387_REGISTERS: usize = 80;
#[cfg(target_arch = "x86")]
STRUCT!{struct FLOATING_SAVE_AREA {
ControlWord: DWORD,
StatusWord: DWORD,
TagWord: DWORD,
ErrorOffset: DWORD,
ErrorSelector: DWORD,
DataOffset: DWORD,
DataSelector: DWORD,
RegisterArea: [BYTE; SIZE_OF_80387_REGISTERS],
Spare0: DWORD,
}}
#[cfg(target_arch = "x86")]
pub type PFLOATING_SAVE_AREA = *mut FLOATING_SAVE_AREA;
#[cfg(target_arch = "x86")]
pub const MAXIMUM_SUPPORTED_EXTENSION: usize = 512;
#[cfg(target_arch = "x86")]
STRUCT!{struct CONTEXT {
ContextFlags: DWORD,
Dr0: DWORD,
Dr1: DWORD,
Dr2: DWORD,
Dr3: DWORD,
Dr6: DWORD,
Dr7: DWORD,
FloatSave: FLOATING_SAVE_AREA,
SegGs: DWORD,
SegFs: DWORD,
SegEs: DWORD,
SegDs: DWORD,
Edi: DWORD,
Esi: DWORD,
Ebx: DWORD,
Edx: DWORD,
Ecx: DWORD,
Eax: DWORD,
Ebp: DWORD,
Eip: DWORD,
SegCs: DWORD,
EFlags: DWORD,
Esp: DWORD,
SegSs: DWORD,
ExtendedRegisters: [BYTE; MAXIMUM_SUPPORTED_EXTENSION],
}}
#[cfg(target_arch = "x86")]
pub type PCONTEXT = *mut CONTEXT;
// Named-field view of `KNONVOLATILE_CONTEXT_POINTERS::FloatingContext`.
// This struct is unioned (via UNION! below) with `FloatingContext: [PM128A; 16]`,
// so it must contain all sixteen nonvolatile XMM slots. `Xmm13` was missing,
// which shifted `Xmm14`/`Xmm15` down one slot and left the struct one pointer
// smaller than the array it aliases.
STRUCT!{struct KNONVOLATILE_CONTEXT_POINTERS_u1 {
    Xmm0: PM128A,
    Xmm1: PM128A,
    Xmm2: PM128A,
    Xmm3: PM128A,
    Xmm4: PM128A,
    Xmm5: PM128A,
    Xmm6: PM128A,
    Xmm7: PM128A,
    Xmm8: PM128A,
    Xmm9: PM128A,
    Xmm10: PM128A,
    Xmm11: PM128A,
    Xmm12: PM128A,
    Xmm13: PM128A,
    Xmm14: PM128A,
    Xmm15: PM128A,
}}
// Named-field view of `KNONVOLATILE_CONTEXT_POINTERS::IntegerContext`.
// This struct is unioned (via UNION! below) with `IntegerContext: [PDWORD64; 16]`,
// and winnt.h declares these members as `PDWORD64` pointers into a saved
// context, not as `DWORD64` register values. The size is unchanged (both are
// 8 bytes on x86_64) but the element type now matches the aliased array.
STRUCT!{struct KNONVOLATILE_CONTEXT_POINTERS_u2 {
    Rax: PDWORD64,
    Rcx: PDWORD64,
    Rdx: PDWORD64,
    Rbx: PDWORD64,
    Rsp: PDWORD64,
    Rbp: PDWORD64,
    Rsi: PDWORD64,
    Rdi: PDWORD64,
    R8: PDWORD64,
    R9: PDWORD64,
    R10: PDWORD64,
    R11: PDWORD64,
    R12: PDWORD64,
    R13: PDWORD64,
    R14: PDWORD64,
    R15: PDWORD64,
}}
STRUCT!{struct KNONVOLATILE_CONTEXT_POINTERS {
FloatingContext: [PM128A; 16],
IntegerContext: [PDWORD64; 16],
}}
// FIXME: all unions are untagged
UNION!(
KNONVOLATILE_CONTEXT_POINTERS, FloatingContext, Xmms, Xmms_mut,
KNONVOLATILE_CONTEXT_POINTERS_u1
);
UNION!(
KNONVOLATILE_CONTEXT_POINTERS, IntegerContext, Regs, Regs_mut,
KNONVOLATILE_CONTEXT_POINTERS_u2
);
pub type PKNONVOLATILE_CONTEXT_POINTERS = *mut KNONVOLATILE_CONTEXT_POINTERS;
//8983
pub const EXCEPTION_MAXIMUM_PARAMETERS: usize = 15;
STRUCT!{struct EXCEPTION_RECORD {
ExceptionCode: DWORD,
ExceptionFlags: DWORD,
ExceptionRecord: *mut EXCEPTION_RECORD,
ExceptionAddress: PVOID,
NumberParameters: DWORD,
ExceptionInformation: [ULONG_PTR; EXCEPTION_MAXIMUM_PARAMETERS],
}}
pub type PEXCEPTION_RECORD = *mut EXCEPTION_RECORD;
//9023
STRUCT!{struct EXCEPTION_POINTERS {
ExceptionRecord: PEXCEPTION_RECORD,
ContextRecord: PCONTEXT,
}}
pub type PEXCEPTION_POINTERS = *mut EXCEPTION_POINTERS;
pub type PACCESS_TOKEN = PVOID;
pub type PSECURITY_DESCRIPTOR = PVOID;
pub type PSID = PVOID;
pub type PCLAIMS_BLOB = PVOID;
//9091
pub type ACCESS_MASK = DWORD;
pub type PACCESS_MASK = *mut ACCESS_MASK;
pub const DELETE: DWORD = 0x00010000;
pub const READ_CONTROL: DWORD = 0x00020000;
pub const WRITE_DAC: DWORD = 0x00040000;
pub const WRITE_OWNER: DWORD = 0x00080000;
pub const SYNCHRONIZE: DWORD = 0x00100000;
pub const STANDARD_RIGHTS_REQUIRED: DWORD = 0x000F0000;
pub const STANDARD_RIGHTS_READ: DWORD = READ_CONTROL;
pub const STANDARD_RIGHTS_WRITE: DWORD = READ_CONTROL;
pub const STANDARD_RIGHTS_EXECUTE: DWORD = READ_CONTROL;
pub const STANDARD_RIGHTS_ALL: DWORD = 0x001F0000;
pub const SPECIFIC_RIGHTS_ALL: DWORD = 0x0000FFFF;
pub const ACCESS_SYSTEM_SECURITY: DWORD = 0x01000000;
pub const MAXIMUM_ALLOWED: DWORD = 0x02000000;
pub const GENERIC_READ: DWORD = 0x80000000;
pub const GENERIC_WRITE: DWORD = 0x40000000;
pub const GENERIC_EXECUTE: DWORD = 0x20000000;
pub const GENERIC_ALL: DWORD = 0x10000000;
//9170
STRUCT!{struct LUID_AND_ATTRIBUTES {
Luid: LUID,
Attributes: DWORD,
}}
pub type PLUID_AND_ATTRIBUTES = *mut LUID_AND_ATTRIBUTES;
//9243
ENUM!{enum SID_NAME_USE {
SidTypeUser = 1,
SidTypeGroup,
SidTypeDomain,
SidTypeAlias,
SidTypeWellKnownGroup,
SidTypeDeletedAccount,
SidTypeInvalid,
SidTypeUnknown,
SidTypeComputer,
SidTypeLabel,
}}
pub type PSID_NAME_USE = *mut SID_NAME_USE;
STRUCT!{struct SID_AND_ATTRIBUTES {
Sid: PSID,
Attributes: DWORD,
}}
pub type PSID_AND_ATTRIBUTES = *mut SID_AND_ATTRIBUTES;
//9802
pub const ACL_REVISION: BYTE = 2;
pub const ACL_REVISION_DS: BYTE = 4;
pub const ACL_REVISION1: BYTE = 1;
pub const MIN_ACL_REVISION: BYTE = ACL_REVISION2;
pub const ACL_REVISION2: BYTE = 2;
pub const ACL_REVISION3: BYTE = 3;
pub const ACL_REVISION4: BYTE = 4;
pub const MAX_ACL_REVISION: BYTE = ACL_REVISION4;
STRUCT!{struct ACL {
AclRevision: BYTE,
Sbz1: BYTE,
AclSize: WORD,
AceCount: WORD,
Sbz2: WORD,
}}
pub type PACL = *mut ACL;
//9888
pub const SE_PRIVILEGE_ENABLED_BY_DEFAULT: DWORD = 0x00000001;
pub const SE_PRIVILEGE_ENABLED: DWORD = 0x00000002;
pub const SE_PRIVILEGE_REMOVED: DWORD = 0x00000004;
pub const SE_PRIVILEGE_USED_FOR_ACCESS: DWORD = 0x80000000;
pub const SE_PRIVILEGE_VALID_ATTRIBUTES: DWORD = SE_PRIVILEGE_ENABLED_BY_DEFAULT
| SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS;
pub const PRIVILEGE_SET_ALL_NECESSARY: DWORD = 1;
//10689
pub const TOKEN_ASSIGN_PRIMARY: DWORD = 0x0001;
pub const TOKEN_DUPLICATE: DWORD = 0x0002;
pub const TOKEN_IMPERSONATE: DWORD = 0x0004;
pub const TOKEN_QUERY: DWORD = 0x0008;
pub const TOKEN_QUERY_SOURCE: DWORD = 0x0010;
pub const TOKEN_ADJUST_PRIVILEGES: DWORD = 0x0020;
pub const TOKEN_ADJUST_GROUPS: DWORD = 0x0040;
pub const TOKEN_ADJUST_DEFAULT: DWORD = 0x0080;
pub const TOKEN_ADJUST_SESSIONID: DWORD = 0x0100;
pub const TOKEN_ALL_ACCESS_P: DWORD = STANDARD_RIGHTS_REQUIRED | TOKEN_ASSIGN_PRIMARY
| TOKEN_DUPLICATE | TOKEN_IMPERSONATE | TOKEN_QUERY | TOKEN_QUERY_SOURCE
| TOKEN_ADJUST_PRIVILEGES | TOKEN_ADJUST_GROUPS | TOKEN_ADJUST_DEFAULT;
pub const TOKEN_ALL_ACCESS: DWORD = TOKEN_ALL_ACCESS_P | TOKEN_ADJUST_SESSIONID;
pub const TOKEN_READ: DWORD = STANDARD_RIGHTS_READ | TOKEN_QUERY;
pub const TOKEN_WRITE: DWORD = STANDARD_RIGHTS_WRITE | TOKEN_ADJUST_PRIVILEGES
| TOKEN_ADJUST_GROUPS | TOKEN_ADJUST_DEFAULT;
pub const TOKEN_EXECUTE: DWORD = STANDARD_RIGHTS_EXECUTE;
//10823
STRUCT!{struct TOKEN_PRIVILEGES {
PrivilegeCount: DWORD,
Privileges: [LUID_AND_ATTRIBUTES; 0],
}}
pub type PTOKEN_PRIVILEGES = *mut TOKEN_PRIVILEGES;
//10965
// Value-type discriminants for CLAIM_SECURITY_ATTRIBUTE_V1.ValueType.
pub const CLAIM_SECURITY_ATTRIBUTE_TYPE_INVALID: WORD = 0x00;
pub const CLAIM_SECURITY_ATTRIBUTE_TYPE_INT64: WORD = 0x01;
pub const CLAIM_SECURITY_ATTRIBUTE_TYPE_UINT64: WORD = 0x02;
pub const CLAIM_SECURITY_ATTRIBUTE_TYPE_STRING: WORD = 0x03;
// Fully-qualified-binary-name claim value (version + name string pointer).
STRUCT!{struct CLAIM_SECURITY_ATTRIBUTE_FQBN_VALUE {
    Version: DWORD64,
    Name: PWSTR,
}}
pub type PCLAIM_SECURITY_ATTRIBUTE_FQBN_VALUE = *mut CLAIM_SECURITY_ATTRIBUTE_FQBN_VALUE;
pub const CLAIM_SECURITY_ATTRIBUTE_TYPE_FQBN: WORD = 0x04;
pub const CLAIM_SECURITY_ATTRIBUTE_TYPE_SID: WORD = 0x05;
pub const CLAIM_SECURITY_ATTRIBUTE_TYPE_BOOLEAN: WORD = 0x06;
// Raw byte-buffer claim value: pointer plus length in bytes.
STRUCT!{struct CLAIM_SECURITY_ATTRIBUTE_OCTET_STRING_VALUE {
    pValue: PVOID,
    ValueLength: DWORD,
}}
pub type PCLAIM_SECURITY_ATTRIBUTE_OCTET_STRING_VALUE =
    *mut CLAIM_SECURITY_ATTRIBUTE_OCTET_STRING_VALUE;
pub const CLAIM_SECURITY_ATTRIBUTE_TYPE_OCTET_STRING: WORD = 0x10;
// Claim attribute flags for CLAIM_SECURITY_ATTRIBUTE_V1.Flags.
pub const CLAIM_SECURITY_ATTRIBUTE_NON_INHERITABLE: DWORD = 0x0001;
pub const CLAIM_SECURITY_ATTRIBUTE_VALUE_CASE_SENSITIVE: DWORD = 0x0002;
pub const CLAIM_SECURITY_ATTRIBUTE_USE_FOR_DENY_ONLY: DWORD = 0x0004;
pub const CLAIM_SECURITY_ATTRIBUTE_DISABLED_BY_DEFAULT: DWORD = 0x0008;
pub const CLAIM_SECURITY_ATTRIBUTE_DISABLED: DWORD = 0x0010;
pub const CLAIM_SECURITY_ATTRIBUTE_MANDATORY: DWORD = 0x0020;
// Union of all defined claim attribute flag bits above.
pub const CLAIM_SECURITY_ATTRIBUTE_VALID_FLAGS: DWORD = CLAIM_SECURITY_ATTRIBUTE_NON_INHERITABLE
    | CLAIM_SECURITY_ATTRIBUTE_VALUE_CASE_SENSITIVE | CLAIM_SECURITY_ATTRIBUTE_USE_FOR_DENY_ONLY
    | CLAIM_SECURITY_ATTRIBUTE_DISABLED_BY_DEFAULT | CLAIM_SECURITY_ATTRIBUTE_DISABLED
    | CLAIM_SECURITY_ATTRIBUTE_MANDATORY;
// Upper 16 bits are reserved for caller-defined flags.
pub const CLAIM_SECURITY_ATTRIBUTE_CUSTOM_FLAGS: DWORD = 0xFFFF0000;
// Claim attribute header; the ValueCount values follow the struct in memory
// (see the SDK's "Put data here" note), hence no value field is declared.
STRUCT!{struct CLAIM_SECURITY_ATTRIBUTE_V1 {
    Name: PWSTR,
    ValueType: WORD,
    Reserved: WORD,
    Flags: DWORD,
    ValueCount: DWORD,
    // Put data here
}}
pub type PCLAIM_SECURITY_ATTRIBUTE_V1 = *mut CLAIM_SECURITY_ATTRIBUTE_V1;
// Self-relative variant: Name is an offset (DWORD) instead of a pointer.
STRUCT!{struct CLAIM_SECURITY_ATTRIBUTE_RELATIVE_V1 {
    Name: DWORD,
    ValueType: WORD,
    Reserved: WORD,
    Flags: DWORD,
    ValueCount: DWORD,
    // Put array here
}}
pub type PCLAIM_SECURITY_ATTRIBUTE_RELATIVE_V1 = *mut CLAIM_SECURITY_ATTRIBUTE_RELATIVE_V1;
pub const CLAIM_SECURITY_ATTRIBUTES_INFORMATION_VERSION_V1: WORD = 1;
pub const CLAIM_SECURITY_ATTRIBUTES_INFORMATION_VERSION: WORD =
    CLAIM_SECURITY_ATTRIBUTES_INFORMATION_VERSION_V1;
// Top-level container: version, count, and pointer to the first V1 attribute.
STRUCT!{struct CLAIM_SECURITY_ATTRIBUTES_INFORMATION {
    Version: WORD,
    Reserved: WORD,
    AttributeCount: DWORD,
    pAttributeV1: PCLAIM_SECURITY_ATTRIBUTE_V1,
}}
pub type PCLAIM_SECURITY_ATTRIBUTES_INFORMATION = *mut CLAIM_SECURITY_ATTRIBUTES_INFORMATION;
//11257
// SECURITY_INFORMATION is a plain DWORD bitmask selecting which parts of a
// security descriptor an operation applies to.
pub type SECURITY_INFORMATION = DWORD;
pub type PSECURITY_INFORMATION = *mut DWORD;
pub const OWNER_SECURITY_INFORMATION: SECURITY_INFORMATION = 0x00000001;
pub const GROUP_SECURITY_INFORMATION: SECURITY_INFORMATION = 0x00000002;
pub const DACL_SECURITY_INFORMATION: SECURITY_INFORMATION = 0x00000004;
pub const SACL_SECURITY_INFORMATION: SECURITY_INFORMATION = 0x00000008;
pub const LABEL_SECURITY_INFORMATION: SECURITY_INFORMATION = 0x00000010;
pub const ATTRIBUTE_SECURITY_INFORMATION: SECURITY_INFORMATION = 0x00000020;
pub const SCOPE_SECURITY_INFORMATION: SECURITY_INFORMATION = 0x00000040;
pub const PROCESS_TRUST_LABEL_SECURITY_INFORMATION: SECURITY_INFORMATION = 0x00000080;
pub const BACKUP_SECURITY_INFORMATION: SECURITY_INFORMATION = 0x00010000;
// High bits are modifiers controlling DACL/SACL inheritance protection.
pub const PROTECTED_DACL_SECURITY_INFORMATION: SECURITY_INFORMATION = 0x80000000;
pub const PROTECTED_SACL_SECURITY_INFORMATION: SECURITY_INFORMATION = 0x40000000;
pub const UNPROTECTED_DACL_SECURITY_INFORMATION: SECURITY_INFORMATION = 0x20000000;
pub const UNPROTECTED_SACL_SECURITY_INFORMATION: SECURITY_INFORMATION = 0x10000000;
ENUM!{enum SE_LEARNING_MODE_DATA_TYPE {
    SeLearningModeInvalidType = 0,
    SeLearningModeSettings,
    SeLearningModeMax,
}}
// AppContainer security capabilities (mirrors winnt.h SECURITY_CAPABILITIES).
STRUCT!{struct SECURITY_CAPABILITIES {
    AppContainerSid: PSID,
    Capabilities: PSID_AND_ATTRIBUTES,
    CapabilityCount: DWORD,
    Reserved: DWORD,
}}
pub type PSECURITY_CAPABILITIES = *mut SECURITY_CAPABILITIES;
pub type LPSECURITY_CAPABILITIES = *mut SECURITY_CAPABILITIES;
// Access-right masks for process objects.
pub const PROCESS_TERMINATE: DWORD = 0x0001;
pub const PROCESS_CREATE_THREAD: DWORD = 0x0002;
pub const PROCESS_SET_SESSIONID: DWORD = 0x0004;
pub const PROCESS_VM_OPERATION: DWORD = 0x0008;
pub const PROCESS_VM_READ: DWORD = 0x0010;
pub const PROCESS_VM_WRITE: DWORD = 0x0020;
pub const PROCESS_DUP_HANDLE: DWORD = 0x0040;
pub const PROCESS_CREATE_PROCESS: DWORD = 0x0080;
pub const PROCESS_SET_QUOTA: DWORD = 0x0100;
pub const PROCESS_SET_INFORMATION: DWORD = 0x0200;
pub const PROCESS_QUERY_INFORMATION: DWORD = 0x0400;
pub const PROCESS_SUSPEND_RESUME: DWORD = 0x0800;
pub const PROCESS_QUERY_LIMITED_INFORMATION: DWORD = 0x1000;
pub const PROCESS_SET_LIMITED_INFORMATION: DWORD = 0x2000;
// NOTE(review): 0xFFFF is the post-Vista definition of PROCESS_ALL_ACCESS
// (all specific rights); older SDKs used 0xFFF — this binding tracks the
// newer value.
pub const PROCESS_ALL_ACCESS: DWORD = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFFF;
//11007
// Thread base-priority deltas. The negative values are stored two's-
// complement in a DWORD via the `as DWORD` cast.
pub const THREAD_BASE_PRIORITY_LOWRT: DWORD = 15;
pub const THREAD_BASE_PRIORITY_MAX: DWORD = 2;
pub const THREAD_BASE_PRIORITY_MIN: DWORD = -2i32 as DWORD;
pub const THREAD_BASE_PRIORITY_IDLE: DWORD = -15i32 as DWORD;
//11018
// Per-process resource quota limits (mirrors winnt.h QUOTA_LIMITS).
STRUCT!{struct QUOTA_LIMITS {
    PagedPoolLimit: SIZE_T,
    NonPagedPoolLimit: SIZE_T,
    MinimumWorkingSetSize: SIZE_T,
    MaximumWorkingSetSize: SIZE_T,
    PagefileLimit: SIZE_T,
    TimeLimit: LARGE_INTEGER,
}}
pub type PQUOTA_LIMITS = *mut QUOTA_LIMITS;
// Flag bits for QUOTA_LIMITS_EX.Flags (hard working-set limits).
pub const QUOTA_LIMITS_HARDWS_MIN_ENABLE: DWORD = 0x00000001;
pub const QUOTA_LIMITS_HARDWS_MIN_DISABLE: DWORD = 0x00000002;
pub const QUOTA_LIMITS_HARDWS_MAX_ENABLE: DWORD = 0x00000004;
pub const QUOTA_LIMITS_HARDWS_MAX_DISABLE: DWORD = 0x00000008;
pub const QUOTA_LIMITS_USE_DEFAULT_LIMITS: DWORD = 0x00000010;
// CPU rate limit; BitFields packs RatePercent (bits 0..7) and reserved bits,
// exposed via the BITFIELD! accessors below.
STRUCT!{struct RATE_QUOTA_LIMIT {
    RateData: DWORD,
    BitFields: DWORD,
}}
BITFIELD!(RATE_QUOTA_LIMIT BitFields: DWORD [
    RatePercent set_RatePercent[0..7],
    Reserved0 set_Reserved0[7..32],
]);
pub type PRATE_QUOTA_LIMIT = *mut RATE_QUOTA_LIMIT;
// Extended quota limits: QUOTA_LIMITS plus working-set/CPU-rate controls.
STRUCT!{struct QUOTA_LIMITS_EX {
    PagedPoolLimit: SIZE_T,
    NonPagedPoolLimit: SIZE_T,
    MinimumWorkingSetSize: SIZE_T,
    MaximumWorkingSetSize: SIZE_T,
    PagefileLimit: SIZE_T,
    TimeLimit: LARGE_INTEGER,
    WorkingSetLimit: SIZE_T,
    Reserved2: SIZE_T,
    Reserved3: SIZE_T,
    Reserved4: SIZE_T,
    Flags: DWORD,
    CpuRateLimit: RATE_QUOTA_LIMIT,
}}
pub type PQUOTA_LIMITS_EX = *mut QUOTA_LIMITS_EX;
// I/O accounting counters (operation and byte-transfer totals).
STRUCT!{struct IO_COUNTERS {
    ReadOperationCount: ULONGLONG,
    WriteOperationCount: ULONGLONG,
    OtherOperationCount: ULONGLONG,
    ReadTransferCount: ULONGLONG,
    WriteTransferCount: ULONGLONG,
    OtherTransferCount: ULONGLONG,
}}
pub type PIO_COUNTERS = *mut IO_COUNTERS;
//11192
// Basic per-job limits, selected by the JOB_OBJECT_LIMIT_* bits in LimitFlags.
STRUCT!{struct JOBOBJECT_BASIC_LIMIT_INFORMATION {
    PerProcessUserTimeLimit: LARGE_INTEGER,
    PerJobUserTimeLimit: LARGE_INTEGER,
    LimitFlags: DWORD,
    MinimumWorkingSetSize: SIZE_T,
    MaximumWorkingSetSize: SIZE_T,
    ActiveProcessLimit: DWORD,
    Affinity: ULONG_PTR,
    PriorityClass: DWORD,
    SchedulingClass: DWORD,
}}
pub type PJOBOBJECT_BASIC_LIMIT_INFORMATION = *mut JOBOBJECT_BASIC_LIMIT_INFORMATION;
// Extended limits: embeds the basic limits plus memory accounting.
STRUCT!{struct JOBOBJECT_EXTENDED_LIMIT_INFORMATION {
    BasicLimitInformation: JOBOBJECT_BASIC_LIMIT_INFORMATION,
    IoInfo: IO_COUNTERS,
    ProcessMemoryLimit: SIZE_T,
    JobMemoryLimit: SIZE_T,
    PeakProcessMemoryUsed: SIZE_T,
    PeakJobMemoryUsed: SIZE_T,
}}
pub type PJOBOBJECT_EXTENDED_LIMIT_INFORMATION = *mut JOBOBJECT_EXTENDED_LIMIT_INFORMATION;
// Variable-length: ProcessIdList is a C flexible array member, modeled here
// as a zero-length array.
STRUCT!{struct JOBOBJECT_BASIC_PROCESS_ID_LIST {
    NumberOfAssignedProcesses: DWORD,
    NumberOfProcessIdsInList: DWORD,
    ProcessIdList: [ULONG_PTR; 0],
}}
//11712
// End-of-job-time actions.
pub const JOB_OBJECT_TERMINATE_AT_END_OF_JOB: DWORD = 0;
pub const JOB_OBJECT_POST_AT_END_OF_JOB: DWORD = 1;
// Completion-port message identifiers delivered for job events.
pub const JOB_OBJECT_MSG_END_OF_JOB_TIME: DWORD = 1;
pub const JOB_OBJECT_MSG_END_OF_PROCESS_TIME: DWORD = 2;
pub const JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT: DWORD = 3;
pub const JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO: DWORD = 4;
pub const JOB_OBJECT_MSG_NEW_PROCESS: DWORD = 6;
pub const JOB_OBJECT_MSG_EXIT_PROCESS: DWORD = 7;
pub const JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS: DWORD = 8;
pub const JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT: DWORD = 9;
pub const JOB_OBJECT_MSG_JOB_MEMORY_LIMIT: DWORD = 10;
pub const JOB_OBJECT_MSG_NOTIFICATION_LIMIT: DWORD = 11;
pub const JOB_OBJECT_MSG_JOB_CYCLE_TIME_LIMIT: DWORD = 12;
pub const JOB_OBJECT_MSG_MINIMUM: DWORD = 1;
pub const JOB_OBJECT_MSG_MAXIMUM: DWORD = 12;
// Bitmask with one bit set per message id in [MINIMUM, MAXIMUM], computed as
// the difference of two contiguous low-bit masks.
pub const JOB_OBJECT_VALID_COMPLETION_FILTER: DWORD = ((1 << (JOB_OBJECT_MSG_MAXIMUM + 1)) - 1)
    - ((1 << JOB_OBJECT_MSG_MINIMUM) - 1);
// JOBOBJECT_BASIC_LIMIT_INFORMATION.LimitFlags bits.
pub const JOB_OBJECT_LIMIT_WORKINGSET: DWORD = 0x00000001;
pub const JOB_OBJECT_LIMIT_PROCESS_TIME: DWORD = 0x00000002;
pub const JOB_OBJECT_LIMIT_JOB_TIME: DWORD = 0x00000004;
pub const JOB_OBJECT_LIMIT_ACTIVE_PROCESS: DWORD = 0x00000008;
pub const JOB_OBJECT_LIMIT_AFFINITY: DWORD = 0x00000010;
pub const JOB_OBJECT_LIMIT_PRIORITY_CLASS: DWORD = 0x00000020;
pub const JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME: DWORD = 0x00000040;
pub const JOB_OBJECT_LIMIT_SCHEDULING_CLASS: DWORD = 0x00000080;
pub const JOB_OBJECT_LIMIT_PROCESS_MEMORY: DWORD = 0x00000100;
pub const JOB_OBJECT_LIMIT_JOB_MEMORY: DWORD = 0x00000200;
pub const JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION: DWORD = 0x00000400;
pub const JOB_OBJECT_LIMIT_BREAKAWAY_OK: DWORD = 0x00000800;
pub const JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK: DWORD = 0x00001000;
pub const JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE: DWORD = 0x00002000;
pub const JOB_OBJECT_LIMIT_SUBSET_AFFINITY: DWORD = 0x00004000;
pub const JOB_OBJECT_LIMIT_JOB_READ_BYTES: DWORD = 0x00010000;
pub const JOB_OBJECT_LIMIT_JOB_WRITE_BYTES: DWORD = 0x00020000;
pub const JOB_OBJECT_LIMIT_RATE_CONTROL: DWORD = 0x00040000;
pub const JOB_OBJECT_LIMIT_RESERVED3: DWORD = 0x00008000;
// Validity masks for the various limit-information structures.
pub const JOB_OBJECT_LIMIT_VALID_FLAGS: DWORD = 0x0007ffff;
pub const JOB_OBJECT_BASIC_LIMIT_VALID_FLAGS: DWORD = 0x000000ff;
pub const JOB_OBJECT_EXTENDED_LIMIT_VALID_FLAGS: DWORD = 0x00007fff;
pub const JOB_OBJECT_NOTIFICATION_LIMIT_VALID_FLAGS: DWORD = 0x00070204;
pub const JOB_OBJECT_RESERVED_LIMIT_VALID_FLAGS: DWORD = 0x0007ffff;
// UI restriction bits (JOBOBJECT_BASIC_UI_RESTRICTIONS).
pub const JOB_OBJECT_UILIMIT_NONE: DWORD = 0x00000000;
pub const JOB_OBJECT_UILIMIT_HANDLES: DWORD = 0x00000001;
pub const JOB_OBJECT_UILIMIT_READCLIPBOARD: DWORD = 0x00000002;
pub const JOB_OBJECT_UILIMIT_WRITECLIPBOARD: DWORD = 0x00000004;
pub const JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS: DWORD = 0x00000008;
pub const JOB_OBJECT_UILIMIT_DISPLAYSETTINGS: DWORD = 0x00000010;
pub const JOB_OBJECT_UILIMIT_GLOBALATOMS: DWORD = 0x00000020;
pub const JOB_OBJECT_UILIMIT_DESKTOP: DWORD = 0x00000040;
pub const JOB_OBJECT_UILIMIT_EXITWINDOWS: DWORD = 0x00000080;
pub const JOB_OBJECT_UILIMIT_ALL: DWORD = 0x000000FF;
pub const JOB_OBJECT_UI_VALID_FLAGS: DWORD = 0x000000FF;
// Security limit bits.
pub const JOB_OBJECT_SECURITY_NO_ADMIN: DWORD = 0x00000001;
pub const JOB_OBJECT_SECURITY_RESTRICTED_TOKEN: DWORD = 0x00000002;
pub const JOB_OBJECT_SECURITY_ONLY_TOKEN: DWORD = 0x00000004;
pub const JOB_OBJECT_SECURITY_FILTER_TOKENS: DWORD = 0x00000008;
pub const JOB_OBJECT_SECURITY_VALID_FLAGS: DWORD = 0x0000000f;
// CPU rate-control bits.
pub const JOB_OBJECT_CPU_RATE_CONTROL_ENABLE: DWORD = 0x1;
pub const JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED: DWORD = 0x2;
pub const JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP: DWORD = 0x4;
pub const JOB_OBJECT_CPU_RATE_CONTROL_NOTIFY: DWORD = 0x8;
pub const JOB_OBJECT_CPU_RATE_CONTROL_VALID_FLAGS: DWORD = 0xf;
// Information classes for Query/SetInformationJobObject. The explicit
// `= 18`, `= 31`, `= 37`, `= 41` discriminants re-anchor the numbering where
// the SDK skips or reserves values; intermediate variants auto-increment.
ENUM!{enum JOBOBJECTINFOCLASS {
    JobObjectBasicAccountingInformation = 1,
    JobObjectBasicLimitInformation,
    JobObjectBasicProcessIdList,
    JobObjectBasicUIRestrictions,
    JobObjectSecurityLimitInformation,
    JobObjectEndOfJobTimeInformation,
    JobObjectAssociateCompletionPortInformation,
    JobObjectBasicAndIoAccountingInformation,
    JobObjectExtendedLimitInformation,
    JobObjectJobSetInformation,
    JobObjectGroupInformation,
    JobObjectNotificationLimitInformation,
    JobObjectLimitViolationInformation,
    JobObjectGroupInformationEx,
    JobObjectCpuRateControlInformation,
    JobObjectCompletionFilter,
    JobObjectCompletionCounter,
    JobObjectReserved1Information = 18,
    JobObjectReserved2Information,
    JobObjectReserved3Information,
    JobObjectReserved4Information,
    JobObjectReserved5Information,
    JobObjectReserved6Information,
    JobObjectReserved7Information,
    JobObjectReserved8Information,
    JobObjectReserved9Information,
    JobObjectReserved10Information,
    JobObjectReserved11Information,
    JobObjectReserved12Information,
    JobObjectReserved13Information,
    JobObjectReserved14Information = 31,
    JobObjectNetRateControlInformation,
    JobObjectNotificationLimitInformation2,
    JobObjectLimitViolationInformation2,
    JobObjectCreateSilo,
    JobObjectSiloBasicInformation,
    JobObjectReserved15Information = 37,
    JobObjectReserved16Information,
    JobObjectReserved17Information,
    JobObjectReserved18Information,
    JobObjectReserved19Information = 41,
    JobObjectReserved20Information,
    MaxJobObjectInfoClass,
}}
//12063
// Access rights for section (file-mapping) objects.
pub const SECTION_QUERY: DWORD = 0x0001;
pub const SECTION_MAP_WRITE: DWORD = 0x0002;
pub const SECTION_MAP_READ: DWORD = 0x0004;
pub const SECTION_MAP_EXECUTE: DWORD = 0x0008;
pub const SECTION_EXTEND_SIZE: DWORD = 0x0010;
pub const SECTION_MAP_EXECUTE_EXPLICIT: DWORD = 0x0020;
pub const SECTION_ALL_ACCESS: DWORD = STANDARD_RIGHTS_REQUIRED | SECTION_QUERY
    | SECTION_MAP_WRITE | SECTION_MAP_READ | SECTION_MAP_EXECUTE | SECTION_EXTEND_SIZE;
//12100
// Page-protection constants for VirtualAlloc/VirtualProtect and friends.
pub const PAGE_NOACCESS: DWORD = 0x01;
pub const PAGE_READONLY: DWORD = 0x02;
pub const PAGE_READWRITE: DWORD = 0x04;
pub const PAGE_WRITECOPY: DWORD = 0x08;
pub const PAGE_EXECUTE: DWORD = 0x10;
pub const PAGE_EXECUTE_READ: DWORD = 0x20;
pub const PAGE_EXECUTE_READWRITE: DWORD = 0x40;
pub const PAGE_EXECUTE_WRITECOPY: DWORD = 0x80;
pub const PAGE_GUARD: DWORD = 0x100;
pub const PAGE_NOCACHE: DWORD = 0x200;
pub const PAGE_WRITECOMBINE: DWORD = 0x400;
pub const PAGE_REVERT_TO_FILE_MAP: DWORD = 0x80000000;
// NOTE: NO_UPDATE and INVALID intentionally share the same bit in the SDK;
// the meaning depends on whether CFG is enabled for the call.
pub const PAGE_TARGETS_NO_UPDATE: DWORD = 0x40000000;
pub const PAGE_TARGETS_INVALID: DWORD = 0x40000000;
// Memory state/type flags (MEM_*).
pub const MEM_COMMIT: DWORD = 0x1000;
pub const MEM_RESERVE: DWORD = 0x2000;
pub const MEM_DECOMMIT: DWORD = 0x4000;
pub const MEM_RELEASE: DWORD = 0x8000;
pub const MEM_FREE: DWORD = 0x10000;
pub const MEM_PRIVATE: DWORD = 0x20000;
pub const MEM_MAPPED: DWORD = 0x40000;
pub const MEM_RESET: DWORD = 0x80000;
pub const MEM_TOP_DOWN: DWORD = 0x100000;
pub const MEM_WRITE_WATCH: DWORD = 0x200000;
pub const MEM_PHYSICAL: DWORD = 0x400000;
pub const MEM_ROTATE: DWORD = 0x800000;
pub const MEM_DIFFERENT_IMAGE_BASE_OK: DWORD = 0x800000;
pub const MEM_RESET_UNDO: DWORD = 0x1000000;
pub const MEM_LARGE_PAGES: DWORD = 0x20000000;
pub const MEM_4MB_PAGES: DWORD = 0x80000000;
// Section attribute flags (SEC_*), used with CreateFileMapping.
pub const SEC_FILE: DWORD = 0x800000;
pub const SEC_IMAGE: DWORD = 0x1000000;
pub const SEC_PROTECTED_IMAGE: DWORD = 0x2000000;
pub const SEC_RESERVE: DWORD = 0x4000000;
pub const SEC_COMMIT: DWORD = 0x8000000;
pub const SEC_NOCACHE: DWORD = 0x10000000;
pub const SEC_WRITECOMBINE: DWORD = 0x40000000;
pub const SEC_LARGE_PAGES: DWORD = 0x80000000;
pub const SEC_IMAGE_NO_EXECUTE: DWORD = (SEC_IMAGE | SEC_NOCACHE);
pub const MEM_IMAGE: DWORD = SEC_IMAGE;
pub const WRITE_WATCH_FLAG_RESET: DWORD = 0x01;
pub const MEM_UNMAP_WITH_TRANSIENT_BOOST: DWORD = 0x01;
//12217
// File/directory/pipe access rights. Several values are intentionally
// aliased (e.g. READ_DATA vs LIST_DIRECTORY): the meaning depends on the
// kind of object the handle refers to.
pub const FILE_READ_DATA: DWORD = 0x0001;
pub const FILE_LIST_DIRECTORY: DWORD = 0x0001;
pub const FILE_WRITE_DATA: DWORD = 0x0002;
pub const FILE_ADD_FILE: DWORD = 0x0002;
pub const FILE_APPEND_DATA: DWORD = 0x0004;
pub const FILE_ADD_SUBDIRECTORY: DWORD = 0x0004;
pub const FILE_CREATE_PIPE_INSTANCE: DWORD = 0x0004;
pub const FILE_READ_EA: DWORD = 0x0008;
pub const FILE_WRITE_EA: DWORD = 0x0010;
pub const FILE_EXECUTE: DWORD = 0x0020;
pub const FILE_TRAVERSE: DWORD = 0x0020;
pub const FILE_DELETE_CHILD: DWORD = 0x0040;
pub const FILE_READ_ATTRIBUTES: DWORD = 0x0080;
pub const FILE_WRITE_ATTRIBUTES: DWORD = 0x0100;
// Composite generic rights, built from the specific bits above.
pub const FILE_ALL_ACCESS: DWORD = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF;
pub const FILE_GENERIC_READ: DWORD = STANDARD_RIGHTS_READ | FILE_READ_DATA
    | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE;
pub const FILE_GENERIC_WRITE: DWORD = STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA
    | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE;
pub const FILE_GENERIC_EXECUTE: DWORD = STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES
    | FILE_EXECUTE | SYNCHRONIZE;
// Share-mode flags for CreateFile.
pub const FILE_SHARE_READ: DWORD = 0x00000001;
pub const FILE_SHARE_WRITE: DWORD = 0x00000002;
pub const FILE_SHARE_DELETE: DWORD = 0x00000004;
// File attribute flags (FILE_ATTRIBUTE_*).
pub const FILE_ATTRIBUTE_READONLY: DWORD = 0x00000001;
pub const FILE_ATTRIBUTE_HIDDEN: DWORD = 0x00000002;
pub const FILE_ATTRIBUTE_SYSTEM: DWORD = 0x00000004;
pub const FILE_ATTRIBUTE_DIRECTORY: DWORD = 0x00000010;
pub const FILE_ATTRIBUTE_ARCHIVE: DWORD = 0x00000020;
pub const FILE_ATTRIBUTE_DEVICE: DWORD = 0x00000040;
pub const FILE_ATTRIBUTE_NORMAL: DWORD = 0x00000080;
pub const FILE_ATTRIBUTE_TEMPORARY: DWORD = 0x00000100;
pub const FILE_ATTRIBUTE_SPARSE_FILE: DWORD = 0x00000200;
pub const FILE_ATTRIBUTE_REPARSE_POINT: DWORD = 0x00000400;
pub const FILE_ATTRIBUTE_COMPRESSED: DWORD = 0x00000800;
pub const FILE_ATTRIBUTE_OFFLINE: DWORD = 0x00001000;
pub const FILE_ATTRIBUTE_NOT_CONTENT_INDEXED: DWORD = 0x00002000;
pub const FILE_ATTRIBUTE_ENCRYPTED: DWORD = 0x00004000;
pub const FILE_ATTRIBUTE_INTEGRITY_STREAM: DWORD = 0x00008000;
pub const FILE_ATTRIBUTE_VIRTUAL: DWORD = 0x00010000;
pub const FILE_ATTRIBUTE_NO_SCRUB_DATA: DWORD = 0x00020000;
pub const FILE_ATTRIBUTE_EA: DWORD = 0x00040000;
// Change-notification filter flags for ReadDirectoryChangesW.
pub const FILE_NOTIFY_CHANGE_FILE_NAME: DWORD = 0x00000001;
pub const FILE_NOTIFY_CHANGE_DIR_NAME: DWORD = 0x00000002;
pub const FILE_NOTIFY_CHANGE_ATTRIBUTES: DWORD = 0x00000004;
pub const FILE_NOTIFY_CHANGE_SIZE: DWORD = 0x00000008;
pub const FILE_NOTIFY_CHANGE_LAST_WRITE: DWORD = 0x00000010;
pub const FILE_NOTIFY_CHANGE_LAST_ACCESS: DWORD = 0x00000020;
pub const FILE_NOTIFY_CHANGE_CREATION: DWORD = 0x00000040;
pub const FILE_NOTIFY_CHANGE_SECURITY: DWORD = 0x00000100;
// Action codes reported in FILE_NOTIFY_INFORMATION.Action.
pub const FILE_ACTION_ADDED: DWORD = 0x00000001;
pub const FILE_ACTION_REMOVED: DWORD = 0x00000002;
pub const FILE_ACTION_MODIFIED: DWORD = 0x00000003;
pub const FILE_ACTION_RENAMED_OLD_NAME: DWORD = 0x00000004;
pub const FILE_ACTION_RENAMED_NEW_NAME: DWORD = 0x00000005;
pub const MAILSLOT_NO_MESSAGE: DWORD = 0xFFFFFFFF;
pub const MAILSLOT_WAIT_FOREVER: DWORD = 0xFFFFFFFF;
// File-system capability flags returned by GetVolumeInformation.
pub const FILE_CASE_SENSITIVE_SEARCH: DWORD = 0x00000001;
pub const FILE_CASE_PRESERVED_NAMES: DWORD = 0x00000002;
pub const FILE_UNICODE_ON_DISK: DWORD = 0x00000004;
pub const FILE_PERSISTENT_ACLS: DWORD = 0x00000008;
pub const FILE_FILE_COMPRESSION: DWORD = 0x00000010;
pub const FILE_VOLUME_QUOTAS: DWORD = 0x00000020;
pub const FILE_SUPPORTS_SPARSE_FILES: DWORD = 0x00000040;
pub const FILE_SUPPORTS_REPARSE_POINTS: DWORD = 0x00000080;
pub const FILE_SUPPORTS_REMOTE_STORAGE: DWORD = 0x00000100;
pub const FILE_VOLUME_IS_COMPRESSED: DWORD = 0x00008000;
pub const FILE_SUPPORTS_OBJECT_IDS: DWORD = 0x00010000;
pub const FILE_SUPPORTS_ENCRYPTION: DWORD = 0x00020000;
pub const FILE_NAMED_STREAMS: DWORD = 0x00040000;
pub const FILE_READ_ONLY_VOLUME: DWORD = 0x00080000;
pub const FILE_SEQUENTIAL_WRITE_ONCE: DWORD = 0x00100000;
pub const FILE_SUPPORTS_TRANSACTIONS: DWORD = 0x00200000;
pub const FILE_SUPPORTS_HARD_LINKS: DWORD = 0x00400000;
pub const FILE_SUPPORTS_EXTENDED_ATTRIBUTES: DWORD = 0x00800000;
pub const FILE_SUPPORTS_OPEN_BY_FILE_ID: DWORD = 0x01000000;
pub const FILE_SUPPORTS_USN_JOURNAL: DWORD = 0x02000000;
pub const FILE_SUPPORTS_INTEGRITY_STREAMS: DWORD = 0x04000000;
pub const FILE_INVALID_FILE_ID: LONGLONG = -1;
// 128-bit file identifier.
STRUCT!{struct FILE_ID_128 {
    Identifier: [BYTE; 16],
}}
pub type PFILE_ID_128 = *mut FILE_ID_128;
// Directory-change record; FileName is a C flexible array member of
// FileNameLength bytes of UTF-16, modeled as a zero-length array.
STRUCT!{struct FILE_NOTIFY_INFORMATION {
    NextEntryOffset: DWORD,
    Action: DWORD,
    FileNameLength: DWORD,
    FileName: [WCHAR; 0],
}}
// Scatter/gather element for ReadFileScatter/WriteFileGather. In the SDK
// this is a union of a 64-bit pointer and an alignment member; modeled here
// with both fields declared (same size/alignment).
STRUCT!{struct FILE_SEGMENT_ELEMENT {
    Buffer: PVOID64,
    Alignment: ULONGLONG,
}}
pub type PFILE_SEGMENT_ELEMENT = *mut FILE_SEGMENT_ELEMENT;
//12475
// Well-known reparse-point tags. The top bits encode tag properties in the
// SDK scheme (0x8... = Microsoft tag, 0xA... = Microsoft + name-surrogate,
// 0xC... = Microsoft + high-latency) — TODO confirm against winnt.h macros.
pub const IO_REPARSE_TAG_MOUNT_POINT: DWORD = 0xA0000003;
pub const IO_REPARSE_TAG_HSM: DWORD = 0xC0000004;
pub const IO_REPARSE_TAG_HSM2: DWORD = 0x80000006;
pub const IO_REPARSE_TAG_SIS: DWORD = 0x80000007;
pub const IO_REPARSE_TAG_WIM: DWORD = 0x80000008;
pub const IO_REPARSE_TAG_CSV: DWORD = 0x80000009;
pub const IO_REPARSE_TAG_DFS: DWORD = 0x8000000A;
pub const IO_REPARSE_TAG_SYMLINK: DWORD = 0xA000000C;
pub const IO_REPARSE_TAG_DFSR: DWORD = 0x80000012;
pub const IO_REPARSE_TAG_DEDUP: DWORD = 0x80000013;
pub const IO_REPARSE_TAG_NFS: DWORD = 0x80000014;
pub const IO_REPARSE_TAG_FILE_PLACEHOLDER: DWORD = 0x80000015;
pub const IO_REPARSE_TAG_WOF: DWORD = 0x80000017;
//12788
// Option flags for DuplicateHandle.
pub const DUPLICATE_CLOSE_SOURCE: DWORD = 0x00000001;
pub const DUPLICATE_SAME_ACCESS: DWORD = 0x00000002;
//14708
// Per-processor power policy; AllowDemotion/AllowPromotion are bitfield
// accessors over Reserved, generated by BITFIELD! below.
STRUCT!{struct PROCESSOR_POWER_POLICY_INFO {
    TimeCheck: DWORD,
    DemoteLimit: DWORD,
    PromoteLimit: DWORD,
    DemotePercent: BYTE,
    PromotePercent: BYTE,
    Spare: [BYTE; 2],
    Reserved: DWORD,
}}
BITFIELD!(PROCESSOR_POWER_POLICY_INFO Reserved: DWORD [
    AllowDemotion set_AllowDemotion[0..1],
    AllowPromotion set_AllowPromotion[1..2],
]);
pub type PPROCESSOR_POWER_POLICY_INFO = *mut PROCESSOR_POWER_POLICY_INFO;
//15000
// PE/COFF file header (mirrors winnt.h IMAGE_FILE_HEADER).
STRUCT!{struct IMAGE_FILE_HEADER {
    Machine: WORD,
    NumberOfSections: WORD,
    TimeDateStamp: DWORD,
    PointerToSymbolTable: DWORD,
    NumberOfSymbols: DWORD,
    SizeOfOptionalHeader: WORD,
    Characteristics: WORD,
}}
pub type PIMAGE_FILE_HEADER = *mut IMAGE_FILE_HEADER;
pub const IMAGE_SIZEOF_FILE_HEADER: usize = 20;
// IMAGE_FILE_HEADER.Characteristics flag bits.
pub const IMAGE_FILE_RELOCS_STRIPPED: WORD = 0x0001;
pub const IMAGE_FILE_EXECUTABLE_IMAGE: WORD = 0x0002;
pub const IMAGE_FILE_LINE_NUMS_STRIPPED: WORD = 0x0004;
pub const IMAGE_FILE_LOCAL_SYMS_STRIPPED: WORD = 0x0008;
// "AGGRESIVE" spelling is the SDK's own typo; kept for ABI name parity.
pub const IMAGE_FILE_AGGRESIVE_WS_TRIM: WORD = 0x0010;
pub const IMAGE_FILE_LARGE_ADDRESS_AWARE: WORD = 0x0020;
pub const IMAGE_FILE_BYTES_REVERSED_LO: WORD = 0x0080;
pub const IMAGE_FILE_32BIT_MACHINE: WORD = 0x0100;
pub const IMAGE_FILE_DEBUG_STRIPPED: WORD = 0x0200;
pub const IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP: WORD = 0x0400;
pub const IMAGE_FILE_NET_RUN_FROM_SWAP: WORD = 0x0800;
pub const IMAGE_FILE_SYSTEM: WORD = 0x1000;
pub const IMAGE_FILE_DLL: WORD = 0x2000;
pub const IMAGE_FILE_UP_SYSTEM_ONLY: WORD = 0x4000;
pub const IMAGE_FILE_BYTES_REVERSED_HI: WORD = 0x8000;
// IMAGE_FILE_HEADER.Machine values (target architecture identifiers).
pub const IMAGE_FILE_MACHINE_UNKNOWN: WORD = 0;
pub const IMAGE_FILE_MACHINE_I386: WORD = 0x014c;
pub const IMAGE_FILE_MACHINE_R3000: WORD = 0x0162;
pub const IMAGE_FILE_MACHINE_R4000: WORD = 0x0166;
pub const IMAGE_FILE_MACHINE_R10000: WORD = 0x0168;
pub const IMAGE_FILE_MACHINE_WCEMIPSV2: WORD = 0x0169;
pub const IMAGE_FILE_MACHINE_ALPHA: WORD = 0x0184;
pub const IMAGE_FILE_MACHINE_SH3: WORD = 0x01a2;
pub const IMAGE_FILE_MACHINE_SH3DSP: WORD = 0x01a3;
pub const IMAGE_FILE_MACHINE_SH3E: WORD = 0x01a4;
pub const IMAGE_FILE_MACHINE_SH4: WORD = 0x01a6;
pub const IMAGE_FILE_MACHINE_SH5: WORD = 0x01a8;
pub const IMAGE_FILE_MACHINE_ARM: WORD = 0x01c0;
pub const IMAGE_FILE_MACHINE_THUMB: WORD = 0x01c2;
pub const IMAGE_FILE_MACHINE_ARMNT: WORD = 0x01c4;
pub const IMAGE_FILE_MACHINE_AM33: WORD = 0x01d3;
pub const IMAGE_FILE_MACHINE_POWERPC: WORD = 0x01F0;
pub const IMAGE_FILE_MACHINE_POWERPCFP: WORD = 0x01f1;
pub const IMAGE_FILE_MACHINE_IA64: WORD = 0x0200;
pub const IMAGE_FILE_MACHINE_MIPS16: WORD = 0x0266;
pub const IMAGE_FILE_MACHINE_ALPHA64: WORD = 0x0284;
pub const IMAGE_FILE_MACHINE_MIPSFPU: WORD = 0x0366;
pub const IMAGE_FILE_MACHINE_MIPSFPU16: WORD = 0x0466;
pub const IMAGE_FILE_MACHINE_AXP64: WORD = IMAGE_FILE_MACHINE_ALPHA64;
pub const IMAGE_FILE_MACHINE_TRICORE: WORD = 0x0520;
pub const IMAGE_FILE_MACHINE_CEF: WORD = 0x0CEF;
pub const IMAGE_FILE_MACHINE_EBC: WORD = 0x0EBC;
pub const IMAGE_FILE_MACHINE_AMD64: WORD = 0x8664;
pub const IMAGE_FILE_MACHINE_M32R: WORD = 0x9041;
pub const IMAGE_FILE_MACHINE_CEE: WORD = 0xC0EE;
// RVA/size pair used for each entry of the optional header's data directory.
STRUCT!{struct IMAGE_DATA_DIRECTORY {
    VirtualAddress: DWORD,
    Size: DWORD,
}}
pub type PIMAGE_DATA_DIRECTORY = *mut IMAGE_DATA_DIRECTORY;
pub const IMAGE_NUMBEROF_DIRECTORY_ENTRIES: usize = 16;
// 32-bit PE optional header (Magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC).
STRUCT!{struct IMAGE_OPTIONAL_HEADER32 {
    Magic: WORD,
    MajorLinkerVersion: BYTE,
    MinorLinkerVersion: BYTE,
    SizeOfCode: DWORD,
    SizeOfInitializedData: DWORD,
    SizeOfUninitializedData: DWORD,
    AddressOfEntryPoint: DWORD,
    BaseOfCode: DWORD,
    BaseOfData: DWORD,
    ImageBase: DWORD,
    SectionAlignment: DWORD,
    FileAlignment: DWORD,
    MajorOperatingSystemVersion: WORD,
    MinorOperatingSystemVersion: WORD,
    MajorImageVersion: WORD,
    MinorImageVersion: WORD,
    MajorSubsystemVersion: WORD,
    MinorSubsystemVersion: WORD,
    Win32VersionValue: DWORD,
    SizeOfImage: DWORD,
    SizeOfHeaders: DWORD,
    CheckSum: DWORD,
    Subsystem: WORD,
    DllCharacteristics: WORD,
    SizeOfStackReserve: DWORD,
    SizeOfStackCommit: DWORD,
    SizeOfHeapReserve: DWORD,
    SizeOfHeapCommit: DWORD,
    LoaderFlags: DWORD,
    NumberOfRvaAndSizes: DWORD,
    DataDirectory: [IMAGE_DATA_DIRECTORY; IMAGE_NUMBEROF_DIRECTORY_ENTRIES],
}}
pub type PIMAGE_OPTIONAL_HEADER32 = *mut IMAGE_OPTIONAL_HEADER32;
// ROM-image optional header (Magic == IMAGE_ROM_OPTIONAL_HDR_MAGIC).
STRUCT!{struct IMAGE_ROM_OPTIONAL_HEADER {
    Magic: WORD,
    MajorLinkerVersion: BYTE,
    MinorLinkerVersion: BYTE,
    SizeOfCode: DWORD,
    SizeOfInitializedData: DWORD,
    SizeOfUninitializedData: DWORD,
    AddressOfEntryPoint: DWORD,
    BaseOfCode: DWORD,
    BaseOfData: DWORD,
    BaseOfBss: DWORD,
    GprMask: DWORD,
    CprMask: [DWORD; 4],
    GpValue: DWORD,
}}
pub type PIMAGE_ROM_OPTIONAL_HEADER = *mut IMAGE_ROM_OPTIONAL_HEADER;
// 64-bit (PE32+) optional header: no BaseOfData, and ImageBase plus the
// stack/heap size fields widen to ULONGLONG.
STRUCT!{struct IMAGE_OPTIONAL_HEADER64 {
    Magic: WORD,
    MajorLinkerVersion: BYTE,
    MinorLinkerVersion: BYTE,
    SizeOfCode: DWORD,
    SizeOfInitializedData: DWORD,
    SizeOfUninitializedData: DWORD,
    AddressOfEntryPoint: DWORD,
    BaseOfCode: DWORD,
    ImageBase: ULONGLONG,
    SectionAlignment: DWORD,
    FileAlignment: DWORD,
    MajorOperatingSystemVersion: WORD,
    MinorOperatingSystemVersion: WORD,
    MajorImageVersion: WORD,
    MinorImageVersion: WORD,
    MajorSubsystemVersion: WORD,
    MinorSubsystemVersion: WORD,
    Win32VersionValue: DWORD,
    SizeOfImage: DWORD,
    SizeOfHeaders: DWORD,
    CheckSum: DWORD,
    Subsystem: WORD,
    DllCharacteristics: WORD,
    SizeOfStackReserve: ULONGLONG,
    SizeOfStackCommit: ULONGLONG,
    SizeOfHeapReserve: ULONGLONG,
    SizeOfHeapCommit: ULONGLONG,
    LoaderFlags: DWORD,
    NumberOfRvaAndSizes: DWORD,
    DataDirectory: [IMAGE_DATA_DIRECTORY; IMAGE_NUMBEROF_DIRECTORY_ENTRIES],
}}
pub type PIMAGE_OPTIONAL_HEADER64 = *mut IMAGE_OPTIONAL_HEADER64;
// Magic values distinguishing the three optional-header layouts.
pub const IMAGE_NT_OPTIONAL_HDR32_MAGIC: WORD = 0x10b;
pub const IMAGE_NT_OPTIONAL_HDR64_MAGIC: WORD = 0x20b;
pub const IMAGE_ROM_OPTIONAL_HDR_MAGIC: WORD = 0x107;
// Architecture-neutral aliases, selected at compile time like the SDK's
// #ifdef _WIN64 dispatch.
#[cfg(target_arch = "x86_64")]
pub type IMAGE_OPTIONAL_HEADER = IMAGE_OPTIONAL_HEADER64;
#[cfg(target_arch = "x86_64")]
pub type PIMAGE_OPTIONAL_HEADER = PIMAGE_OPTIONAL_HEADER64;
#[cfg(target_arch = "x86")]
pub type IMAGE_OPTIONAL_HEADER = IMAGE_OPTIONAL_HEADER32;
#[cfg(target_arch = "x86")]
pub type PIMAGE_OPTIONAL_HEADER = PIMAGE_OPTIONAL_HEADER32;
// PE headers: "PE\0\0" signature, COFF file header, then the optional header.
STRUCT!{struct IMAGE_NT_HEADERS64 {
    Signature: DWORD,
    FileHeader: IMAGE_FILE_HEADER,
    OptionalHeader: IMAGE_OPTIONAL_HEADER64,
}}
pub type PIMAGE_NT_HEADERS64 = *mut IMAGE_NT_HEADERS64;
STRUCT!{struct IMAGE_NT_HEADERS32 {
    Signature: DWORD,
    FileHeader: IMAGE_FILE_HEADER,
    OptionalHeader: IMAGE_OPTIONAL_HEADER32,
}}
pub type PIMAGE_NT_HEADERS32 = *mut IMAGE_NT_HEADERS32;
// ROM images carry no PE signature field.
STRUCT!{struct IMAGE_ROM_HEADERS {
    FileHeader: IMAGE_FILE_HEADER,
    OptionalHeader: IMAGE_ROM_OPTIONAL_HEADER,
}}
pub type PIMAGE_ROM_HEADERS = *mut IMAGE_ROM_HEADERS;
// Architecture-neutral aliases (same scheme as IMAGE_OPTIONAL_HEADER above).
#[cfg(target_arch = "x86_64")]
pub type IMAGE_NT_HEADERS = IMAGE_NT_HEADERS64;
#[cfg(target_arch = "x86_64")]
pub type PIMAGE_NT_HEADERS = PIMAGE_NT_HEADERS64;
#[cfg(target_arch = "x86")]
pub type IMAGE_NT_HEADERS = IMAGE_NT_HEADERS32;
#[cfg(target_arch = "x86")]
pub type PIMAGE_NT_HEADERS = PIMAGE_NT_HEADERS32;
// IMAGE_OPTIONAL_HEADER.Subsystem values (4 and 6 are skipped in the SDK).
pub const IMAGE_SUBSYSTEM_UNKNOWN: WORD = 0;
pub const IMAGE_SUBSYSTEM_NATIVE: WORD = 1;
pub const IMAGE_SUBSYSTEM_WINDOWS_GUI: WORD = 2;
pub const IMAGE_SUBSYSTEM_WINDOWS_CUI: WORD = 3;
pub const IMAGE_SUBSYSTEM_OS2_CUI: WORD = 5;
pub const IMAGE_SUBSYSTEM_POSIX_CUI: WORD = 7;
pub const IMAGE_SUBSYSTEM_NATIVE_WINDOWS: WORD = 8;
pub const IMAGE_SUBSYSTEM_WINDOWS_CE_GUI: WORD = 9;
pub const IMAGE_SUBSYSTEM_EFI_APPLICATION: WORD = 10;
pub const IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER: WORD = 11;
pub const IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER: WORD = 12;
pub const IMAGE_SUBSYSTEM_EFI_ROM: WORD = 13;
pub const IMAGE_SUBSYSTEM_XBOX: WORD = 14;
pub const IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION: WORD = 16;
// IMAGE_OPTIONAL_HEADER.DllCharacteristics flag bits.
pub const IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA: WORD = 0x0020;
pub const IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE: WORD = 0x0040;
pub const IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY: WORD = 0x0080;
pub const IMAGE_DLLCHARACTERISTICS_NX_COMPAT: WORD = 0x0100;
pub const IMAGE_DLLCHARACTERISTICS_NO_ISOLATION: WORD = 0x0200;
pub const IMAGE_DLLCHARACTERISTICS_NO_SEH: WORD = 0x0400;
pub const IMAGE_DLLCHARACTERISTICS_NO_BIND: WORD = 0x0800;
pub const IMAGE_DLLCHARACTERISTICS_APPCONTAINER: WORD = 0x1000;
pub const IMAGE_DLLCHARACTERISTICS_WDM_DRIVER: WORD = 0x2000;
pub const IMAGE_DLLCHARACTERISTICS_GUARD_CF: WORD = 0x4000;
pub const IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE: WORD = 0x8000;
// Indices into IMAGE_OPTIONAL_HEADER.DataDirectory.
pub const IMAGE_DIRECTORY_ENTRY_EXPORT: WORD = 0;
pub const IMAGE_DIRECTORY_ENTRY_IMPORT: WORD = 1;
pub const IMAGE_DIRECTORY_ENTRY_RESOURCE: WORD = 2;
pub const IMAGE_DIRECTORY_ENTRY_EXCEPTION: WORD = 3;
pub const IMAGE_DIRECTORY_ENTRY_SECURITY: WORD = 4;
pub const IMAGE_DIRECTORY_ENTRY_BASERELOC: WORD = 5;
pub const IMAGE_DIRECTORY_ENTRY_DEBUG: WORD = 6;
pub const IMAGE_DIRECTORY_ENTRY_ARCHITECTURE: WORD = 7;
pub const IMAGE_DIRECTORY_ENTRY_GLOBALPTR: WORD = 8;
pub const IMAGE_DIRECTORY_ENTRY_TLS: WORD = 9;
pub const IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG: WORD = 10;
pub const IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT: WORD = 11;
pub const IMAGE_DIRECTORY_ENTRY_IAT: WORD = 12;
pub const IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT: WORD = 13;
pub const IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR: WORD = 14;
// Anonymous-object (import-library / LTCG object) headers, in increasing
// versions: V1, V2 (adds metadata fields), and BIGOBJ (adds section counts).
STRUCT!{struct ANON_OBJECT_HEADER {
    Sig1: WORD,
    Sig2: WORD,
    Version: WORD,
    Machine: WORD,
    TimeDateStamp: DWORD,
    ClassID: CLSID,
    SizeOfData: DWORD,
}}
STRUCT!{struct ANON_OBJECT_HEADER_V2 {
    Sig1: WORD,
    Sig2: WORD,
    Version: WORD,
    Machine: WORD,
    TimeDateStamp: DWORD,
    ClassID: CLSID,
    SizeOfData: DWORD,
    Flags: DWORD,
    MetaDataSize: DWORD,
    MetaDataOffset: DWORD,
}}
STRUCT!{struct ANON_OBJECT_HEADER_BIGOBJ {
    Sig1: WORD,
    Sig2: WORD,
    Version: WORD,
    Machine: WORD,
    TimeDateStamp: DWORD,
    ClassID: CLSID,
    SizeOfData: DWORD,
    Flags: DWORD,
    MetaDataSize: DWORD,
    MetaDataOffset: DWORD,
    NumberOfSections: DWORD,
    PointerToSymbolTable: DWORD,
    NumberOfSymbols: DWORD,
}}
pub const IMAGE_SIZEOF_SHORT_NAME: usize = 8;
// PE/COFF section header. The SDK's anonymous union of PhysicalAddress and
// VirtualSize is flattened here into a single combined field.
STRUCT!{struct IMAGE_SECTION_HEADER {
    Name: [BYTE; IMAGE_SIZEOF_SHORT_NAME],
    PhysicalAddressOrVirtualSize: DWORD,
    VirtualAddress: DWORD,
    SizeOfRawData: DWORD,
    PointerToRawData: DWORD,
    PointerToRelocations: DWORD,
    PointerToLinenumbers: DWORD,
    NumberOfRelocations: WORD,
    NumberOfLinenumbers: WORD,
    Characteristics: DWORD,
}}
pub type PIMAGE_SECTION_HEADER = *mut IMAGE_SECTION_HEADER;
pub const IMAGE_SIZEOF_SECTION_HEADER: usize = 40;
// IMAGE_SECTION_HEADER.Characteristics flag bits (IMAGE_SCN_*). Alignment
// is encoded as a 4-bit field (masked by IMAGE_SCN_ALIGN_MASK), not flags.
pub const IMAGE_SCN_TYPE_NO_PAD: DWORD = 0x00000008;
pub const IMAGE_SCN_CNT_CODE: DWORD = 0x00000020;
pub const IMAGE_SCN_CNT_INITIALIZED_DATA: DWORD = 0x00000040;
pub const IMAGE_SCN_CNT_UNINITIALIZED_DATA: DWORD = 0x00000080;
pub const IMAGE_SCN_LNK_OTHER: DWORD = 0x00000100;
pub const IMAGE_SCN_LNK_INFO: DWORD = 0x00000200;
pub const IMAGE_SCN_LNK_REMOVE: DWORD = 0x00000800;
pub const IMAGE_SCN_LNK_COMDAT: DWORD = 0x00001000;
pub const IMAGE_SCN_NO_DEFER_SPEC_EXC: DWORD = 0x00004000;
pub const IMAGE_SCN_GPREL: DWORD = 0x00008000;
pub const IMAGE_SCN_MEM_FARDATA: DWORD = 0x00008000;
pub const IMAGE_SCN_MEM_PURGEABLE: DWORD = 0x00020000;
pub const IMAGE_SCN_MEM_16BIT: DWORD = 0x00020000;
pub const IMAGE_SCN_MEM_LOCKED: DWORD = 0x00040000;
pub const IMAGE_SCN_MEM_PRELOAD: DWORD = 0x00080000;
pub const IMAGE_SCN_ALIGN_1BYTES: DWORD = 0x00100000;
pub const IMAGE_SCN_ALIGN_2BYTES: DWORD = 0x00200000;
pub const IMAGE_SCN_ALIGN_4BYTES: DWORD = 0x00300000;
pub const IMAGE_SCN_ALIGN_8BYTES: DWORD = 0x00400000;
pub const IMAGE_SCN_ALIGN_16BYTES: DWORD = 0x00500000;
pub const IMAGE_SCN_ALIGN_32BYTES: DWORD = 0x00600000;
pub const IMAGE_SCN_ALIGN_64BYTES: DWORD = 0x00700000;
pub const IMAGE_SCN_ALIGN_128BYTES: DWORD = 0x00800000;
pub const IMAGE_SCN_ALIGN_256BYTES: DWORD = 0x00900000;
pub const IMAGE_SCN_ALIGN_512BYTES: DWORD = 0x00A00000;
pub const IMAGE_SCN_ALIGN_1024BYTES: DWORD = 0x00B00000;
pub const IMAGE_SCN_ALIGN_2048BYTES: DWORD = 0x00C00000;
pub const IMAGE_SCN_ALIGN_4096BYTES: DWORD = 0x00D00000;
pub const IMAGE_SCN_ALIGN_8192BYTES: DWORD = 0x00E00000;
pub const IMAGE_SCN_ALIGN_MASK: DWORD = 0x00F00000;
pub const IMAGE_SCN_LNK_NRELOC_OVFL: DWORD = 0x01000000;
pub const IMAGE_SCN_MEM_DISCARDABLE: DWORD = 0x02000000;
pub const IMAGE_SCN_MEM_NOT_CACHED: DWORD = 0x04000000;
pub const IMAGE_SCN_MEM_NOT_PAGED: DWORD = 0x08000000;
pub const IMAGE_SCN_MEM_SHARED: DWORD = 0x10000000;
pub const IMAGE_SCN_MEM_EXECUTE: DWORD = 0x20000000;
pub const IMAGE_SCN_MEM_READ: DWORD = 0x40000000;
pub const IMAGE_SCN_MEM_WRITE: DWORD = 0x80000000;
pub const IMAGE_SCN_SCALE_INDEX: DWORD = 0x00000001;
//16590
STRUCT!{struct IMAGE_DEBUG_DIRECTORY {
Characteristics: DWORD,
TimeDateStamp: DWORD,
MajorVersion: WORD,
MinorVersion: WORD,
Type: DWORD,
SizeOfData: DWORD,
AddressOfRawData: DWORD,
PointerToRawData: DWORD,
}}
pub type PIMAGE_DEBUG_DIRECTORY = *mut IMAGE_DEBUG_DIRECTORY;
pub const IMAGE_DEBUG_TYPE_UNKNOWN: DWORD = 0;
pub const IMAGE_DEBUG_TYPE_COFF: DWORD = 1;
pub const IMAGE_DEBUG_TYPE_CODEVIEW: DWORD = 2;
pub const IMAGE_DEBUG_TYPE_FPO: DWORD = 3;
pub const IMAGE_DEBUG_TYPE_MISC: DWORD = 4;
pub const IMAGE_DEBUG_TYPE_EXCEPTION: DWORD = 5;
pub const IMAGE_DEBUG_TYPE_FIXUP: DWORD = 6;
pub const IMAGE_DEBUG_TYPE_OMAP_TO_SRC: DWORD = 7;
pub const IMAGE_DEBUG_TYPE_OMAP_FROM_SRC: DWORD = 8;
pub const IMAGE_DEBUG_TYPE_BORLAND: DWORD = 9;
pub const IMAGE_DEBUG_TYPE_RESERVED10: DWORD = 10;
pub const IMAGE_DEBUG_TYPE_CLSID: DWORD = 11;
// Header describing the COFF symbol/line-number debug data in an image.
STRUCT!{struct IMAGE_COFF_SYMBOLS_HEADER {
    NumberOfSymbols: DWORD,
    LvaToFirstSymbol: DWORD,
    NumberOfLinenumbers: DWORD,
    LvaToFirstLinenumber: DWORD,
    RvaToFirstByteOfCode: DWORD,
    RvaToLastByteOfCode: DWORD,
    RvaToFirstByteOfData: DWORD,
    RvaToLastByteOfData: DWORD,
}}
pub type PIMAGE_COFF_SYMBOLS_HEADER = *mut IMAGE_COFF_SYMBOLS_HEADER;
// x64 exception-directory entry.
STRUCT!{struct IMAGE_RUNTIME_FUNCTION_ENTRY {
    BeginAddress: DWORD,
    EndAddress: DWORD,
    UnwindInfoAddress: DWORD,
}}
// UNION!: exposes UnwindData/UnwindData_mut accessors that reinterpret the
// UnwindInfoAddress storage as a DWORD (the C declaration uses an anonymous union here).
UNION!(IMAGE_RUNTIME_FUNCTION_ENTRY, UnwindInfoAddress, UnwindData, UnwindData_mut, DWORD);
pub type PIMAGE_RUNTIME_FUNCTION_ENTRY = *mut IMAGE_RUNTIME_FUNCTION_ENTRY;
// FRAME_* — frame type values for FPO records.
pub const FRAME_FPO: WORD = 0;
pub const FRAME_TRAP: WORD = 1;
pub const FRAME_TSS: WORD = 2;
pub const FRAME_NONFPO: WORD = 3;
// Frame-pointer-omission record for a single function.
STRUCT!{struct FPO_DATA {
    ulOffStart: DWORD,
    cbProcSize: DWORD,
    cdwLocals: DWORD,
    cdwParams: WORD,
    bitfield: WORD, // packed bitfield in the C declaration; no BITFIELD! accessors provided here
}}
pub type PFPO_DATA = *mut FPO_DATA;
pub const SIZEOF_RFPO_DATA: usize = 16; // matches the byte size of FPO_DATA (4+4+4+2+2)
pub const IMAGE_DEBUG_MISC_EXENAME: DWORD = 1;
STRUCT!{struct IMAGE_DEBUG_MISC {
    DataType: DWORD,
    Length: DWORD,
    Unicode: BOOLEAN,
    Reserved: [BYTE; 3],
    Data: [BYTE; 0], // zero-length trailing array: variable-length payload follows the header
}}
pub type PIMAGE_DEBUG_MISC = *mut IMAGE_DEBUG_MISC;
// 32-bit and 64-bit function-table entries.
STRUCT!{struct IMAGE_FUNCTION_ENTRY {
    StartingAddress: DWORD,
    EndingAddress: DWORD,
    EndOfPrologue: DWORD,
}}
pub type PIMAGE_FUNCTION_ENTRY = *mut IMAGE_FUNCTION_ENTRY;
STRUCT!{struct IMAGE_FUNCTION_ENTRY64 {
    StartingAddress: ULONGLONG,
    EndingAddress: ULONGLONG,
    EndOfPrologueOrUnwindInfoAddress: ULONGLONG,
}}
pub type PIMAGE_FUNCTION_ENTRY64 = *mut IMAGE_FUNCTION_ENTRY64;
//18245
// HEAP_* — flags for heap creation / allocation APIs.
pub const HEAP_NO_SERIALIZE: DWORD = 0x00000001;
pub const HEAP_GROWABLE: DWORD = 0x00000002;
pub const HEAP_GENERATE_EXCEPTIONS: DWORD = 0x00000004;
pub const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
pub const HEAP_REALLOC_IN_PLACE_ONLY: DWORD = 0x00000010;
pub const HEAP_TAIL_CHECKING_ENABLED: DWORD = 0x00000020;
pub const HEAP_FREE_CHECKING_ENABLED: DWORD = 0x00000040;
pub const HEAP_DISABLE_COALESCE_ON_FREE: DWORD = 0x00000080;
pub const HEAP_CREATE_ALIGN_16: DWORD = 0x00010000;
pub const HEAP_CREATE_ENABLE_TRACING: DWORD = 0x00020000;
pub const HEAP_CREATE_ENABLE_EXECUTE: DWORD = 0x00040000;
pub const HEAP_MAXIMUM_TAG: DWORD = 0x0FFF;
pub const HEAP_PSEUDO_TAG_FLAG: DWORD = 0x8000;
pub const HEAP_TAG_SHIFT: DWORD = 18;
//18145
// Debug side-structure attached to a critical section when debug info is enabled.
STRUCT!{struct RTL_CRITICAL_SECTION_DEBUG {
    Type: WORD,
    CreatorBackTraceIndex: WORD,
    CriticalSection: *mut RTL_CRITICAL_SECTION,
    ProcessLocksList: LIST_ENTRY,
    EntryCount: DWORD,
    ContentionCount: DWORD,
    Flags: DWORD,
    CreatorBackTraceIndexHigh: WORD,
    SpareWORD: WORD,
}}
pub type PRTL_CRITICAL_SECTION_DEBUG = *mut RTL_CRITICAL_SECTION_DEBUG;
// RTL_RESOURCE_DEBUG shares the same layout as the critical-section debug record.
pub type RTL_RESOURCE_DEBUG = RTL_CRITICAL_SECTION_DEBUG;
pub type PRTL_RESOURCE_DEBUG = *mut RTL_CRITICAL_SECTION_DEBUG;
pub const RTL_CRITSECT_TYPE: WORD = 0;
pub const RTL_RESOURCE_TYPE: WORD = 1;
// Flag bits carried in the high byte of the critical-section SpinCount field.
pub const RTL_CRITICAL_SECTION_FLAG_NO_DEBUG_INFO: ULONG_PTR = 0x01000000;
pub const RTL_CRITICAL_SECTION_FLAG_DYNAMIC_SPIN: ULONG_PTR = 0x02000000;
pub const RTL_CRITICAL_SECTION_FLAG_STATIC_INIT: ULONG_PTR = 0x04000000;
pub const RTL_CRITICAL_SECTION_FLAG_RESOURCE_TYPE: ULONG_PTR = 0x08000000;
pub const RTL_CRITICAL_SECTION_FLAG_FORCE_DEBUG_INFO: ULONG_PTR = 0x10000000;
pub const RTL_CRITICAL_SECTION_ALL_FLAG_BITS: ULONG_PTR = 0xFF000000;
// Reserved = all flag bits minus the five named flags above.
pub const RTL_CRITICAL_SECTION_FLAG_RESERVED: ULONG_PTR = RTL_CRITICAL_SECTION_ALL_FLAG_BITS
    & !(RTL_CRITICAL_SECTION_FLAG_NO_DEBUG_INFO | RTL_CRITICAL_SECTION_FLAG_DYNAMIC_SPIN
    | RTL_CRITICAL_SECTION_FLAG_STATIC_INIT | RTL_CRITICAL_SECTION_FLAG_RESOURCE_TYPE
    | RTL_CRITICAL_SECTION_FLAG_FORCE_DEBUG_INFO);
pub const RTL_CRITICAL_SECTION_DEBUG_FLAG_STATIC_INIT: DWORD = 0x00000001;
STRUCT!{struct RTL_CRITICAL_SECTION {
    DebugInfo: PRTL_CRITICAL_SECTION_DEBUG,
    LockCount: LONG,
    RecursionCount: LONG,
    OwningThread: HANDLE,
    LockSemaphore: HANDLE,
    SpinCount: ULONG_PTR,
}}
pub type PRTL_CRITICAL_SECTION = *mut RTL_CRITICAL_SECTION;
// Slim reader/writer lock and condition variable are each a single opaque pointer,
// statically initializable to null via the *_INIT constants below.
STRUCT!{struct RTL_SRWLOCK {
    Ptr: PVOID,
}}
pub type PRTL_SRWLOCK = *mut RTL_SRWLOCK;
pub const RTL_SRWLOCK_INIT: RTL_SRWLOCK = RTL_SRWLOCK { Ptr: 0 as PVOID };
STRUCT!{struct RTL_CONDITION_VARIABLE {
    Ptr: PVOID,
}}
pub type PRTL_CONDITION_VARIABLE = *mut RTL_CONDITION_VARIABLE;
pub const RTL_CONDITION_VARIABLE_INIT: RTL_CONDITION_VARIABLE = RTL_CONDITION_VARIABLE {
    Ptr: 0 as PVOID,
};
//18204
// FN! declares stdcall function-pointer types used as callbacks by the Win32 API.
FN!{stdcall PAPCFUNC(
    Parameter: ULONG_PTR,
) -> ()}
FN!{stdcall PVECTORED_EXCEPTION_HANDLER(
    ExceptionInfo: *mut EXCEPTION_POINTERS,
) -> LONG}
ENUM!{enum HEAP_INFORMATION_CLASS {
    HeapCompatibilityInformation = 0,
    HeapEnableTerminationOnCorruption = 1,
    HeapOptimizeResources = 3, // value 2 is intentionally skipped, matching the C enum
}}
pub const HEAP_OPTIMIZE_RESOURCES_CURRENT_VERSION: DWORD = 1;
STRUCT!{struct HEAP_OPTIMIZE_RESOURCES_INFORMATION {
    Version: DWORD,
    Flags: DWORD,
}}
pub type PHEAP_OPTIMIZE_RESOURCES_INFORMATION = *mut HEAP_OPTIMIZE_RESOURCES_INFORMATION;
// WT_* — flags for the thread-pool wait/timer registration APIs.
pub const WT_EXECUTEDEFAULT: ULONG = 0x00000000;
pub const WT_EXECUTEINIOTHREAD: ULONG = 0x00000001;
pub const WT_EXECUTEINUITHREAD: ULONG = 0x00000002;
pub const WT_EXECUTEINWAITTHREAD: ULONG = 0x00000004;
pub const WT_EXECUTEONLYONCE: ULONG = 0x00000008;
pub const WT_EXECUTEINTIMERTHREAD: ULONG = 0x00000020;
pub const WT_EXECUTELONGFUNCTION: ULONG = 0x00000010;
pub const WT_EXECUTEINPERSISTENTIOTHREAD: ULONG = 0x00000040;
pub const WT_EXECUTEINPERSISTENTTHREAD: ULONG = 0x00000080;
pub const WT_TRANSFER_IMPERSONATION: ULONG = 0x00000100;
FN!{stdcall WAITORTIMERCALLBACKFUNC(
    PVOID,
    BOOLEAN,
) -> ()}
FN!{stdcall WORKERCALLBACKFUNC(
    PVOID,
) -> ()}
FN!{stdcall APC_CALLBACK_FUNCTION(
    DWORD,
    PVOID,
    PVOID,
) -> ()}
pub type WAITORTIMERCALLBACK = WAITORTIMERCALLBACKFUNC;
FN!{stdcall PFLS_CALLBACK_FUNCTION(
    lpFlsData: PVOID,
) -> ()}
FN!{stdcall PSECURE_MEMORY_CACHE_CALLBACK(
    Addr: PVOID,
    Range: SIZE_T,
) -> BOOLEAN}
// These two alias values defined above (0x10 = WT_EXECUTELONGFUNCTION, 0x08 = WT_EXECUTEONLYONCE).
pub const WT_EXECUTEINLONGTHREAD: ULONG = 0x00000010;
pub const WT_EXECUTEDELETEWAIT: ULONG = 0x00000008;
//18570
// KEY_* — registry key access rights (specific rights, combined with standard rights below).
pub const KEY_QUERY_VALUE: u32 = 0x0001;
pub const KEY_SET_VALUE: u32 = 0x0002;
pub const KEY_CREATE_SUB_KEY: u32 = 0x0004;
pub const KEY_ENUMERATE_SUB_KEYS: u32 = 0x0008;
pub const KEY_NOTIFY: u32 = 0x0010;
pub const KEY_CREATE_LINK: u32 = 0x0020;
pub const KEY_WOW64_32KEY: u32 = 0x0200;
pub const KEY_WOW64_64KEY: u32 = 0x0100;
pub const KEY_WOW64_RES: u32 = 0x0300;
// Composite rights; SYNCHRONIZE is explicitly masked out for registry handles.
pub const KEY_READ: u32 = (STANDARD_RIGHTS_READ | KEY_QUERY_VALUE | KEY_ENUMERATE_SUB_KEYS
    | KEY_NOTIFY) & !SYNCHRONIZE;
pub const KEY_WRITE: u32 = (STANDARD_RIGHTS_WRITE | KEY_SET_VALUE | KEY_CREATE_SUB_KEY)
    & !SYNCHRONIZE;
pub const KEY_EXECUTE: u32 = KEY_READ & !SYNCHRONIZE;
pub const KEY_ALL_ACCESS: u32 = (STANDARD_RIGHTS_ALL | KEY_QUERY_VALUE | KEY_SET_VALUE
    | KEY_CREATE_SUB_KEY | KEY_ENUMERATE_SUB_KEYS | KEY_NOTIFY | KEY_CREATE_LINK) & !SYNCHRONIZE;
pub const REG_CREATED_NEW_KEY: DWORD = 0x00000001;
pub const REG_OPENED_EXISTING_KEY: DWORD = 0x00000002;
// REG_NOTIFY_* — change-notification filters.
pub const REG_NOTIFY_CHANGE_NAME: DWORD = 0x00000001;
pub const REG_NOTIFY_CHANGE_ATTRIBUTES: DWORD = 0x00000002;
pub const REG_NOTIFY_CHANGE_LAST_SET: DWORD = 0x00000004;
pub const REG_NOTIFY_CHANGE_SECURITY: DWORD = 0x00000008;
pub const REG_LEGAL_CHANGE_FILTER: DWORD = REG_NOTIFY_CHANGE_NAME |
    REG_NOTIFY_CHANGE_ATTRIBUTES |
    REG_NOTIFY_CHANGE_LAST_SET |
    REG_NOTIFY_CHANGE_SECURITY;
pub const REG_NOTIFY_THREAD_AGNOSTIC: DWORD = 0x10000000; //supported only on Windows 8 and later
// REG_OPTION_* — key creation/open options.
pub const REG_OPTION_RESERVED: DWORD = 0x00000000;
pub const REG_OPTION_NON_VOLATILE: DWORD = 0x00000000;
pub const REG_OPTION_VOLATILE: DWORD = 0x00000001;
pub const REG_OPTION_CREATE_LINK: DWORD = 0x00000002;
pub const REG_OPTION_BACKUP_RESTORE: DWORD = 0x00000004;
pub const REG_OPTION_OPEN_LINK: DWORD = 0x00000008;
// REG_* — registry value types.
pub const REG_NONE: DWORD = 0;
pub const REG_SZ: DWORD = 1;
pub const REG_EXPAND_SZ: DWORD = 2;
pub const REG_BINARY: DWORD = 3;
pub const REG_DWORD: DWORD = 4;
pub const REG_DWORD_LITTLE_ENDIAN: DWORD = 4;
pub const REG_DWORD_BIG_ENDIAN: DWORD = 5;
pub const REG_LINK: DWORD = 6;
pub const REG_MULTI_SZ: DWORD = 7;
pub const REG_RESOURCE_LIST: DWORD = 8;
pub const REG_FULL_RESOURCE_DESCRIPTOR: DWORD = 9;
pub const REG_RESOURCE_REQUIREMENTS_LIST: DWORD = 10;
pub const REG_QWORD: DWORD = 11;
pub const REG_QWORD_LITTLE_ENDIAN: DWORD = 11;
//18720
// SERVICE_* — service type flags.
pub const SERVICE_KERNEL_DRIVER: DWORD = 0x00000001;
pub const SERVICE_FILE_SYSTEM_DRIVER: DWORD = 0x00000002;
pub const SERVICE_ADAPTER: DWORD = 0x00000004;
pub const SERVICE_RECOGNIZER_DRIVER: DWORD = 0x00000008;
pub const SERVICE_DRIVER: DWORD = SERVICE_KERNEL_DRIVER | SERVICE_FILE_SYSTEM_DRIVER
    | SERVICE_RECOGNIZER_DRIVER;
pub const SERVICE_WIN32_OWN_PROCESS: DWORD = 0x00000010;
pub const SERVICE_WIN32_SHARE_PROCESS: DWORD = 0x00000020;
pub const SERVICE_WIN32: DWORD = SERVICE_WIN32_OWN_PROCESS | SERVICE_WIN32_SHARE_PROCESS;
pub const SERVICE_INTERACTIVE_PROCESS: DWORD = 0x00000100;
pub const SERVICE_TYPE_ALL: DWORD = SERVICE_WIN32 | SERVICE_ADAPTER | SERVICE_DRIVER
    | SERVICE_INTERACTIVE_PROCESS;
// TP_* — Vista+ thread-pool objects. Each is opaque to callers: the structs carry a
// single pointer-sized `dummy` field and are only ever used behind `*mut` typedefs.
STRUCT!{struct TP_CALLBACK_INSTANCE {
    dummy: *mut c_void,
}}
pub type PTP_CALLBACK_INSTANCE = *mut TP_CALLBACK_INSTANCE;
STRUCT!{struct TP_IO {
    dummy: *mut c_void,
}}
pub type PTP_IO = *mut TP_IO;
STRUCT!{struct TP_POOL {
    dummy: *mut c_void,
}}
pub type PTP_POOL = *mut TP_POOL;
STRUCT!{struct TP_CLEANUP_GROUP {
    dummy: *mut c_void,
}}
pub type PTP_CLEANUP_GROUP = *mut TP_CLEANUP_GROUP;
STRUCT!{struct TP_TIMER {
    dummy: *mut c_void,
}}
pub type PTP_TIMER = *mut TP_TIMER;
STRUCT!{struct TP_WAIT {
    dummy: *mut c_void,
}}
pub type PTP_WAIT = *mut TP_WAIT;
STRUCT!{struct TP_WORK {
    dummy: *mut c_void,
}}
pub type PTP_WORK = *mut TP_WORK;
STRUCT!{struct ACTIVATION_CONTEXT {
    dummy: *mut c_void,
}}
ENUM!{enum TP_CALLBACK_PRIORITY {
    TP_CALLBACK_PRIORITY_HIGH,
    TP_CALLBACK_PRIORITY_NORMAL,
    TP_CALLBACK_PRIORITY_LOW,
    TP_CALLBACK_PRIORITY_INVALID,
    TP_CALLBACK_PRIORITY_COUNT = 4, // same value as TP_CALLBACK_PRIORITY_INVALID
}}
// Thread-pool callback signatures.
FN!{stdcall PTP_CLEANUP_GROUP_CANCEL_CALLBACK(
    ObjectContext: PVOID,
    CleanupContext: PVOID,
) -> ()}
FN!{stdcall PTP_SIMPLE_CALLBACK(
    Instance: PTP_CALLBACK_INSTANCE,
    Context: PVOID,
) -> ()}
FN!{stdcall PTP_WORK_CALLBACK(
    Instance: PTP_CALLBACK_INSTANCE,
    Context: PVOID,
    Work: PTP_WORK,
) -> ()}
FN!{stdcall PTP_TIMER_CALLBACK(
    Instance: PTP_CALLBACK_INSTANCE,
    Context: PVOID,
    Timer: PTP_TIMER,
) -> ()}
pub type TP_WAIT_RESULT = DWORD;
FN!{stdcall PTP_WAIT_CALLBACK(
    Instance: PTP_CALLBACK_INSTANCE,
    Context: PVOID,
    Wait: PTP_WAIT,
    WaitResult: TP_WAIT_RESULT,
) -> ()}
pub type TP_VERSION = DWORD;
pub type PTP_VERSION = *mut DWORD;
STRUCT!{struct TP_POOL_STACK_INFORMATION {
    StackReserve: SIZE_T,
    StackCommit: SIZE_T,
}}
pub type PTP_POOL_STACK_INFORMATION = *mut TP_POOL_STACK_INFORMATION;
// Inner struct of the v3 callback environment's flags union; BITFIELD! presumably
// generates getter/setter pairs over the listed bit ranges of BitFields.
STRUCT!{struct TP_CALLBACK_ENVIRON_V3_s {
    BitFields: DWORD,
}}
BITFIELD!(TP_CALLBACK_ENVIRON_V3_s BitFields: DWORD [
    LongFunction set_LongFunction[0..1],
    Persistent set_Persistent[1..2],
    Private set_Private[2..32],
]);
STRUCT!{struct TP_CALLBACK_ENVIRON_V3 {
    Version: TP_VERSION,
    Pool: PTP_POOL,
    CleanupGroup: PTP_CLEANUP_GROUP,
    CleanupGroupCancelCallback: PTP_CLEANUP_GROUP_CANCEL_CALLBACK,
    RaceDll: PVOID,
    ActivationContext: *mut ACTIVATION_CONTEXT,
    FinalizationCallback: PTP_SIMPLE_CALLBACK,
    u: DWORD,
    CallbackPriority: TP_CALLBACK_PRIORITY,
    Size: DWORD,
}}
// `u` is an anonymous union in C: viewable either as raw Flags or as the bitfield struct.
UNION!(TP_CALLBACK_ENVIRON_V3, u, Flags, Flags_mut, DWORD);
UNION!(TP_CALLBACK_ENVIRON_V3, u, s, s_mut, TP_CALLBACK_ENVIRON_V3_s);
pub type TP_CALLBACK_ENVIRON = TP_CALLBACK_ENVIRON_V3;
pub type PTP_CALLBACK_ENVIRON = *mut TP_CALLBACK_ENVIRON_V3;
STRUCT!{struct JOB_SET_ARRAY {
    JobHandle: HANDLE,
    MemberLevel: DWORD,
    Flags: DWORD,
}}
pub type PJOB_SET_ARRAY = *mut JOB_SET_ARRAY;
// Synchronization barrier; all fields reserved/opaque.
STRUCT!{struct RTL_BARRIER {
    Reserved1: DWORD,
    Reserved2: DWORD,
    Reserved3: [ULONG_PTR; 2],
    Reserved4: DWORD,
    Reserved5: DWORD,
}}
pub type PRTL_BARRIER = *mut RTL_BARRIER;
// One-time initialization token (single opaque pointer).
STRUCT!{struct RTL_RUN_ONCE {
    Ptr: PVOID,
}}
pub type PRTL_RUN_ONCE = *mut RTL_RUN_ONCE;
// User-mode scheduling (UMS) query classes and scheduler entry reasons.
ENUM!{enum RTL_UMS_THREAD_INFO_CLASS {
    UmsThreadInvalidInfoClass = 0,
    UmsThreadUserContext,
    UmsThreadPriority, // Reserved
    UmsThreadAffinity, // Reserved
    UmsThreadTeb,
    UmsThreadIsSuspended,
    UmsThreadIsTerminated,
    UmsThreadMaxInfoClass,
}}
ENUM!{enum RTL_UMS_SCHEDULER_REASON {
    UmsSchedulerStartup = 0,
    UmsSchedulerThreadBlocked,
    UmsSchedulerThreadYield,
}}
FN!{stdcall PRTL_UMS_SCHEDULER_ENTRY_POINT(
    Reason: RTL_UMS_SCHEDULER_REASON,
    ActivationPayload: ULONG_PTR,
    SchedulerParam: PVOID,
) -> ()}
ENUM!{enum FIRMWARE_TYPE {
    FirmwareTypeUnknown,
    FirmwareTypeBios,
    FirmwareTypeUefi,
    FirmwareTypeMax,
}}
pub type PFIRMWARE_TYPE = *mut FIRMWARE_TYPE;
// Relationship kinds reported by GetLogicalProcessorInformation(Ex).
ENUM!{enum LOGICAL_PROCESSOR_RELATIONSHIP {
    RelationProcessorCore,
    RelationNumaNode,
    RelationCache,
    RelationProcessorPackage,
    RelationGroup,
    RelationAll = 0xffff,
}}
ENUM!{enum PROCESSOR_CACHE_TYPE {
    CacheUnified,
    CacheInstruction,
    CacheData,
    CacheTrace,
}}
STRUCT!{struct CACHE_DESCRIPTOR {
    Level: BYTE,
    Associativity: BYTE,
    LineSize: WORD,
    Size: DWORD,
    Type: PROCESSOR_CACHE_TYPE,
}}
pub type PCACHE_DESCRIPTOR = *mut CACHE_DESCRIPTOR;
// Variant payloads of the anonymous union inside SYSTEM_LOGICAL_PROCESSOR_INFORMATION.
STRUCT!{struct SYSTEM_LOGICAL_PROCESSOR_INFORMATION_ProcessorCore {
    Flags: BYTE,
}}
STRUCT!{struct SYSTEM_LOGICAL_PROCESSOR_INFORMATION_NumaNode {
    NodeNumber: DWORD,
}}
// `Reserved` is sized to hold the largest union variant; the UNION! invocations below
// expose typed accessor pairs over that storage, selected according to `Relationship`.
STRUCT!{struct SYSTEM_LOGICAL_PROCESSOR_INFORMATION {
    ProcessorMask: ULONG_PTR,
    Relationship: LOGICAL_PROCESSOR_RELATIONSHIP,
    Reserved: [ULONGLONG; 2],
}}
UNION!(
    SYSTEM_LOGICAL_PROCESSOR_INFORMATION, Reserved, ProcessorCore, ProcessorCore_mut,
    SYSTEM_LOGICAL_PROCESSOR_INFORMATION_ProcessorCore
);
UNION!(
    SYSTEM_LOGICAL_PROCESSOR_INFORMATION, Reserved, NumaNode, NumaNode_mut,
    SYSTEM_LOGICAL_PROCESSOR_INFORMATION_NumaNode
);
UNION!(SYSTEM_LOGICAL_PROCESSOR_INFORMATION, Reserved, Cache, Cache_mut, CACHE_DESCRIPTOR);
pub type PSYSTEM_LOGICAL_PROCESSOR_INFORMATION = *mut SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
STRUCT!{struct SYSTEM_PROCESSOR_CYCLE_TIME_INFORMATION {
    CycleTime: DWORD64,
}}
pub type PSYSTEM_PROCESSOR_CYCLE_TIME_INFORMATION = *mut SYSTEM_PROCESSOR_CYCLE_TIME_INFORMATION;
ENUM!{enum HARDWARE_COUNTER_TYPE {
    PMCCounter,
    MaxHardwareCounterType,
}}
pub type PHARDWARE_COUNTER_TYPE = *mut HARDWARE_COUNTER_TYPE;
// Policy selectors for Get/SetProcessMitigationPolicy-style APIs.
ENUM!{enum PROCESS_MITIGATION_POLICY {
    ProcessDEPPolicy,
    ProcessASLRPolicy,
    ProcessDynamicCodePolicy,
    ProcessStrictHandleCheckPolicy,
    ProcessSystemCallDisablePolicy,
    ProcessMitigationOptionsMask,
    ProcessExtensionPointDisablePolicy,
    ProcessReserved1Policy,
    ProcessSignaturePolicy,
    MaxProcessMitigationPolicy,
}}
// OS version structures. The A/W pairs differ only in the szCSDVersion element type
// (CHAR vs WCHAR); the EX variants append service-pack/suite/product fields.
STRUCT!{struct OSVERSIONINFOA {
    dwOSVersionInfoSize: DWORD,
    dwMajorVersion: DWORD,
    dwMinorVersion: DWORD,
    dwBuildNumber: DWORD,
    dwPlatformId: DWORD,
    szCSDVersion: [CHAR; 128],
}}
pub type POSVERSIONINFOA = *mut OSVERSIONINFOA;
pub type LPOSVERSIONINFOA = *mut OSVERSIONINFOA;
STRUCT!{struct OSVERSIONINFOW {
    dwOSVersionInfoSize: DWORD,
    dwMajorVersion: DWORD,
    dwMinorVersion: DWORD,
    dwBuildNumber: DWORD,
    dwPlatformId: DWORD,
    szCSDVersion: [WCHAR; 128],
}}
pub type POSVERSIONINFOW = *mut OSVERSIONINFOW;
pub type LPOSVERSIONINFOW = *mut OSVERSIONINFOW;
STRUCT!{struct OSVERSIONINFOEXA {
    dwOSVersionInfoSize: DWORD,
    dwMajorVersion: DWORD,
    dwMinorVersion: DWORD,
    dwBuildNumber: DWORD,
    dwPlatformId: DWORD,
    szCSDVersion: [CHAR; 128],
    wServicePackMajor: WORD,
    wServicePackMinor: WORD,
    wSuiteMask: WORD,
    wProductType: BYTE,
    wReserved: BYTE,
}}
pub type POSVERSIONINFOEXA = *mut OSVERSIONINFOEXA;
pub type LPOSVERSIONINFOEXA = *mut OSVERSIONINFOEXA;
STRUCT!{struct OSVERSIONINFOEXW {
    dwOSVersionInfoSize: DWORD,
    dwMajorVersion: DWORD,
    dwMinorVersion: DWORD,
    dwBuildNumber: DWORD,
    dwPlatformId: DWORD,
    szCSDVersion: [WCHAR; 128],
    wServicePackMajor: WORD,
    wServicePackMinor: WORD,
    wSuiteMask: WORD,
    wProductType: BYTE,
    wReserved: BYTE,
}}
pub type POSVERSIONINFOEXW = *mut OSVERSIONINFOEXW;
pub type LPOSVERSIONINFOEXW = *mut OSVERSIONINFOEXW;
// Interlocked singly-linked list primitives.
STRUCT!{struct SLIST_ENTRY {
    Next: *mut SLIST_ENTRY,
}}
pub type PSLIST_ENTRY = *mut SLIST_ENTRY;
// x64 header view: two 64-bit words carrying packed Depth/Sequence and NextEntry fields.
STRUCT!{struct SLIST_HEADER_HeaderX64 {
    BitFields1: ULONGLONG,
    BitFields2: ULONGLONG,
}}
BITFIELD!(SLIST_HEADER_HeaderX64 BitFields1: ULONGLONG [
    Depth set_Depth[0..16],
    Sequence set_Sequence[16..64],
]);
BITFIELD!(SLIST_HEADER_HeaderX64 BitFields2: ULONGLONG [
    Reserved set_Reserved[0..4],
    NextEntry set_NextEntry[4..64],
]);
STRUCT!{struct SLIST_HEADER {
    Alignment: ULONGLONG,
    Region: ULONGLONG,
}}
UNION!(SLIST_HEADER, Alignment, HeaderX64, HeaderX64_mut, SLIST_HEADER_HeaderX64);
pub type PSLIST_HEADER = *mut SLIST_HEADER;
// Power-management enumerations.
ENUM!{enum SYSTEM_POWER_STATE {
    PowerSystemUnspecified = 0,
    PowerSystemWorking = 1,
    PowerSystemSleeping1 = 2,
    PowerSystemSleeping2 = 3,
    PowerSystemSleeping3 = 4,
    PowerSystemHibernate = 5,
    PowerSystemShutdown = 6,
    PowerSystemMaximum = 7,
}}
pub type PSYSTEM_POWER_STATE = *mut SYSTEM_POWER_STATE;
ENUM!{enum POWER_ACTION {
    PowerActionNone = 0,
    PowerActionReserved,
    PowerActionSleep,
    PowerActionHibernate,
    PowerActionShutdown,
    PowerActionShutdownReset,
    PowerActionShutdownOff,
    PowerActionWarmEject,
}}
pub type PPOWER_ACTION = *mut POWER_ACTION;
ENUM!{enum DEVICE_POWER_STATE {
    PowerDeviceUnspecified = 0,
    PowerDeviceD0,
    PowerDeviceD1,
    PowerDeviceD2,
    PowerDeviceD3,
    PowerDeviceMaximum,
}}
pub type PDEVICE_POWER_STATE = *mut DEVICE_POWER_STATE;
ENUM!{enum MONITOR_DISPLAY_STATE {
    PowerMonitorOff = 0,
    PowerMonitorOn,
    PowerMonitorDim,
}}
pub type PMONITOR_DISPLAY_STATE = *mut MONITOR_DISPLAY_STATE;
ENUM!{enum USER_ACTIVITY_PRESENCE {
    PowerUserPresent = 0,
    PowerUserNotPresent,
    PowerUserInactive,
    PowerUserMaximum,
    //PowerUserInvalid = 3,
}}
pub type PUSER_ACTIVITY_PRESENCE = *mut USER_ACTIVITY_PRESENCE;
pub type EXECUTION_STATE = DWORD;
pub type PEXECUTION_STATE = *mut DWORD;
ENUM!{enum LATENCY_TIME {
    LT_DONT_CARE,
    LT_LOWEST_LATENCY,
}}
ENUM!{enum POWER_REQUEST_TYPE {
    PowerRequestDisplayRequired,
    PowerRequestSystemRequired,
    PowerRequestAwayModeRequired,
    PowerRequestExecutionRequired,
}}
pub type PPOWER_REQUEST_TYPE = *mut POWER_REQUEST_TYPE;
// Per-thread hardware performance-counter snapshot.
pub const MAX_HW_COUNTERS: usize = 16;
STRUCT!{struct HARDWARE_COUNTER_DATA {
    Type: HARDWARE_COUNTER_TYPE,
    Reserved: DWORD,
    Value: DWORD64,
}}
pub type PHARDWARE_COUNTER_DATA = *mut HARDWARE_COUNTER_DATA;
STRUCT!{struct PERFORMANCE_DATA {
    Size: WORD,
    Version: BYTE,
    HwCountersCount: BYTE,
    ContextSwitchCount: DWORD,
    WaitReasonBitMap: DWORD64,
    CycleTime: DWORD64,
    RetryCount: DWORD,
    Reserved: DWORD,
    HwCounters: [HARDWARE_COUNTER_DATA; MAX_HW_COUNTERS],
}}
pub type PPERFORMANCE_DATA = *mut PERFORMANCE_DATA;
// VirtualQuery result record (native pointer width), plus fixed-width 32/64-bit variants.
STRUCT!{struct MEMORY_BASIC_INFORMATION {
    BaseAddress: PVOID,
    AllocationBase: PVOID,
    AllocationProtect: DWORD,
    RegionSize: SIZE_T,
    State: DWORD,
    Protect: DWORD,
    Type: DWORD,
}}
pub type PMEMORY_BASIC_INFORMATION = *mut MEMORY_BASIC_INFORMATION;
STRUCT!{struct MEMORY_BASIC_INFORMATION32 {
    BaseAddress: DWORD,
    AllocationBase: DWORD,
    AllocationProtect: DWORD,
    RegionSize: DWORD,
    State: DWORD,
    Protect: DWORD,
    Type: DWORD,
}}
pub type PMEMORY_BASIC_INFORMATION32 = *mut MEMORY_BASIC_INFORMATION32;
// The __alignment1/__alignment2 fields are explicit padding mirroring the C layout.
STRUCT!{struct MEMORY_BASIC_INFORMATION64 { // FIXME: align 16
    BaseAddress: ULONGLONG,
    AllocationBase: ULONGLONG,
    AllocationProtect: DWORD,
    __alignment1: DWORD,
    RegionSize: ULONGLONG,
    State: DWORD,
    Protect: DWORD,
    Type: DWORD,
    __alignment2: DWORD,
}}
pub type PMEMORY_BASIC_INFORMATION64 = *mut MEMORY_BASIC_INFORMATION64;
// WOW64 (32-bit-on-64-bit) thread context structures.
pub const WOW64_SIZE_OF_80387_REGISTERS: usize = 80;
pub const WOW64_MAXIMUM_SUPPORTED_EXTENSION: usize = 512;
STRUCT!{struct WOW64_FLOATING_SAVE_AREA {
    ControlWord: DWORD,
    StatusWord: DWORD,
    TagWord: DWORD,
    ErrorOffset: DWORD,
    ErrorSelector: DWORD,
    DataOffset: DWORD,
    DataSelector: DWORD,
    RegisterArea: [BYTE; WOW64_SIZE_OF_80387_REGISTERS],
    Cr0NpxState: DWORD,
}}
pub type PWOW64_FLOATING_SAVE_AREA = *mut WOW64_FLOATING_SAVE_AREA;
STRUCT!{struct WOW64_CONTEXT {
    ContextFlags: DWORD,
    Dr0: DWORD,
    Dr1: DWORD,
    Dr2: DWORD,
    Dr3: DWORD,
    Dr4: DWORD,
    Dr5: DWORD,
    Dr6: DWORD,
    Dr7: DWORD,
    FloatSave: WOW64_FLOATING_SAVE_AREA,
    SegGs: DWORD,
    SegFs: DWORD,
    SegEs: DWORD,
    SegDs: DWORD,
    Edi: DWORD,
    Esi: DWORD,
    Ebx: DWORD,
    Edx: DWORD,
    Ecx: DWORD,
    Eax: DWORD,
    Ebp: DWORD,
    Eip: DWORD,
    SegCs: DWORD,
    EFlags: DWORD,
    Esp: DWORD,
    SegSs: DWORD,
    ExtendedRegisters: [BYTE; WOW64_MAXIMUM_SUPPORTED_EXTENSION],
}}
pub type PWOW64_CONTEXT = *mut WOW64_CONTEXT;
// WOW64 LDT (segment descriptor) entry. The high DWORD can be viewed either as four
// raw bytes or as packed descriptor bitfields — see the UNION! invocations below.
STRUCT!{struct WOW64_LDT_ENTRY_Bytes {
    BaseMid: BYTE,
    Flags1: BYTE,
    Flags2: BYTE,
    BaseHi: BYTE,
}}
STRUCT!{struct WOW64_LDT_ENTRY_Bits {
    BitFields: DWORD,
}}
BITFIELD!(WOW64_LDT_ENTRY_Bits BitFields: DWORD [
    BaseMid set_BaseMid[0..8],
    Type set_Type[8..13],
    Dpl set_Dpl[13..15],
    Pres set_Pres[15..16],
    LimitHi set_LimitHi[16..20],
    Sys set_Sys[20..21],
    Reserved_0 set_Reserved_0[21..22],
    Default_Big set_Default_Big[22..23],
    Granularity set_Granularity[23..24],
    BaseHi set_BaseHi[24..32],
]);
STRUCT!{struct WOW64_LDT_ENTRY {
    LimitLow: WORD,
    BaseLow: WORD,
    HighWord: DWORD,
}}
UNION!(WOW64_LDT_ENTRY, HighWord, Bytes, Bytes_mut, WOW64_LDT_ENTRY_Bytes);
UNION!(WOW64_LDT_ENTRY, HighWord, Bits, Bits_mut, WOW64_LDT_ENTRY_Bits);
pub type PWOW64_LDT_ENTRY = *mut WOW64_LDT_ENTRY;
// MS-DOS executable header ("MZ" stub); e_lfanew gives the file offset of the PE header.
STRUCT!{struct IMAGE_DOS_HEADER {
    e_magic: WORD,
    e_cblp: WORD,
    e_cp: WORD,
    e_crlc: WORD,
    e_cparhdr: WORD,
    e_minalloc: WORD,
    e_maxalloc: WORD,
    e_ss: WORD,
    e_sp: WORD,
    e_csum: WORD,
    e_ip: WORD,
    e_cs: WORD,
    e_lfarlc: WORD,
    e_ovno: WORD,
    e_res: [WORD; 4],
    e_oemid: WORD,
    e_oeminfo: WORD,
    e_res2: [WORD; 10],
    e_lfanew: LONG,
}}
pub type PIMAGE_DOS_HEADER = *mut IMAGE_DOS_HEADER;
// DllMain notification reasons. Note DETACH is 0 while ATTACH is 1, as in the SDK.
pub const DLL_PROCESS_ATTACH: DWORD = 1;
pub const DLL_THREAD_ATTACH: DWORD = 2;
pub const DLL_THREAD_DETACH: DWORD = 3;
pub const DLL_PROCESS_DETACH: DWORD = 0;
| 38.957552 | 98 | 0.786068 |
5072d3aeb853ac0967c520869292f8fd185f4385 | 791 | use includedir_codegen::Compression;
use walkdir::WalkDir;
fn main() {
let mut bundled = includedir_codegen::start("BUNDLED");
for f in &["ckb.toml", "ckb-miner.toml"] {
bundled
.add_file(f, Compression::Gzip)
.expect("add files to resource bundle");
}
for entry in WalkDir::new("specs").follow_links(true).into_iter() {
match entry {
Ok(ref e)
if !e.file_type().is_dir() && !e.file_name().to_string_lossy().starts_with(".") =>
{
bundled
.add_file(e.path(), Compression::Gzip)
.expect("add files to resource bundle");
}
_ => (),
}
}
bundled.build("bundled.rs").expect("build resource bundle");
}
| 29.296296 | 98 | 0.525917 |
797d85d474dd024d98cd47d3bfb51a9f394cea55 | 19,838 | use anyhow::Result;
use logger::prelude::*;
use starcoin_config::genesis_config::TOTAL_STC_AMOUNT;
use starcoin_config::{BuiltinNetworkID, ChainNetwork};
use starcoin_crypto::hash::PlainCryptoHash;
use starcoin_executor::execute_readonly_function;
use starcoin_state_api::{ChainStateReader, StateReaderExt, StateView};
use starcoin_transaction_builder::{build_package_with_stdlib_module, StdLibOptions};
use starcoin_types::account_config::config_change::ConfigChangeEvent;
use starcoin_types::account_config::TwoPhaseUpgradeV2Resource;
use starcoin_types::identifier::Identifier;
use starcoin_types::language_storage::{ModuleId, StructTag, TypeTag};
use starcoin_types::transaction::ScriptFunction;
use starcoin_vm_types::account_config::upgrade::UpgradeEvent;
use starcoin_vm_types::account_config::{association_address, core_code_address};
use starcoin_vm_types::account_config::{genesis_address, stc_type_tag};
use starcoin_vm_types::genesis_config::{ChainId, StdlibVersion};
use starcoin_vm_types::on_chain_config::{TransactionPublishOption, Version};
use starcoin_vm_types::on_chain_resource::LinearWithdrawCapability;
use starcoin_vm_types::token::stc::STC_TOKEN_CODE;
use starcoin_vm_types::transaction::{Package, TransactionPayload};
use starcoin_vm_types::values::VMValueCast;
use statedb::ChainStateDB;
use std::convert::TryInto;
use std::fs::File;
use std::io::Read;
use stdlib::{load_upgrade_package, StdlibCompat, STDLIB_VERSIONS};
use test_helper::dao::dao_vote_test;
use test_helper::executor::*;
use test_helper::Account;
#[stest::test]
fn test_dao_upgrade_module() -> Result<()> {
    let proposer = Account::new();
    let (chain_state, net) = prepare_genesis();

    // DAO action carried by the proposal: 0x1::UpgradeModuleDaoProposal::UpgradeModuleV2.
    let upgrade_action_tag = TypeTag::Struct(StructTag {
        address: genesis_address(),
        module: Identifier::new("UpgradeModuleDaoProposal").unwrap(),
        name: Identifier::new("UpgradeModuleV2").unwrap(),
        type_params: vec![],
    });

    // Compile the test module at the genesis address and wrap it into a package.
    let compiled = compile_modules_with_address(genesis_address(), TEST_MODULE)
        .pop()
        .unwrap();
    let upgrade_package = Package::new_with_module(compiled)?;
    let upgrade_hash = upgrade_package.crypto_hash();

    // Proposal txn: BCS-encoded args are (genesis address, package hash, 1u64, 0u64, false).
    let vote_args = vec![
        bcs_ext::to_bytes(&genesis_address()).unwrap(),
        bcs_ext::to_bytes(&upgrade_hash.to_vec()).unwrap(),
        bcs_ext::to_bytes(&1u64).unwrap(),
        bcs_ext::to_bytes(&0u64).unwrap(),
        bcs_ext::to_bytes(&false).unwrap(),
    ];
    let propose_fn = ScriptFunction::new(
        ModuleId::new(
            core_code_address(),
            Identifier::new("ModuleUpgradeScripts").unwrap(),
        ),
        Identifier::new("propose_module_upgrade_v2").unwrap(),
        vec![stc_type_tag()],
        vote_args,
    );

    // Execution txn: submit the upgrade plan for proposal 0 of `proposer`.
    let execute_args = vec![
        bcs_ext::to_bytes(proposer.address()).unwrap(),
        bcs_ext::to_bytes(&0u64).unwrap(),
    ];
    let execute_fn = ScriptFunction::new(
        ModuleId::new(
            core_code_address(),
            Identifier::new("ModuleUpgradeScripts").unwrap(),
        ),
        Identifier::new("submit_module_upgrade_plan").unwrap(),
        vec![stc_type_tag()],
        execute_args,
    );

    // Run the DAO voting flow for proposal id 0 (see test_helper::dao::dao_vote_test).
    dao_vote_test(
        &proposer,
        &chain_state,
        &net,
        propose_fn,
        upgrade_action_tag,
        execute_fn,
        0,
    )?;

    // With the plan submitted, the association account publishes the package.
    association_execute_should_success(
        &net,
        &chain_state,
        TransactionPayload::Package(upgrade_package),
    )?;

    // After the upgrade, the deployed module reports 1.
    assert_eq!(read_foo(&chain_state), 1);
    Ok(())
}
#[stest::test]
// Two-round upgrade test: a normal DAO upgrade (round 0, enforced=false) followed by an
// enforced upgrade (round 1, enforced=true). Kept byte-identical; the proposal ids
// (0 then 1) and the version arguments (1 then 2) must stay in lockstep across rounds.
fn test_dao_upgrade_module_enforced() -> Result<()> {
    let alice = Account::new();
    let (chain_state, net) = prepare_genesis();
    // DAO action type: 0x1::UpgradeModuleDaoProposal::UpgradeModuleV2.
    let dao_action_type_tag = TypeTag::Struct(StructTag {
        address: genesis_address(),
        module: Identifier::new("UpgradeModuleDaoProposal").unwrap(),
        name: Identifier::new("UpgradeModuleV2").unwrap(),
        type_params: vec![],
    });
    // Round 1: compile TEST_MODULE and push it through the normal DAO upgrade flow.
    let module = compile_modules_with_address(genesis_address(), TEST_MODULE)
        .pop()
        .unwrap();
    let package = Package::new_with_module(module)?;
    let package_hash = package.crypto_hash();
    // BCS args: (genesis address, package hash, version 1, delay 0, enforced = false).
    let vote_script_function = ScriptFunction::new(
        ModuleId::new(
            core_code_address(),
            Identifier::new("ModuleUpgradeScripts").unwrap(),
        ),
        Identifier::new("propose_module_upgrade_v2").unwrap(),
        vec![stc_type_tag()],
        vec![
            bcs_ext::to_bytes(&genesis_address()).unwrap(),
            bcs_ext::to_bytes(&package_hash.to_vec()).unwrap(),
            bcs_ext::to_bytes(&1u64).unwrap(),
            bcs_ext::to_bytes(&0u64).unwrap(),
            bcs_ext::to_bytes(&false).unwrap(),
        ],
    );
    // Submit the plan for alice's proposal id 0.
    let execute_script_function = ScriptFunction::new(
        ModuleId::new(
            core_code_address(),
            Identifier::new("ModuleUpgradeScripts").unwrap(),
        ),
        Identifier::new("submit_module_upgrade_plan").unwrap(),
        vec![stc_type_tag()],
        vec![
            bcs_ext::to_bytes(alice.address()).unwrap(),
            bcs_ext::to_bytes(&0u64).unwrap(),
        ],
    );
    dao_vote_test(
        &alice,
        &chain_state,
        &net,
        vote_script_function,
        dao_action_type_tag.clone(),
        execute_script_function,
        0,
    )?;
    association_execute_should_success(&net, &chain_state, TransactionPayload::Package(package))?;
    // TEST_MODULE's foo reads 1 after the first upgrade.
    assert_eq!(read_foo(&chain_state), 1);
    // test upgrade module enforced
    // Round 2: a fresh account proposes TEST_MODULE_1 with enforced = true and version 2.
    let alice = Account::new();
    let module = compile_modules_with_address(genesis_address(), TEST_MODULE_1)
        .pop()
        .unwrap();
    let package = Package::new_with_module(module)?;
    let package_hash = package.crypto_hash();
    // BCS args: (genesis address, package hash, version 2, delay 0, enforced = true).
    let vote_script_function = ScriptFunction::new(
        ModuleId::new(
            core_code_address(),
            Identifier::new("ModuleUpgradeScripts").unwrap(),
        ),
        Identifier::new("propose_module_upgrade_v2").unwrap(),
        vec![stc_type_tag()],
        vec![
            bcs_ext::to_bytes(&genesis_address()).unwrap(),
            bcs_ext::to_bytes(&package_hash.to_vec()).unwrap(),
            bcs_ext::to_bytes(&2u64).unwrap(),
            bcs_ext::to_bytes(&0u64).unwrap(),
            bcs_ext::to_bytes(&true).unwrap(),
        ],
    );
    // Second round uses proposal id 1.
    let execute_script_function = ScriptFunction::new(
        ModuleId::new(
            core_code_address(),
            Identifier::new("ModuleUpgradeScripts").unwrap(),
        ),
        Identifier::new("submit_module_upgrade_plan").unwrap(),
        vec![stc_type_tag()],
        vec![
            bcs_ext::to_bytes(alice.address()).unwrap(),
            bcs_ext::to_bytes(&1u64).unwrap(),
        ],
    );
    dao_vote_test(
        &alice,
        &chain_state,
        &net,
        vote_script_function,
        dao_action_type_tag,
        execute_script_function,
        1,
    )?;
    association_execute_should_success(&net, &chain_state, TransactionPayload::Package(package))?;
    // TEST_MODULE_1's foo reads 2 after the enforced upgrade.
    assert_eq!(read_foo(&chain_state), 2);
    Ok(())
}
#[stest::test]
fn test_init_script() -> Result<()> {
let alice = Account::new();
let mut genesis_config = BuiltinNetworkID::Test.genesis_config().clone();
genesis_config.stdlib_version = StdlibVersion::Version(1);
let net = ChainNetwork::new_custom(
"init_script_test".to_string(),
ChainId::new(100),
genesis_config,
)?;
let chain_state = prepare_customized_genesis(&net);
let dao_action_type_tag = TypeTag::Struct(StructTag {
address: genesis_address(),
module: Identifier::new("UpgradeModuleDaoProposal").unwrap(),
name: Identifier::new("UpgradeModule").unwrap(),
type_params: vec![],
});
let init_script = ScriptFunction::new(
ModuleId::new(
core_code_address(),
Identifier::new("PackageTxnManager").unwrap(),
),
Identifier::new("convert_TwoPhaseUpgrade_to_TwoPhaseUpgradeV2").unwrap(),
vec![],
vec![bcs_ext::to_bytes(&genesis_address()).unwrap()],
);
let module_names = vec!["Errors", "PackageTxnManager"];
let package = build_package_with_stdlib_module(
StdLibOptions::Compiled(StdlibVersion::Version(3)),
module_names,
Some(init_script),
)?;
let package_hash = package.crypto_hash();
let vote_script_function = ScriptFunction::new(
ModuleId::new(
core_code_address(),
Identifier::new("ModuleUpgradeScripts").unwrap(),
),
Identifier::new("propose_module_upgrade").unwrap(),
vec![stc_type_tag()],
vec![
bcs_ext::to_bytes(&genesis_address()).unwrap(),
bcs_ext::to_bytes(&package_hash.to_vec()).unwrap(),
bcs_ext::to_bytes(&1u64).unwrap(),
bcs_ext::to_bytes(&0u64).unwrap(),
],
);
let execute_script_function = ScriptFunction::new(
ModuleId::new(
core_code_address(),
Identifier::new("ModuleUpgradeScripts").unwrap(),
),
Identifier::new("submit_module_upgrade_plan").unwrap(),
vec![stc_type_tag()],
vec![
bcs_ext::to_bytes(alice.address()).unwrap(),
bcs_ext::to_bytes(&0u64).unwrap(),
],
);
dao_vote_test(
&alice,
&chain_state,
&net,
vote_script_function,
dao_action_type_tag,
execute_script_function,
0,
)?;
association_execute_should_success(&net, &chain_state, TransactionPayload::Package(package))?;
assert_eq!(read_two_phase_upgrade_v2_resource(&chain_state)?, false);
Ok(())
}
#[stest::test]
/// Verifies that a pre-built incremental stdlib package (v1 -> v2 blob on disk) can be
/// taken through the DAO proposal flow and published by the association account.
fn test_upgrade_stdlib_with_incremental_package() -> Result<()> {
    let alice = Account::new();
    // Custom network pinned to stdlib version 1 so the 1->2 blob applies.
    let mut genesis_config = BuiltinNetworkID::Test.genesis_config().clone();
    genesis_config.stdlib_version = StdlibVersion::Version(1);
    let net = ChainNetwork::new_custom(
        "test_stdlib_upgrade".to_string(),
        ChainId::new(100),
        genesis_config,
    )?;
    let chain_state = prepare_customized_genesis(&net);
    // v1 uses the (non-V2) UpgradeModule DAO action.
    let dao_action_type_tag = TypeTag::Struct(StructTag {
        address: genesis_address(),
        module: Identifier::new("UpgradeModuleDaoProposal").unwrap(),
        name: Identifier::new("UpgradeModule").unwrap(),
        type_params: vec![],
    });
    let path = std::path::PathBuf::from("../vm/stdlib/compiled/2/1-2/stdlib.blob")
        .canonicalize()
        .unwrap();
    // fs::read sizes its buffer from file metadata — simpler and faster than
    // File::open + read_to_end into an empty Vec.
    let bytes = std::fs::read(path)?;
    let package: Package = bcs_ext::from_bytes(&bytes)?;
    let package_hash = package.crypto_hash();
    // BCS args: (genesis address, package hash, version 1, delay 0).
    let vote_script_function = ScriptFunction::new(
        ModuleId::new(
            core_code_address(),
            Identifier::new("ModuleUpgradeScripts").unwrap(),
        ),
        Identifier::new("propose_module_upgrade").unwrap(),
        vec![stc_type_tag()],
        vec![
            bcs_ext::to_bytes(&genesis_address()).unwrap(),
            bcs_ext::to_bytes(&package_hash.to_vec()).unwrap(),
            bcs_ext::to_bytes(&1u64).unwrap(),
            bcs_ext::to_bytes(&0u64).unwrap(),
        ],
    );
    // Submit the upgrade plan for alice's proposal id 0.
    let execute_script_function = ScriptFunction::new(
        ModuleId::new(
            core_code_address(),
            Identifier::new("ModuleUpgradeScripts").unwrap(),
        ),
        Identifier::new("submit_module_upgrade_plan").unwrap(),
        vec![stc_type_tag()],
        vec![
            bcs_ext::to_bytes(alice.address()).unwrap(),
            bcs_ext::to_bytes(&0u64).unwrap(),
        ],
    );
    dao_vote_test(
        &alice,
        &chain_state,
        &net,
        vote_script_function,
        dao_action_type_tag,
        execute_script_function,
        0,
    )?;
    association_execute_should_success(&net, &chain_state, TransactionPayload::Package(package))?;
    // The 1->2 package leaves the TwoPhaseUpgradeV2 flag false.
    // (assert!(!..) instead of assert_eq!(.., false) — clippy: bool_assert_comparison.)
    assert!(!read_two_phase_upgrade_v2_resource(&chain_state)?);
    Ok(())
}
/// Walk every known stdlib version in order and perform each upgrade
/// through the full DAO flow, verifying chain-state invariants before
/// each step and the emitted upgrade/config-change events after each.
#[stest::test]
fn test_stdlib_upgrade() -> Result<()> {
    // Boot the chain from the oldest known stdlib version.
    let mut genesis_config = BuiltinNetworkID::Test.genesis_config().clone();
    let stdlib_versions = STDLIB_VERSIONS.clone();
    let mut current_version = stdlib_versions[0];
    genesis_config.stdlib_version = current_version;
    let net = ChainNetwork::new_custom(
        "test_stdlib_upgrade".to_string(),
        ChainId::new(100),
        genesis_config,
    )?;
    let chain_state = prepare_customized_genesis(&net);
    let mut proposal_id: u64 = 0;
    let alice = Account::new();
    for new_version in stdlib_versions.into_iter().skip(1) {
        // Sanity-check invariants of the version we are upgrading *from*.
        verify_version_state(current_version, &chain_state)?;
        let dao_action_type_tag = new_version.upgrade_module_type_tag();
        // `None` means both versions share the same package; nothing to do.
        let package = match load_upgrade_package(current_version, new_version)? {
            Some(package) => package,
            None => {
                info!(
                    "{:?} is same as {:?}, continue",
                    current_version, new_version
                );
                continue;
            }
        };
        let package_hash = package.crypto_hash();
        // Enforce the strict compatibility check only when the new version
        // is not backward compatible with the previous one.
        let vote_script_function = new_version.propose_module_upgrade_function(
            stc_type_tag(),
            genesis_address(),
            package_hash,
            0,
            !StdlibVersion::compatible_with_previous(&new_version),
        );
        let execute_script_function = ScriptFunction::new(
            ModuleId::new(
                core_code_address(),
                Identifier::new("ModuleUpgradeScripts").unwrap(),
            ),
            Identifier::new("submit_module_upgrade_plan").unwrap(),
            vec![stc_type_tag()],
            vec![
                bcs_ext::to_bytes(alice.address()).unwrap(),
                bcs_ext::to_bytes(&proposal_id).unwrap(),
            ],
        );
        dao_vote_test(
            &alice,
            &chain_state,
            &net,
            vote_script_function,
            dao_action_type_tag,
            execute_script_function,
            proposal_id,
        )?;
        let output = association_execute_should_success(
            &net,
            &chain_state,
            TransactionPayload::Package(package),
        )?;
        // Every successful upgrade must emit an UpgradeEvent plus a
        // Version config-change event.
        let contract_event = expect_event::<UpgradeEvent>(&output);
        let _upgrade_event = contract_event.decode_event::<UpgradeEvent>()?;
        let _version_config_event = expect_event::<ConfigChangeEvent<Version>>(&output);
        // Run any version-specific follow-up transactions.
        ext_execute_after_upgrade(new_version, &net, &chain_state)?;
        proposal_id += 1;
        current_version = new_version;
    }
    Ok(())
}
/// Runs any extra on-chain setup required after upgrading to `version`.
///
/// Only version 3 needs follow-up work (the association account must
/// claim its linear-withdraw capability); every other version is a no-op.
fn ext_execute_after_upgrade(
    version: StdlibVersion,
    net: &ChainNetwork,
    chain_state: &ChainStateDB,
) -> Result<()> {
    if let StdlibVersion::Version(3) = version {
        // v3: have the association account take the
        // LinearWithdrawCapability via StdlibUpgradeScripts.
        let take_liner_time_capability = ScriptFunction::new(
            ModuleId::new(
                core_code_address(),
                Identifier::new("StdlibUpgradeScripts").unwrap(),
            ),
            Identifier::new("take_linear_withdraw_capability").unwrap(),
            vec![],
            vec![],
        );
        association_execute_should_success(
            net,
            chain_state,
            TransactionPayload::ScriptFunction(take_liner_time_capability),
        )?;
    }
    Ok(())
}
/// Asserts the chain-state invariants expected while running on a given
/// stdlib `version` (called just before upgrading away from it).
fn verify_version_state<R>(version: StdlibVersion, chain_state: &R) -> Result<()>
where
    R: ChainStateReader,
{
    match version {
        StdlibVersion::Version(1) => {
            //TODO
        }
        StdlibVersion::Version(2) => {
            // v2 predates the Treasury and must not yet enforce
            // two-phase upgrade v2.
            assert!(
                chain_state.get_stc_treasury()?.is_none(),
                "expect treasury is none."
            );
            assert_eq!(read_two_phase_upgrade_v2_resource(chain_state)?, false);
        }
        StdlibVersion::Version(3) => {
            // v3 introduces the Treasury holding the full STC supply and
            // a LinearWithdrawCapability owned by the association account.
            assert!(
                chain_state.get_stc_treasury()?.is_some(),
                "expect treasury is some."
            );
            assert_eq!(
                chain_state.get_stc_info().unwrap().unwrap().total_value,
                TOTAL_STC_AMOUNT.scaling()
            );
            let withdraw_cap = chain_state
                .get_resource_by_access_path::<LinearWithdrawCapability>(
                    LinearWithdrawCapability::resource_path_for(
                        association_address(),
                        STC_TOKEN_CODE.clone().try_into()?,
                    ),
                )?;
            assert!(
                withdraw_cap.is_some(),
                "expect LinearWithdrawCapability exist at association_address"
            );
        }
        _ => {
            //do nothing.
        }
    }
    Ok(())
}
/// Even when on-chain module publishing is locked
/// (`TransactionPublishOption::locked()`), a module upgrade approved via
/// the DAO (v2 proposal flavor) must still go through.
#[stest::test]
fn test_upgrade_stdlib_with_disallowed_publish_option() -> Result<()> {
    let alice = Account::new();
    let mut genesis_config = BuiltinNetworkID::Test.genesis_config().clone();
    // Forbid direct module publishing; upgrades must use the DAO path.
    genesis_config.publishing_option = TransactionPublishOption::locked();
    let net = ChainNetwork::new_custom(
        "test_stdlib_upgrade".to_string(),
        ChainId::new(100),
        genesis_config,
    )?;
    let chain_state = prepare_customized_genesis(&net);
    // v2 flavor of the module-upgrade DAO action.
    let dao_action_type_tag = TypeTag::Struct(StructTag {
        address: genesis_address(),
        module: Identifier::new("UpgradeModuleDaoProposal").unwrap(),
        name: Identifier::new("UpgradeModuleV2").unwrap(),
        type_params: vec![],
    });
    let module = compile_modules_with_address(genesis_address(), TEST_MODULE)
        .pop()
        .unwrap();
    let package = Package::new_with_module(module)?;
    let package_hash = package.crypto_hash();
    // v2 proposal takes an extra trailing bool (strict compatibility
    // enforcement), `false` here.
    let vote_script_function = ScriptFunction::new(
        ModuleId::new(
            core_code_address(),
            Identifier::new("ModuleUpgradeScripts").unwrap(),
        ),
        Identifier::new("propose_module_upgrade_v2").unwrap(),
        vec![stc_type_tag()],
        vec![
            bcs_ext::to_bytes(&genesis_address()).unwrap(),
            bcs_ext::to_bytes(&package_hash.to_vec()).unwrap(),
            bcs_ext::to_bytes(&1u64).unwrap(),
            bcs_ext::to_bytes(&0u64).unwrap(),
            bcs_ext::to_bytes(&false).unwrap(),
        ],
    );
    let execute_script_function = ScriptFunction::new(
        ModuleId::new(
            core_code_address(),
            Identifier::new("ModuleUpgradeScripts").unwrap(),
        ),
        Identifier::new("submit_module_upgrade_plan").unwrap(),
        vec![stc_type_tag()],
        vec![
            bcs_ext::to_bytes(alice.address()).unwrap(),
            bcs_ext::to_bytes(&0u64).unwrap(),
        ],
    );
    dao_vote_test(
        &alice,
        &chain_state,
        &net,
        vote_script_function,
        dao_action_type_tag,
        execute_script_function,
        0,
    )?;
    association_execute_should_success(&net, &chain_state, TransactionPayload::Package(package))?;
    // The freshly published test module is live: M::foo() returns 1.
    assert_eq!(read_foo(&chain_state), 1);
    Ok(())
}
/// Reads the `TwoPhaseUpgradeV2Resource` stored at the genesis address and
/// reports whether strict (enforced) two-phase upgrade is switched on.
/// A missing resource is treated as "not enforced".
fn read_two_phase_upgrade_v2_resource<R>(state_reader: &R) -> Result<bool>
where
    R: ChainStateReader,
{
    let resource = state_reader.get_resource::<TwoPhaseUpgradeV2Resource>(genesis_address())?;
    Ok(match resource {
        Some(tpu) => tpu.enforced(),
        None => false,
    })
}
/// Calls the read-only function `M::foo` published at the genesis address
/// and returns its single `u8` result. Panics if execution fails or the
/// function does not return exactly one value.
fn read_foo(state_view: &dyn StateView) -> u8 {
    let module_id = ModuleId::new(genesis_address(), Identifier::new("M").unwrap());
    let function_name = Identifier::new("foo").unwrap();
    let mut values =
        execute_readonly_function(state_view, &module_id, &function_name, vec![], vec![]).unwrap();
    assert_eq!(values.len(), 1);
    // Each returned entry is a (type, value) pair; cast the value to u8.
    let (_, value) = values.pop().unwrap();
    value.cast().unwrap()
}
| 33.738095 | 98 | 0.606664 |
aced1e4ec4db58b4e850c7b4e7f147de00772e62 | 150 | mod conversion_impls;
mod deserialisation_impls;
mod error;
mod helpers;
mod parse;
use parse::SchemaFromString;
pub use error::SchemaParsingError;
| 15 | 34 | 0.813333 |
75ee162df2477b252cc247796dbe83662d1a6c10 | 8,070 | #[macro_use]
extern crate glium;
use std::io::Cursor;
/// Renders a normal-mapped, diffuse-textured quad with Blinn-Phong
/// lighting (glium tutorial 14), driving the window via the glutin
/// event loop at roughly 60 fps until close is requested.
fn main() {
    #[allow(unused_imports)]
    use glium::{glutin, Surface};

    // Window / GL context setup with a 24-bit depth buffer.
    let event_loop = glutin::event_loop::EventLoop::new();
    let window_builder = glutin::window::WindowBuilder::new();
    let context_builder = glutin::ContextBuilder::new().with_depth_buffer(24);
    let display = glium::Display::new(window_builder, context_builder, &event_loop).unwrap();

    #[derive(Copy, Clone)]
    struct Vertex {
        position: [f32; 3],
        normal: [f32; 3],
        tex_coords: [f32; 2],
    }
    implement_vertex!(Vertex, position, normal, tex_coords);

    // A unit quad in the z = 0 plane, drawn below as a triangle strip.
    let quad = [
        Vertex { position: [-1.0, 1.0, 0.0], normal: [0.0, 0.0, -1.0], tex_coords: [0.0, 1.0] },
        Vertex { position: [1.0, 1.0, 0.0], normal: [0.0, 0.0, -1.0], tex_coords: [1.0, 1.0] },
        Vertex { position: [-1.0, -1.0, 0.0], normal: [0.0, 0.0, -1.0], tex_coords: [0.0, 0.0] },
        Vertex { position: [1.0, -1.0, 0.0], normal: [0.0, 0.0, -1.0], tex_coords: [1.0, 0.0] },
    ];
    let shape = glium::vertex::VertexBuffer::new(&display, &quad).unwrap();

    // Diffuse texture is sRGB; rows are flipped because image data is
    // top-to-bottom while GL expects bottom-to-top.
    let diffuse_image = image::load(
        Cursor::new(&include_bytes!("../book/tuto-14-diffuse.jpg")[..]),
        image::ImageFormat::Jpeg,
    )
    .unwrap()
    .to_rgba8();
    let dimensions = diffuse_image.dimensions();
    let raw_diffuse =
        glium::texture::RawImage2d::from_raw_rgba_reversed(&diffuse_image.into_raw(), dimensions);
    let diffuse_texture = glium::texture::SrgbTexture2d::new(&display, raw_diffuse).unwrap();

    // The normal map stays linear (plain Texture2d, not sRGB).
    let normal_image = image::load(
        Cursor::new(&include_bytes!("../book/tuto-14-normal.png")[..]),
        image::ImageFormat::Png,
    )
    .unwrap()
    .to_rgba8();
    let dimensions = normal_image.dimensions();
    let raw_normal =
        glium::texture::RawImage2d::from_raw_rgba_reversed(&normal_image.into_raw(), dimensions);
    let normal_map = glium::texture::Texture2d::new(&display, raw_normal).unwrap();

    let vertex_shader_src = r#"
        #version 150
        in vec3 position;
        in vec3 normal;
        in vec2 tex_coords;
        out vec3 v_normal;
        out vec3 v_position;
        out vec2 v_tex_coords;
        uniform mat4 perspective;
        uniform mat4 view;
        uniform mat4 model;
        void main() {
            v_tex_coords = tex_coords;
            mat4 modelview = view * model;
            v_normal = transpose(inverse(mat3(modelview))) * normal;
            gl_Position = perspective * modelview * vec4(position, 1.0);
            v_position = gl_Position.xyz / gl_Position.w;
        }
    "#;
    let fragment_shader_src = r#"
        #version 140
        in vec3 v_normal;
        in vec3 v_position;
        in vec2 v_tex_coords;
        out vec4 color;
        uniform vec3 u_light;
        uniform sampler2D diffuse_tex;
        uniform sampler2D normal_tex;
        const vec3 specular_color = vec3(1.0, 1.0, 1.0);
        mat3 cotangent_frame(vec3 normal, vec3 pos, vec2 uv) {
            vec3 dp1 = dFdx(pos);
            vec3 dp2 = dFdy(pos);
            vec2 duv1 = dFdx(uv);
            vec2 duv2 = dFdy(uv);
            vec3 dp2perp = cross(dp2, normal);
            vec3 dp1perp = cross(normal, dp1);
            vec3 T = dp2perp * duv1.x + dp1perp * duv2.x;
            vec3 B = dp2perp * duv1.y + dp1perp * duv2.y;
            float invmax = inversesqrt(max(dot(T, T), dot(B, B)));
            return mat3(T * invmax, B * invmax, normal);
        }
        void main() {
            vec3 diffuse_color = texture(diffuse_tex, v_tex_coords).rgb;
            vec3 ambient_color = diffuse_color * 0.1;
            vec3 normal_map = texture(normal_tex, v_tex_coords).rgb;
            mat3 tbn = cotangent_frame(v_normal, v_position, v_tex_coords);
            vec3 real_normal = normalize(tbn * -(normal_map * 2.0 - 1.0));
            float diffuse = max(dot(real_normal, normalize(u_light)), 0.0);
            vec3 camera_dir = normalize(-v_position);
            vec3 half_direction = normalize(normalize(u_light) + camera_dir);
            float specular = pow(max(dot(half_direction, real_normal), 0.0), 16.0);
            color = vec4(ambient_color + diffuse * diffuse_color + specular * specular_color, 1.0);
        }
    "#;
    let program =
        glium::Program::from_source(&display, vertex_shader_src, fragment_shader_src, None)
            .unwrap();

    event_loop.run(move |event, _, control_flow| {
        // Schedule the next wake-up ~16.67 ms from now (about 60 fps).
        let next_frame_time =
            std::time::Instant::now() + std::time::Duration::from_nanos(16_666_667);
        *control_flow = glutin::event_loop::ControlFlow::WaitUntil(next_frame_time);
        // Only fall through to drawing on init / timer-fired events;
        // everything else returns early (close exits the loop).
        match event {
            glutin::event::Event::WindowEvent { event, .. } => match event {
                glutin::event::WindowEvent::CloseRequested => {
                    *control_flow = glutin::event_loop::ControlFlow::Exit;
                    return;
                }
                _ => return,
            },
            glutin::event::Event::NewEvents(cause) => match cause {
                glutin::event::StartCause::ResumeTimeReached { .. } => (),
                glutin::event::StartCause::Init => (),
                _ => return,
            },
            _ => return,
        }

        let mut frame = display.draw();
        frame.clear_color_and_depth((0.0, 0.0, 1.0, 1.0), 1.0);

        // Identity model matrix: the quad stays at the origin.
        let model = [
            [1.0, 0.0, 0.0, 0.0],
            [0.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 0.0, 1.0f32],
        ];
        let view = view_matrix(&[0.5, 0.2, -3.0], &[-0.5, -0.2, 3.0], &[0.0, 1.0, 0.0]);
        // Perspective projection derived from the current framebuffer
        // aspect ratio (fov = pi/3, near 0.1, far 1024).
        let (width, height) = frame.get_dimensions();
        let aspect_ratio = height as f32 / width as f32;
        let fov: f32 = 3.141592 / 3.0;
        let zfar = 1024.0;
        let znear = 0.1;
        let f = 1.0 / (fov / 2.0).tan();
        let perspective = [
            [f * aspect_ratio, 0.0, 0.0, 0.0],
            [0.0, f, 0.0, 0.0],
            [0.0, 0.0, (zfar + znear) / (zfar - znear), 1.0],
            [0.0, 0.0, -(2.0 * zfar * znear) / (zfar - znear), 0.0],
        ];

        let light = [1.4, 0.4, 0.7f32];
        let params = glium::DrawParameters {
            depth: glium::Depth {
                test: glium::draw_parameters::DepthTest::IfLess,
                write: true,
                ..Default::default()
            },
            ..Default::default()
        };
        frame
            .draw(
                &shape,
                glium::index::NoIndices(glium::index::PrimitiveType::TriangleStrip),
                &program,
                &uniform! { model: model, view: view, perspective: perspective,
                            u_light: light, diffuse_tex: &diffuse_texture, normal_tex: &normal_map },
                &params,
            )
            .unwrap();
        frame.finish().unwrap();
    });
}
/// Builds a right-handed look-at view matrix (column-major, suitable for
/// uploading as a GLSL `mat4`) from a camera `position`, a viewing
/// `direction`, and an approximate `up` vector.
fn view_matrix(position: &[f32; 3], direction: &[f32; 3], up: &[f32; 3]) -> [[f32; 4]; 4] {
    // Small local helpers keep the basis construction readable.
    fn normalize(v: [f32; 3]) -> [f32; 3] {
        let len = (v[0] * v[0] + v[1] * v[1] + v[2] * v[2]).sqrt();
        [v[0] / len, v[1] / len, v[2] / len]
    }
    fn cross(a: &[f32; 3], b: &[f32; 3]) -> [f32; 3] {
        [
            a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0],
        ]
    }
    fn dot(a: &[f32; 3], b: &[f32; 3]) -> f32 {
        a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
    }

    // Orthonormal camera basis: forward, side (right), and corrected up.
    let f = normalize(*direction);
    let s = normalize(cross(up, &f));
    let u = cross(&f, &s);
    // Translation expressed in the camera basis.
    let p = [-dot(position, &s), -dot(position, &u), -dot(position, &f)];
    [
        [s[0], u[0], f[0], 0.0],
        [s[1], u[1], f[1], 0.0],
        [s[2], u[2], f[2], 0.0],
        [p[0], p[1], p[2], 1.0],
    ]
}
| 36.026786 | 106 | 0.503717 |
ac6137cccd56505b8ae40fe62e5c2cc74ed210bc | 1,042 | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT license OR Apache 2.0
use anyhow::{Context as AnyhowContext, Result};
use tracing::instrument;
#[instrument]
pub async fn certgen_main(output_base_path: String, host_san: String) -> Result<()> {
use std::fs;
use std::path::PathBuf;
let path = PathBuf::from(output_base_path);
if let Some(parent) = path.parent() {
fs::create_dir_all(parent).context("Directory creation must succeed for certs")?;
}
let cert =
rcgen::generate_simple_self_signed(vec![host_san]).context("Certificate generation failed")?;
let public_pem = cert.serialize_pem()?;
let private_pem = cert.serialize_private_key_pem();
fs::write(
path.with_file_name(path.file_name().unwrap().to_str().unwrap().to_string() + ".pub.pem"),
&public_pem,
)
.context("Failed writing public key")?;
fs::write(
path.with_file_name(path.file_name().unwrap().to_str().unwrap().to_string() + ".priv.pem"),
&private_pem,
)
.context("Failed writing private key")?;
Ok(())
}
| 34.733333 | 97 | 0.698656 |
230b00c0f8f552767f0c061c10354cb7dc834e96 | 2,296 | use syntax::ast;
use syntax::ext::base;
use syntax::ext::build::AstBuilder;
use syntax::symbol::Symbol;
use syntax::tokenstream;
use std::string::String;
/// Implements the built-in `concat!` macro: evaluates each argument to a
/// literal, stringifies it, and splices the concatenation back into the
/// AST as a single string literal expression.
pub fn expand_syntax_ext(
    cx: &mut base::ExtCtxt<'_>,
    sp: syntax_pos::Span,
    tts: &[tokenstream::TokenTree],
) -> Box<dyn base::MacResult + 'static> {
    let es = match base::get_exprs_from_tts(cx, sp, tts) {
        Some(e) => e,
        None => return base::DummyResult::expr(sp),
    };
    let mut accumulator = String::new();
    // Spans of non-literal arguments, reported together at the end.
    let mut missing_literal = vec![];
    // Set when an argument already failed to parse upstream; suppresses a
    // second, redundant diagnostic here.
    let mut has_errors = false;
    for e in es {
        match e.node {
            ast::ExprKind::Lit(ref lit) => match lit.node {
                // String-like literals are appended verbatim (float
                // literals keep their source spelling).
                ast::LitKind::Str(ref s, _)
                | ast::LitKind::Err(ref s)
                | ast::LitKind::Float(ref s, _)
                | ast::LitKind::FloatUnsuffixed(ref s) => {
                    accumulator.push_str(&s.as_str());
                }
                ast::LitKind::Char(c) => {
                    accumulator.push(c);
                }
                // Integers and bools are rendered via their Display form.
                ast::LitKind::Int(i, ast::LitIntType::Unsigned(_))
                | ast::LitKind::Int(i, ast::LitIntType::Signed(_))
                | ast::LitKind::Int(i, ast::LitIntType::Unsuffixed) => {
                    accumulator.push_str(&i.to_string());
                }
                ast::LitKind::Bool(b) => {
                    accumulator.push_str(&b.to_string());
                }
                // `concat!` produces a str, not bytes, so byte and
                // byte-string literals are rejected.
                ast::LitKind::Byte(..) | ast::LitKind::ByteStr(..) => {
                    cx.span_err(e.span, "cannot concatenate a byte string literal");
                }
            },
            ast::ExprKind::Err => {
                has_errors = true;
            }
            _ => {
                missing_literal.push(e.span);
            }
        }
    }
    if missing_literal.len() > 0 {
        let mut err = cx.struct_span_err(missing_literal, "expected a literal");
        err.note("only literals (like `\"foo\"`, `42` and `3.14`) can be passed to `concat!()`");
        err.emit();
        return base::DummyResult::expr(sp);
    } else if has_errors {
        return base::DummyResult::expr(sp);
    }
    let sp = sp.apply_mark(cx.current_expansion.mark);
    base::MacEager::expr(cx.expr_str(sp, Symbol::intern(&accumulator)))
}
| 35.875 | 97 | 0.508711 |
e653a31725ac1b5036c97098964ecd98d92f6d24 | 8,148 | use diesel::*;
use schema::*;
/// `belongs_to` must still work when the Rust struct name
/// (`OtherComment`) differs from the table name, mapped via
/// `#[table_name]`.
#[test]
fn association_where_struct_name_doesnt_match_table_name() {
    #[derive(PartialEq, Eq, Debug, Clone, Queryable, Identifiable, Associations)]
    #[belongs_to(Post)]
    #[table_name = "comments"]
    struct OtherComment {
        id: i32,
        post_id: i32,
    }
    let connection = connection_with_sean_and_tess_in_users_table();
    let sean = find_user_by_name("Sean", &connection);
    insert_into(posts::table)
        .values(&sean.new_post("Hello", None))
        .execute(&connection)
        .unwrap();
    let post = posts::table.first::<Post>(&connection).unwrap();
    insert_into(comments::table)
        .values(&NewComment(post.id, "comment"))
        .execute(&connection)
        .unwrap();
    // `belonging_to` should resolve through the mapped table name.
    let comment_text = OtherComment::belonging_to(&post)
        .select(comments::text)
        .first::<String>(&connection);
    assert_eq!(Ok("comment".into()), comment_text);
}
/// Associations between tables whose names contain underscores
/// (`special_posts` / `special_comments`) must derive and resolve
/// correctly end-to-end.
#[test]
#[cfg(not(any(feature = "sqlite", feature = "mysql")))]
fn association_where_parent_and_child_have_underscores() {
    #[derive(PartialEq, Eq, Debug, Clone, Queryable, Identifiable, Associations)]
    #[belongs_to(User)]
    pub struct SpecialPost {
        id: i32,
        user_id: i32,
        title: String,
    }
    #[derive(Insertable)]
    #[table_name = "special_posts"]
    struct NewSpecialPost {
        user_id: i32,
        title: String,
    }
    impl SpecialPost {
        fn new(user_id: i32, title: &str) -> NewSpecialPost {
            NewSpecialPost {
                user_id: user_id,
                title: title.to_owned(),
            }
        }
    }
    #[derive(PartialEq, Eq, Debug, Clone, Queryable, Identifiable, Associations)]
    #[belongs_to(SpecialPost)]
    struct SpecialComment {
        id: i32,
        special_post_id: i32,
    }
    impl SpecialComment {
        fn new(special_post_id: i32) -> NewSpecialComment {
            NewSpecialComment {
                special_post_id: special_post_id,
            }
        }
    }
    #[derive(Insertable)]
    #[table_name = "special_comments"]
    struct NewSpecialComment {
        special_post_id: i32,
    }
    let connection = connection_with_sean_and_tess_in_users_table();
    let sean = find_user_by_name("Sean", &connection);
    let new_post = SpecialPost::new(sean.id, "title");
    let special_post: SpecialPost = insert_into(special_posts::table)
        .values(&new_post)
        .get_result(&connection)
        .unwrap();
    let new_comment = SpecialComment::new(special_post.id);
    insert_into(special_comments::table)
        .values(&new_comment)
        .execute(&connection)
        .unwrap();
    // The derived association resolves the child through its parent.
    let comment: SpecialComment = SpecialComment::belonging_to(&special_post)
        .first(&connection)
        .unwrap();
    assert_eq!(special_post.id, comment.special_post_id);
}
// This module has no test functions, as it's only to test compilation.
/// Compile-only check: `belongs_to` associations may use a nullable
/// (`Option<i32>`) foreign key.
mod associations_can_have_nullable_foreign_keys {
    #![allow(dead_code)]
    table! {
        foos{
            id -> Integer,
        }
    }
    table! {
        bars {
            id -> Integer,
            foo_id -> Nullable<Integer>,
        }
    }
    // This test has no assertions, as it is for compilation purposes only.
    #[derive(Identifiable)]
    pub struct Foo {
        id: i32,
    }
    #[belongs_to(Foo)]
    #[derive(Identifiable, Associations)]
    pub struct Bar {
        id: i32,
        // Nullable FK: the whole point of this module.
        foo_id: Option<i32>,
    }
}
// This module has no test functions, as it's only to test compilation.
/// Compile-only check: `#[derive(Insertable)]` handles struct definitions
/// carrying lifetime parameters on several borrowed fields.
mod multiple_lifetimes_in_insertable_struct_definition {
    #![allow(dead_code)]
    use schema::posts;
    #[derive(Insertable)]
    #[table_name = "posts"]
    pub struct MyPost<'a> {
        title: &'a str,
        body: &'a str,
    }
}
/// Compile-only check: the derives must not hard-code the lifetime name
/// `'a` — this struct mixes `'a` and `'b`.
mod lifetimes_with_names_other_than_a {
    #![allow(dead_code)]
    use schema::posts;
    #[derive(Insertable)]
    #[table_name = "posts"]
    pub struct MyPost<'a, 'b> {
        id: i32,
        title: &'b str,
        body: &'a str,
    }
}
/// Compile-only check: `Insertable` fields may be `Cow<'_, str>`.
mod insertable_with_cow {
    #![allow(dead_code)]
    use schema::posts;
    use std::borrow::Cow;
    #[derive(Insertable)]
    #[table_name = "posts"]
    pub struct MyPost<'a> {
        id: i32,
        title: Cow<'a, str>,
        body: Cow<'a, str>,
    }
}
/// Compile-only check: `belongs_to` honors an explicit
/// `foreign_key = "author_id"` override instead of the `user_id` default.
mod custom_foreign_keys_are_respected_on_belongs_to {
    #![allow(dead_code)]
    use schema::User;
    table! { special_posts { id -> Integer, author_id -> Integer, } }
    #[derive(Identifiable, Associations)]
    #[belongs_to(User, foreign_key = "author_id")]
    pub struct SpecialPost {
        id: i32,
        author_id: i32,
    }
}
/// Compile-only check: `Identifiable` works when the primary key is a
/// borrowed field (`&'a i32`).
mod derive_identifiable_with_lifetime {
    #![allow(dead_code)]
    use schema::posts;
    #[derive(Identifiable)]
    pub struct Post<'a> {
        id: &'a i32,
    }
}
/// `#[primary_key(foo_id)]` must make `id()` return the annotated field
/// rather than the conventional `id` column, and `table()` must still
/// resolve through `#[table_name]`.
#[test]
fn derive_identifiable_with_non_standard_pk() {
    use diesel::associations::*;
    #[derive(Identifiable)]
    #[table_name = "posts"]
    #[primary_key(foo_id)]
    #[allow(dead_code)]
    struct Foo<'a> {
        id: i32,
        foo_id: &'a str,
        foo: i32,
    }
    let foo1 = Foo {
        id: 1,
        foo_id: "hi",
        foo: 2,
    };
    let foo2 = Foo {
        id: 2,
        foo_id: "there",
        foo: 3,
    };
    // `id()` returns a reference to the declared primary key, not `id`.
    assert_eq!(&"hi", foo1.id());
    assert_eq!(&"there", foo2.id());
    // Fails to compile if wrong table is generated.
    let _: posts::table = Foo::<'static>::table();
}
/// A composite `#[primary_key(foo_id, bar_id)]` must make `id()` return
/// a tuple of references to both key fields, in declaration order.
#[test]
fn derive_identifiable_with_composite_pk() {
    use diesel::associations::Identifiable;
    #[derive(Identifiable)]
    #[primary_key(foo_id, bar_id)]
    #[table_name = "posts"]
    #[allow(dead_code)]
    struct Foo {
        id: i32,
        foo_id: i32,
        bar_id: i32,
        foo: i32,
    }
    let foo1 = Foo {
        id: 1,
        foo_id: 2,
        bar_id: 3,
        foo: 4,
    };
    let foo2 = Foo {
        id: 5,
        foo_id: 6,
        bar_id: 7,
        foo: 8,
    };
    assert_eq!((&2, &3), foo1.id());
    assert_eq!((&6, &7), foo2.id());
}
/// Inserting with `Option` for a NOT NULL column that has a default:
/// `None` must fall back to the column default, `Some` is used as-is.
#[test]
fn derive_insertable_with_option_for_not_null_field_with_default() {
    #[derive(Insertable)]
    #[table_name = "users"]
    struct NewUser {
        id: Option<i32>,
        name: &'static str,
    }
    let conn = connection();
    let data = vec![
        NewUser {
            id: None,
            name: "Jim",
        },
        NewUser {
            id: Some(123),
            name: "Bob",
        },
    ];
    assert_eq!(
        Ok(2),
        insert_into(users::table).values(&data).execute(&conn)
    );
    let users = users::table.load::<User>(&conn).unwrap();
    let jim = users.iter().find(|u| u.name == "Jim");
    let bob = users.iter().find(|u| u.name == "Bob");
    // Jim got a generated id; Bob kept the explicit 123.
    assert!(jim.is_some());
    assert_eq!(Some(&User::new(123, "Bob")), bob);
}
/// (PostgreSQL only) An `Insertable` field whose value is a SQL function
/// expression (`nextval(...)`) — which cannot be converted to a nullable
/// expression — must still insert successfully.
#[test]
#[cfg(feature = "postgres")]
fn derive_insertable_with_field_that_cannot_convert_expression_to_nullable() {
    use diesel::sql_types::{Serial, Text};
    sql_function!(nextval, nextval_t, (a: Text) -> Serial);
    #[derive(Insertable)]
    #[table_name = "users"]
    struct NewUser {
        id: nextval<&'static str>,
        name: &'static str,
    }
    let conn = connection();
    let data = NewUser {
        // id comes from the sequence, evaluated server-side.
        id: nextval("users_id_seq"),
        name: "Jim",
    };
    assert_eq!(
        Ok(1),
        insert_into(users::table).values(&data).execute(&conn)
    );
    let users = users::table.load::<User>(&conn).unwrap();
    let jim = users.iter().find(|u| u.name == "Jim");
    assert!(jim.is_some());
}
/// `Queryable` must derive for a struct whose fields are themselves
/// `Queryable` structs, mapping an inner-join row onto nested values.
#[test]
fn nested_queryable_derives() {
    #[derive(Queryable, Debug, PartialEq)]
    struct UserAndPost {
        user: User,
        post: Post,
    }
    let conn = connection_with_sean_and_tess_in_users_table();
    let sean = find_user_by_name("Sean", &conn);
    insert_into(posts::table)
        .values(&sean.new_post("Hi", None))
        .execute(&conn)
        .unwrap();
    let post = posts::table.first(&conn).unwrap();
    let expected = UserAndPost { user: sean, post };
    let actual = users::table.inner_join(posts::table).get_result(&conn);
    assert_eq!(Ok(expected), actual);
}
| 23.964706 | 81 | 0.578056 |
8adb1fc3391592d303466f0d237ef71550255807 | 847 | use std::ops::{Deref, DerefMut};
use crate::imgui::Sdl2ImguiManager;
// For now just wrap the input helper that skulpin provides
/// ECS resource exposing the [`Sdl2ImguiManager`]; currently a thin
/// wrapper around the manager that skulpin provides.
pub struct Sdl2ImguiManagerResource {
    pub sdl2_imgui_manager: Sdl2ImguiManager,
}
impl Sdl2ImguiManagerResource {
    /// Wraps the given [`Sdl2ImguiManager`] as a resource.
    /// `Default` is deliberately not provided: construction requires an
    /// already-initialized manager. (Previous doc comment was
    /// copy-pasted from a TimeState resource.)
    #[allow(clippy::new_without_default)]
    pub fn new(sdl2_imgui_manager: Sdl2ImguiManager) -> Self {
        Sdl2ImguiManagerResource { sdl2_imgui_manager }
    }
}
// Deref lets the resource be used anywhere a `&Sdl2ImguiManager` is
// expected.
impl Deref for Sdl2ImguiManagerResource {
    type Target = Sdl2ImguiManager;
    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.sdl2_imgui_manager
    }
}
// Mutable counterpart of the Deref impl above.
impl DerefMut for Sdl2ImguiManagerResource {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.sdl2_imgui_manager
    }
}
| 26.46875 | 98 | 0.709563 |
f43f3c49f08ef432a51c603086af85d280e5d45f | 766 | use tracing::Level;
use tracing_subscriber::FmtSubscriber;
mod args;
mod cycle;
mod destination;
mod devices;
mod flash;
mod list;
mod parity;
mod plan;
mod range;
mod record;
mod run;
mod scan;
mod serial;
mod slot;
mod status;
mod telegram;
/// Entry point: installs a global `tracing` subscriber, parses the
/// command line with `argh`, and dispatches the chosen subcommand.
fn main() -> Result<(), String> {
    // a builder for `FmtSubscriber`.
    let subscriber = FmtSubscriber::builder()
        // TRACE is the most verbose level, so this enables every
        // span/event (trace, debug, info, warn, error).
        .with_max_level(Level::TRACE)
        // completes the builder.
        .finish();
    tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed");
    let args: args::TopLevel = argh::from_env();
    run::run(args.invocation)
}
| 21.885714 | 100 | 0.677546 |
f75694118c90f34f344ba953413de4c824e20f6c | 3,041 | #[test]
/// Layout fixture (generated-style stretch test): a 60x50 column
/// container with an absolutely positioned child at `start: 50%` /
/// `width: 100%`, containing two `width: 100%` grandchildren; asserts
/// every computed size and position.
fn percent_absolute_position() {
    let layout = stretch::node::Node::new(
        stretch::style::Style {
            flex_direction: stretch::style::FlexDirection::Column,
            size: stretch::geometry::Size {
                width: stretch::style::Dimension::Points(60f32),
                height: stretch::style::Dimension::Points(50f32),
                ..Default::default()
            },
            ..Default::default()
        },
        vec![&stretch::node::Node::new(
            stretch::style::Style {
                position_type: stretch::style::PositionType::Absolute,
                size: stretch::geometry::Size {
                    width: stretch::style::Dimension::Percent(1f32),
                    height: stretch::style::Dimension::Points(50f32),
                    ..Default::default()
                },
                position: stretch::geometry::Rect {
                    start: stretch::style::Dimension::Percent(0.5f32),
                    ..Default::default()
                },
                ..Default::default()
            },
            vec![
                &stretch::node::Node::new(
                    stretch::style::Style {
                        size: stretch::geometry::Size {
                            width: stretch::style::Dimension::Percent(1f32),
                            ..Default::default()
                        },
                        ..Default::default()
                    },
                    vec![],
                ),
                &stretch::node::Node::new(
                    stretch::style::Style {
                        size: stretch::geometry::Size {
                            width: stretch::style::Dimension::Percent(1f32),
                            ..Default::default()
                        },
                        ..Default::default()
                    },
                    vec![],
                ),
            ],
        )],
    )
    .compute_layout(stretch::geometry::Size::undefined())
    .unwrap();
    // Root keeps its fixed 60x50 size at the origin.
    assert_eq!(layout.size.width, 60f32);
    assert_eq!(layout.size.height, 50f32);
    assert_eq!(layout.location.x, 0f32);
    assert_eq!(layout.location.y, 0f32);
    // Absolute child: full width (60), offset by 50% of parent width (30).
    assert_eq!(layout.children[0usize].size.width, 60f32);
    assert_eq!(layout.children[0usize].size.height, 50f32);
    assert_eq!(layout.children[0usize].location.x, 30f32);
    assert_eq!(layout.children[0usize].location.y, 0f32);
    // Grandchildren split the child's 60px row: 30px each, side by side.
    assert_eq!(layout.children[0usize].children[0usize].size.width, 30f32);
    assert_eq!(layout.children[0usize].children[0usize].size.height, 50f32);
    assert_eq!(layout.children[0usize].children[0usize].location.x, 0f32);
    assert_eq!(layout.children[0usize].children[0usize].location.y, 0f32);
    assert_eq!(layout.children[0usize].children[1usize].size.width, 30f32);
    assert_eq!(layout.children[0usize].children[1usize].size.height, 50f32);
    assert_eq!(layout.children[0usize].children[1usize].location.x, 30f32);
    assert_eq!(layout.children[0usize].children[1usize].location.y, 0f32);
}
| 43.442857 | 76 | 0.519237 |
08c2986eccfb42b77a359b9ea151dd8aa17fd70a | 11,596 | // Generated from definition io.k8s.api.core.v1.SecurityContext
// NOTE: machine-generated from the Kubernetes OpenAPI definition
// io.k8s.api.core.v1.SecurityContext — regenerate rather than hand-edit.
/// SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct SecurityContext {
    /// AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN
    pub allow_privilege_escalation: Option<bool>,
    /// The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
    pub capabilities: Option<crate::api::core::v1::Capabilities>,
    /// Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
    pub privileged: Option<bool>,
    /// procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
    pub proc_mount: Option<String>,
    /// Whether this container has a read-only root filesystem. Default is false.
    pub read_only_root_filesystem: Option<bool>,
    /// The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
    pub run_as_group: Option<i64>,
    /// Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
    pub run_as_non_root: Option<bool>,
    /// The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
    pub run_as_user: Option<i64>,
    /// The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
    pub se_linux_options: Option<crate::api::core::v1::SELinuxOptions>,
    /// Windows security options.
    pub windows_options: Option<crate::api::core::v1::WindowsSecurityContextOptions>,
}
// Hand-rolled (generated) Deserialize: maps camelCase JSON member names
// onto the struct's snake_case fields; unknown members are skipped
// rather than rejected, and every field is optional.
impl<'de> serde::Deserialize<'de> for SecurityContext {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
        // Key enum recognized by the map visitor; `Other` swallows any
        // unrecognized member for forward compatibility.
        #[allow(non_camel_case_types)]
        enum Field {
            Key_allow_privilege_escalation,
            Key_capabilities,
            Key_privileged,
            Key_proc_mount,
            Key_read_only_root_filesystem,
            Key_run_as_group,
            Key_run_as_non_root,
            Key_run_as_user,
            Key_se_linux_options,
            Key_windows_options,
            Other,
        }
        impl<'de> serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
                struct Visitor;
                impl<'de> serde::de::Visitor<'de> for Visitor {
                    type Value = Field;
                    fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                        f.write_str("field identifier")
                    }
                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
                        Ok(match v {
                            "allowPrivilegeEscalation" => Field::Key_allow_privilege_escalation,
                            "capabilities" => Field::Key_capabilities,
                            "privileged" => Field::Key_privileged,
                            "procMount" => Field::Key_proc_mount,
                            "readOnlyRootFilesystem" => Field::Key_read_only_root_filesystem,
                            "runAsGroup" => Field::Key_run_as_group,
                            "runAsNonRoot" => Field::Key_run_as_non_root,
                            "runAsUser" => Field::Key_run_as_user,
                            "seLinuxOptions" => Field::Key_se_linux_options,
                            "windowsOptions" => Field::Key_windows_options,
                            _ => Field::Other,
                        })
                    }
                }
                deserializer.deserialize_identifier(Visitor)
            }
        }
        struct Visitor;
        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = SecurityContext;
            fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                f.write_str("SecurityContext")
            }
            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
                let mut value_allow_privilege_escalation: Option<bool> = None;
                let mut value_capabilities: Option<crate::api::core::v1::Capabilities> = None;
                let mut value_privileged: Option<bool> = None;
                let mut value_proc_mount: Option<String> = None;
                let mut value_read_only_root_filesystem: Option<bool> = None;
                let mut value_run_as_group: Option<i64> = None;
                let mut value_run_as_non_root: Option<bool> = None;
                let mut value_run_as_user: Option<i64> = None;
                let mut value_se_linux_options: Option<crate::api::core::v1::SELinuxOptions> = None;
                let mut value_windows_options: Option<crate::api::core::v1::WindowsSecurityContextOptions> = None;
                while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        Field::Key_allow_privilege_escalation => value_allow_privilege_escalation = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_capabilities => value_capabilities = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_privileged => value_privileged = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_proc_mount => value_proc_mount = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_read_only_root_filesystem => value_read_only_root_filesystem = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_run_as_group => value_run_as_group = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_run_as_non_root => value_run_as_non_root = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_run_as_user => value_run_as_user = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_se_linux_options => value_se_linux_options = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_windows_options => value_windows_options = serde::de::MapAccess::next_value(&mut map)?,
                        // Unknown member: consume and discard its value.
                        Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }
                Ok(SecurityContext {
                    allow_privilege_escalation: value_allow_privilege_escalation,
                    capabilities: value_capabilities,
                    privileged: value_privileged,
                    proc_mount: value_proc_mount,
                    read_only_root_filesystem: value_read_only_root_filesystem,
                    run_as_group: value_run_as_group,
                    run_as_non_root: value_run_as_non_root,
                    run_as_user: value_run_as_user,
                    se_linux_options: value_se_linux_options,
                    windows_options: value_windows_options,
                })
            }
        }
        deserializer.deserialize_struct(
            "SecurityContext",
            &[
                "allowPrivilegeEscalation",
                "capabilities",
                "privileged",
                "procMount",
                "readOnlyRootFilesystem",
                "runAsGroup",
                "runAsNonRoot",
                "runAsUser",
                "seLinuxOptions",
                "windowsOptions",
            ],
            Visitor,
        )
    }
}
impl serde::Serialize for SecurityContext {
    /// Serializes only the populated (`Some`) fields under their Kubernetes
    /// camelCase wire names; the struct length reported to the serializer is
    /// the number of populated fields.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
        // Count the fields that will actually be emitted.
        let populated =
            usize::from(self.allow_privilege_escalation.is_some()) +
            usize::from(self.capabilities.is_some()) +
            usize::from(self.privileged.is_some()) +
            usize::from(self.proc_mount.is_some()) +
            usize::from(self.read_only_root_filesystem.is_some()) +
            usize::from(self.run_as_group.is_some()) +
            usize::from(self.run_as_non_root.is_some()) +
            usize::from(self.run_as_user.is_some()) +
            usize::from(self.se_linux_options.is_some()) +
            usize::from(self.windows_options.is_some());
        let mut state = serializer.serialize_struct("SecurityContext", populated)?;
        if let Some(value) = &self.allow_privilege_escalation {
            serde::ser::SerializeStruct::serialize_field(&mut state, "allowPrivilegeEscalation", value)?;
        }
        if let Some(value) = &self.capabilities {
            serde::ser::SerializeStruct::serialize_field(&mut state, "capabilities", value)?;
        }
        if let Some(value) = &self.privileged {
            serde::ser::SerializeStruct::serialize_field(&mut state, "privileged", value)?;
        }
        if let Some(value) = &self.proc_mount {
            serde::ser::SerializeStruct::serialize_field(&mut state, "procMount", value)?;
        }
        if let Some(value) = &self.read_only_root_filesystem {
            serde::ser::SerializeStruct::serialize_field(&mut state, "readOnlyRootFilesystem", value)?;
        }
        if let Some(value) = &self.run_as_group {
            serde::ser::SerializeStruct::serialize_field(&mut state, "runAsGroup", value)?;
        }
        if let Some(value) = &self.run_as_non_root {
            serde::ser::SerializeStruct::serialize_field(&mut state, "runAsNonRoot", value)?;
        }
        if let Some(value) = &self.run_as_user {
            serde::ser::SerializeStruct::serialize_field(&mut state, "runAsUser", value)?;
        }
        if let Some(value) = &self.se_linux_options {
            serde::ser::SerializeStruct::serialize_field(&mut state, "seLinuxOptions", value)?;
        }
        if let Some(value) = &self.windows_options {
            serde::ser::SerializeStruct::serialize_field(&mut state, "windowsOptions", value)?;
        }
        serde::ser::SerializeStruct::end(state)
    }
}
| 56.565854 | 421 | 0.617109 |
fbdaab06b48955229a780863e11cac33d9c2c8c4 | 4,656 | // This file is part of Substrate.
// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Genesis config definition.
use super::super::{DeclStorageDefExt, StorageLineTypeDef};
use frame_support_procedural_tools::syn_ext as ext;
use proc_macro2::TokenStream;
use quote::quote;
use syn::{parse_quote, spanned::Spanned};
/// One field of the generated genesis config struct.
pub struct GenesisConfigFieldDef {
	/// Field name in the genesis config struct.
	pub name: syn::Ident,
	/// Field type (for maps: a `Vec` of key/value tuples).
	pub typ: syn::Type,
	/// Attributes carried over onto the generated field (doc attributes and,
	/// for extra genesis lines, any parsed attribute except `cfg`).
	pub attrs: Vec<syn::Meta>,
	/// Token stream producing the field's default value.
	pub default: TokenStream,
}
/// Description of the genesis config to generate for a storage declaration.
pub struct GenesisConfigDef {
	/// Whether any field type mentions the module's runtime generic, in which
	/// case the generated struct itself must be generic.
	pub is_generic: bool,
	/// Fields of the genesis config struct.
	pub fields: Vec<GenesisConfigFieldDef>,
	/// For example: `<T: Config<I>, I: Instance=DefaultInstance>`.
	pub genesis_struct_decl: TokenStream,
	/// For example: `<T, I>`.
	pub genesis_struct: TokenStream,
	/// For example: `<T: Config<I>, I: Instance>`.
	pub genesis_impl: TokenStream,
	/// The where clause to use to constrain generics if genesis config is generic.
	pub genesis_where_clause: Option<syn::WhereClause>,
}
impl GenesisConfigDef {
	/// Builds the genesis config definition from the extended storage
	/// declaration.
	///
	/// The result is generic exactly when at least one collected field type
	/// mentions the module's runtime generic parameter.
	pub fn from_def(def: &DeclStorageDefExt) -> syn::Result<Self> {
		let field_defs = Self::get_genesis_config_field_defs(def)?;
		let is_generic = field_defs
			.iter()
			.any(|field| ext::type_contains_ident(&field.typ, &def.module_runtime_generic));
		// A non-generic config carries no generics and no where clause.
		let (genesis_struct_decl, genesis_impl, genesis_struct, genesis_where_clause) =
			match is_generic {
				true => {
					let runtime_generic = &def.module_runtime_generic;
					let runtime_trait = &def.module_runtime_trait;
					let optional_instance = &def.optional_instance;
					let optional_instance_bound = &def.optional_instance_bound;
					let optional_instance_bound_optional_default =
						&def.optional_instance_bound_optional_default;
					(
						quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound_optional_default>),
						quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound>),
						quote!(<#runtime_generic, #optional_instance>),
						def.where_clause.clone(),
					)
				},
				false => (quote!(), quote!(), quote!(), None),
			};
		Ok(Self {
			is_generic,
			fields: field_defs,
			genesis_struct_decl,
			genesis_struct,
			genesis_impl,
			genesis_where_clause,
		})
	}

	/// Collects one [`GenesisConfigFieldDef`] per `config(..)`-annotated
	/// storage line, followed by one per extra genesis config line.
	fn get_genesis_config_field_defs(
		def: &DeclStorageDefExt,
	) -> syn::Result<Vec<GenesisConfigFieldDef>> {
		let mut config_field_defs = Vec::new();
		for line in def.storage_lines.iter() {
			// Only storage lines with a `config(..)` annotation contribute.
			let config_field = match &line.config {
				Some(config_field) => config_field.clone(),
				None => continue,
			};
			let value_type = &line.value_type;
			// Map-like storage is initialised from a vector of key/value tuples.
			let typ = match &line.storage_type {
				StorageLineTypeDef::Simple(_) => (*value_type).clone(),
				StorageLineTypeDef::Map(map) => {
					let key = &map.key;
					parse_quote!( Vec<(#key, #value_type)> )
				},
				StorageLineTypeDef::DoubleMap(map) => {
					let key1 = &map.key1;
					let key2 = &map.key2;
					parse_quote!( Vec<(#key1, #key2, #value_type)> )
				},
				StorageLineTypeDef::NMap(map) => {
					let key_tuple = map.to_key_tuple();
					parse_quote!( Vec<(#key_tuple, #value_type)> )
				},
			};
			// `Option`-valued storage unwraps its declared default expression.
			let default = match &line.default_value {
				Some(d) if line.is_option => quote!( #d.unwrap_or_default() ),
				Some(d) => quote!( #d ),
				None => quote!(Default::default()),
			};
			config_field_defs.push(GenesisConfigFieldDef {
				name: config_field,
				typ,
				attrs: line.doc_attrs.clone(),
				default,
			});
		}
		for line in &def.extra_genesis_config_lines {
			// `cfg` attributes are explicitly rejected on extra genesis items.
			let mut attrs = Vec::new();
			for attr in &line.attrs {
				let meta = attr.parse_meta()?;
				if meta.path().is_ident("cfg") {
					return Err(syn::Error::new(
						meta.span(),
						"extra genesis config items do not support `cfg` attribute",
					))
				}
				attrs.push(meta);
			}
			let default = match &line.default {
				Some(e) => quote!( #e ),
				None => quote!(Default::default()),
			};
			config_field_defs.push(GenesisConfigFieldDef {
				name: line.name.clone(),
				typ: line.typ.clone(),
				attrs,
				default,
			});
		}
		Ok(config_field_defs)
	}
}
| 28.390244 | 91 | 0.675043 |
14cb549c061b8779aed2ec30f3bf32aa1c6e5640 | 968 | use crate::crd::SecretGenerator;
use kube::api::{Patch, PatchParams};
use kube::{Api, Client, Error};
use serde_json::{json, Value};
/// Adds the `secretgenerators.locmai.dev` finalizer to the named
/// `SecretGenerator` in `namespace` and returns the patched resource.
///
/// NOTE(review): this issues a JSON merge patch, which *replaces* the whole
/// `metadata.finalizers` array — a finalizer set by any other controller
/// would be dropped. Confirm no other finalizers are expected on this CRD.
pub async fn add(client: Client, name: &str, namespace: &str) -> Result<SecretGenerator, Error> {
    let api: Api<SecretGenerator> = Api::namespaced(client, namespace);
    let finalizer: Value = json!({
        "metadata": {
            "finalizers": ["secretgenerators.locmai.dev"],
        },
    });
    let patch: Patch<&Value> = Patch::Merge(&finalizer);
    // `patch` already yields the `Result` we return; no `Ok(..?)` re-wrap.
    api.patch(name, &PatchParams::default(), &patch).await
}
/// Clears all finalizers on the named `SecretGenerator` in `namespace`
/// (sets `metadata.finalizers` to `null` via a JSON merge patch), allowing
/// the API server to garbage-collect the resource; returns the patched
/// resource.
pub async fn delete(client: Client, name: &str, namespace: &str) -> Result<SecretGenerator, Error> {
    let api: Api<SecretGenerator> = Api::namespaced(client, namespace);
    let finalizer: Value = json!({
        "metadata": {
            "finalizers": null
        }
    });
    let patch: Patch<&Value> = Patch::Merge(&finalizer);
    // `patch` already yields the `Result` we return; no `Ok(..?)` re-wrap.
    api.patch(name, &PatchParams::default(), &patch).await
}
| 32.266667 | 100 | 0.627066 |
751912b91c9d5ccf954ca09e8fbf735f470e32d5 | 1,330 | #![cfg(test)]
use cosmwasm_std::testing::{MockApi, MockQuerier, MockStorage};
use cosmwasm_std::{Api, Binary, Extern, HumanAddr, StdResult};
use cosmwasm_std::{CanonicalAddr, Coin};
/// Test-only `Api` implementation that wraps the standard `MockApi` but
/// derives human addresses from a base64 encoding of the canonical bytes.
#[derive(Copy, Clone)]
pub struct MyMockApi {
    /// The wrapped stock mock; address conversions are delegated to it.
    pub original_mock: MockApi,
}
impl MyMockApi {
    /// Creates the mock wrapper around a `MockApi` configured with a
    /// canonical address length of 20 bytes.
    pub fn new() -> MyMockApi {
        let original_mock = MockApi::new(20);
        MyMockApi { original_mock }
    }
}
impl Api for MyMockApi {
    /// Delegates directly to the wrapped mock.
    fn canonical_address(&self, human: &HumanAddr) -> StdResult<HumanAddr> {
        self.original_mock.canonical_address(human)
    }
    /// Re-encodes the raw canonical bytes as base64 text, keeps the first 20
    /// bytes of that text, and lets the inner mock turn the result into a
    /// human address.
    fn human_address(&self, canonical: &CanonicalAddr) -> StdResult<HumanAddr> {
        let raw = &(canonical.0).0;
        let mut encoded = base64::encode(raw).into_bytes();
        encoded.truncate(20);
        self.original_mock.human_address(&CanonicalAddr(Binary(encoded)))
    }
}
/// Builds a `Extern` test harness using `MyMockApi`, with `contract_balance`
/// registered for the default `cosmos2contract` address.
pub fn my_mock_dependencies(
    contract_balance: &[Coin],
) -> Extern<MockStorage, MyMockApi, MockQuerier> {
    let contract_addr = HumanAddr::from("cosmos2contract");
    let querier = MockQuerier::new(&[(&contract_addr, contract_balance)]);
    Extern {
        storage: MockStorage::default(),
        api: MyMockApi::new(),
        querier,
    }
}
| 30.227273 | 80 | 0.666165 |
149e9701ecf9500e29bd5b9939e48844b6f536d1 | 120,229 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_get_channel_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::BatchGetChannelOutput, crate::error::BatchGetChannelError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::BatchGetChannelError::unhandled)?;
Err(crate::error::BatchGetChannelError::generic(generic))
}
/// Deserializes a successful `BatchGetChannel` JSON body into the operation
/// output.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_get_channel_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::BatchGetChannelOutput, crate::error::BatchGetChannelError> {
    let builder = crate::json_deser::deser_operation_crate_operation_batch_get_channel(
        response.body().as_ref(),
        crate::output::batch_get_channel_output::Builder::default(),
    )
    .map_err(crate::error::BatchGetChannelError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_get_stream_key_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::BatchGetStreamKeyOutput, crate::error::BatchGetStreamKeyError>
{
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::BatchGetStreamKeyError::unhandled)?;
Err(crate::error::BatchGetStreamKeyError::generic(generic))
}
/// Deserializes a successful `BatchGetStreamKey` JSON body into the
/// operation output.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_get_stream_key_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::BatchGetStreamKeyOutput, crate::error::BatchGetStreamKeyError>
{
    let builder = crate::json_deser::deser_operation_crate_operation_batch_get_stream_key(
        response.body().as_ref(),
        crate::output::batch_get_stream_key_output::Builder::default(),
    )
    .map_err(crate::error::BatchGetStreamKeyError::unhandled)?;
    Ok(builder.build())
}
/// Maps a failed `CreateChannel` HTTP response onto the operation's modeled
/// error variants.
///
/// The error code from the deserialized generic error selects the variant;
/// each variant body is deserialized from the response, and the generic
/// message is used as a fallback when the modeled body carries no message.
/// Codes without a modeled variant produce a generic (unmodeled) error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_channel_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::CreateChannelOutput, crate::error::CreateChannelError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::CreateChannelError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::CreateChannelError::unhandled(generic)),
    };
    // Saved so modeled errors without their own message can fall back to it.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::CreateChannelError {
            meta: generic,
            kind: crate::error::CreateChannelErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "PendingVerification" => crate::error::CreateChannelError {
            meta: generic,
            kind: crate::error::CreateChannelErrorKind::PendingVerification({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::pending_verification::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_pending_verification_json_err(response.body().as_ref(), output).map_err(crate::error::CreateChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::CreateChannelError {
            meta: generic,
            kind: crate::error::CreateChannelErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ServiceQuotaExceededException" => crate::error::CreateChannelError {
            meta: generic,
            kind: crate::error::CreateChannelErrorKind::ServiceQuotaExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output =
                        crate::error::service_quota_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_service_quota_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::CreateChannelError {
            meta: generic,
            kind: crate::error::CreateChannelErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::CreateChannelError::generic(generic),
    })
}
/// Deserializes a successful `CreateChannel` JSON body into the operation
/// output.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_channel_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::CreateChannelOutput, crate::error::CreateChannelError> {
    let builder = crate::json_deser::deser_operation_crate_operation_create_channel(
        response.body().as_ref(),
        crate::output::create_channel_output::Builder::default(),
    )
    .map_err(crate::error::CreateChannelError::unhandled)?;
    Ok(builder.build())
}
/// Maps a failed `CreateRecordingConfiguration` HTTP response onto the
/// operation's modeled error variants.
///
/// The error code from the deserialized generic error selects the variant;
/// each variant body is deserialized from the response, and the generic
/// message is used as a fallback when the modeled body carries no message.
/// Codes without a modeled variant produce a generic (unmodeled) error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_recording_configuration_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::CreateRecordingConfigurationOutput,
    crate::error::CreateRecordingConfigurationError,
> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::CreateRecordingConfigurationError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => {
            return Err(crate::error::CreateRecordingConfigurationError::unhandled(
                generic,
            ))
        }
    };
    // Saved so modeled errors without their own message can fall back to it.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::CreateRecordingConfigurationError {
            meta: generic,
            kind: crate::error::CreateRecordingConfigurationErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateRecordingConfigurationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::CreateRecordingConfigurationError {
                meta: generic,
                kind: crate::error::CreateRecordingConfigurationErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateRecordingConfigurationError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "InternalServerException" => crate::error::CreateRecordingConfigurationError {
            meta: generic,
            kind: crate::error::CreateRecordingConfigurationErrorKind::InternalServerException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_server_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateRecordingConfigurationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "PendingVerification" => crate::error::CreateRecordingConfigurationError {
            meta: generic,
            kind: crate::error::CreateRecordingConfigurationErrorKind::PendingVerification({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::pending_verification::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_pending_verification_json_err(response.body().as_ref(), output).map_err(crate::error::CreateRecordingConfigurationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ServiceQuotaExceededException" => crate::error::CreateRecordingConfigurationError {
            meta: generic,
            kind:
                crate::error::CreateRecordingConfigurationErrorKind::ServiceQuotaExceededException(
                    {
                        #[allow(unused_mut)]
                        let mut tmp = {
                            #[allow(unused_mut)]
                            let mut output =
                                crate::error::service_quota_exceeded_exception::Builder::default();
                            let _ = response;
                            output = crate::json_deser::deser_structure_crate_error_service_quota_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateRecordingConfigurationError::unhandled)?;
                            output.build()
                        };
                        if (&tmp.message).is_none() {
                            tmp.message = _error_message;
                        }
                        tmp
                    },
                ),
        },
        "ValidationException" => crate::error::CreateRecordingConfigurationError {
            meta: generic,
            kind: crate::error::CreateRecordingConfigurationErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateRecordingConfigurationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::CreateRecordingConfigurationError::generic(generic),
    })
}
/// Deserializes a successful `CreateRecordingConfiguration` JSON body into
/// the operation output.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_recording_configuration_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::CreateRecordingConfigurationOutput,
    crate::error::CreateRecordingConfigurationError,
> {
    let builder = crate::json_deser::deser_operation_crate_operation_create_recording_configuration(
        response.body().as_ref(),
        crate::output::create_recording_configuration_output::Builder::default(),
    )
    .map_err(crate::error::CreateRecordingConfigurationError::unhandled)?;
    Ok(builder.build())
}
/// Maps a failed `CreateStreamKey` HTTP response onto the operation's
/// modeled error variants.
///
/// The error code from the deserialized generic error selects the variant;
/// each variant body is deserialized from the response, and the generic
/// message is used as a fallback when the modeled body carries no message.
/// Codes without a modeled variant produce a generic (unmodeled) error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_stream_key_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::CreateStreamKeyOutput, crate::error::CreateStreamKeyError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::CreateStreamKeyError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::CreateStreamKeyError::unhandled(generic)),
    };
    // Saved so modeled errors without their own message can fall back to it.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::CreateStreamKeyError {
            meta: generic,
            kind: crate::error::CreateStreamKeyErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateStreamKeyError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "PendingVerification" => crate::error::CreateStreamKeyError {
            meta: generic,
            kind: crate::error::CreateStreamKeyErrorKind::PendingVerification({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::pending_verification::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_pending_verification_json_err(response.body().as_ref(), output).map_err(crate::error::CreateStreamKeyError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::CreateStreamKeyError {
            meta: generic,
            kind: crate::error::CreateStreamKeyErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateStreamKeyError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ServiceQuotaExceededException" => crate::error::CreateStreamKeyError {
            meta: generic,
            kind: crate::error::CreateStreamKeyErrorKind::ServiceQuotaExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output =
                        crate::error::service_quota_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_service_quota_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateStreamKeyError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::CreateStreamKeyError {
            meta: generic,
            kind: crate::error::CreateStreamKeyErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateStreamKeyError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::CreateStreamKeyError::generic(generic),
    })
}
/// Deserializes a successful `CreateStreamKey` JSON body into the operation
/// output.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_stream_key_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::CreateStreamKeyOutput, crate::error::CreateStreamKeyError> {
    let builder = crate::json_deser::deser_operation_crate_operation_create_stream_key(
        response.body().as_ref(),
        crate::output::create_stream_key_output::Builder::default(),
    )
    .map_err(crate::error::CreateStreamKeyError::unhandled)?;
    Ok(builder.build())
}
/// Maps a failed `DeleteChannel` HTTP response onto the operation's modeled
/// error variants.
///
/// The error code from the deserialized generic error selects the variant;
/// each variant body is deserialized from the response, and the generic
/// message is used as a fallback when the modeled body carries no message.
/// Codes without a modeled variant produce a generic (unmodeled) error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_channel_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteChannelOutput, crate::error::DeleteChannelError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::DeleteChannelError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::DeleteChannelError::unhandled(generic)),
    };
    // Saved so modeled errors without their own message can fall back to it.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::DeleteChannelError {
            meta: generic,
            kind: crate::error::DeleteChannelErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::DeleteChannelError {
                meta: generic,
                kind: crate::error::DeleteChannelErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteChannelError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "PendingVerification" => crate::error::DeleteChannelError {
            meta: generic,
            kind: crate::error::DeleteChannelErrorKind::PendingVerification({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::pending_verification::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_pending_verification_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::DeleteChannelError {
            meta: generic,
            kind: crate::error::DeleteChannelErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::DeleteChannelError {
            meta: generic,
            kind: crate::error::DeleteChannelErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::DeleteChannelError::generic(generic),
    })
}
/// Builds the (empty) `DeleteChannel` output; nothing is read from the
/// response body.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_channel_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteChannelOutput, crate::error::DeleteChannelError> {
    let _ = response;
    Ok(crate::output::delete_channel_output::Builder::default().build())
}
/// Maps a failed `DeletePlaybackKeyPair` HTTP response onto the operation's
/// modeled error variants.
///
/// The error code from the deserialized generic error selects the variant;
/// each variant body is deserialized from the response, and the generic
/// message is used as a fallback when the modeled body carries no message.
/// Codes without a modeled variant produce a generic (unmodeled) error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_playback_key_pair_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::DeletePlaybackKeyPairOutput,
    crate::error::DeletePlaybackKeyPairError,
> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::DeletePlaybackKeyPairError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::DeletePlaybackKeyPairError::unhandled(generic)),
    };
    // Saved so modeled errors without their own message can fall back to it.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::DeletePlaybackKeyPairError {
            meta: generic,
            kind: crate::error::DeletePlaybackKeyPairErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeletePlaybackKeyPairError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "PendingVerification" => crate::error::DeletePlaybackKeyPairError {
            meta: generic,
            kind: crate::error::DeletePlaybackKeyPairErrorKind::PendingVerification({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::pending_verification::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_pending_verification_json_err(response.body().as_ref(), output).map_err(crate::error::DeletePlaybackKeyPairError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::DeletePlaybackKeyPairError {
            meta: generic,
            kind: crate::error::DeletePlaybackKeyPairErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeletePlaybackKeyPairError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::DeletePlaybackKeyPairError {
            meta: generic,
            kind: crate::error::DeletePlaybackKeyPairErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeletePlaybackKeyPairError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::DeletePlaybackKeyPairError::generic(generic),
    })
}
/// Parses a successful `DeletePlaybackKeyPair` HTTP response.
///
/// The operation returns an empty body, so the response payload is ignored
/// and a default-constructed output is returned.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_playback_key_pair_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::DeletePlaybackKeyPairOutput,
    crate::error::DeletePlaybackKeyPairError,
> {
    // No payload to deserialize; acknowledge the argument to avoid warnings.
    let _ = response;
    Ok(crate::output::delete_playback_key_pair_output::Builder::default().build())
}
/// Converts a failed `DeleteRecordingConfiguration` HTTP response into a typed
/// `DeleteRecordingConfigurationError`.
///
/// The generic JSON error envelope is parsed first; its error code selects the
/// concrete error kind, whose body is deserialized from the response payload.
/// Responses without a recognizable code become `unhandled`/`generic` errors.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_recording_configuration_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::DeleteRecordingConfigurationOutput,
    crate::error::DeleteRecordingConfigurationError,
> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::DeleteRecordingConfigurationError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => {
            return Err(crate::error::DeleteRecordingConfigurationError::unhandled(
                generic,
            ))
        }
    };
    // Keep the envelope's top-level message so it can backfill variants whose
    // payload did not carry one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::DeleteRecordingConfigurationError {
            meta: generic,
            kind: crate::error::DeleteRecordingConfigurationErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteRecordingConfigurationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::DeleteRecordingConfigurationError {
                meta: generic,
                kind: crate::error::DeleteRecordingConfigurationErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteRecordingConfigurationError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "InternalServerException" => crate::error::DeleteRecordingConfigurationError {
            meta: generic,
            kind: crate::error::DeleteRecordingConfigurationErrorKind::InternalServerException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_server_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteRecordingConfigurationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::DeleteRecordingConfigurationError {
            meta: generic,
            kind: crate::error::DeleteRecordingConfigurationErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteRecordingConfigurationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::DeleteRecordingConfigurationError {
            meta: generic,
            kind: crate::error::DeleteRecordingConfigurationErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteRecordingConfigurationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes fall through to the generic representation.
        _ => crate::error::DeleteRecordingConfigurationError::generic(generic),
    })
}
/// Parses a successful `DeleteRecordingConfiguration` HTTP response.
///
/// The operation returns an empty body, so the response payload is ignored
/// and a default-constructed output is returned.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_recording_configuration_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::DeleteRecordingConfigurationOutput,
    crate::error::DeleteRecordingConfigurationError,
> {
    // No payload to deserialize; acknowledge the argument to avoid warnings.
    let _ = response;
    Ok(crate::output::delete_recording_configuration_output::Builder::default().build())
}
/// Converts a failed `DeleteStreamKey` HTTP response into a typed
/// `DeleteStreamKeyError`.
///
/// The generic JSON error envelope is parsed first; its error code selects the
/// concrete error kind, whose body is deserialized from the response payload.
/// Responses without a recognizable code become `unhandled`/`generic` errors.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_stream_key_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteStreamKeyOutput, crate::error::DeleteStreamKeyError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::DeleteStreamKeyError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::DeleteStreamKeyError::unhandled(generic)),
    };
    // Keep the envelope's top-level message so it can backfill variants whose
    // payload did not carry one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::DeleteStreamKeyError {
            meta: generic,
            kind: crate::error::DeleteStreamKeyErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteStreamKeyError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "PendingVerification" => crate::error::DeleteStreamKeyError {
            meta: generic,
            kind: crate::error::DeleteStreamKeyErrorKind::PendingVerification({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::pending_verification::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_pending_verification_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteStreamKeyError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::DeleteStreamKeyError {
            meta: generic,
            kind: crate::error::DeleteStreamKeyErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteStreamKeyError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::DeleteStreamKeyError {
            meta: generic,
            kind: crate::error::DeleteStreamKeyErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteStreamKeyError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes fall through to the generic representation.
        _ => crate::error::DeleteStreamKeyError::generic(generic),
    })
}
/// Parses a successful `DeleteStreamKey` HTTP response.
///
/// The operation returns an empty body, so the response payload is ignored
/// and a default-constructed output is returned.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_stream_key_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteStreamKeyOutput, crate::error::DeleteStreamKeyError> {
    // No payload to deserialize; acknowledge the argument to avoid warnings.
    let _ = response;
    Ok(crate::output::delete_stream_key_output::Builder::default().build())
}
/// Converts a failed `GetChannel` HTTP response into a typed `GetChannelError`.
///
/// The generic JSON error envelope is parsed first; its error code selects the
/// concrete error kind, whose body is deserialized from the response payload.
/// Responses without a recognizable code become `unhandled`/`generic` errors.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_channel_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetChannelOutput, crate::error::GetChannelError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::GetChannelError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::GetChannelError::unhandled(generic)),
    };
    // Keep the envelope's top-level message so it can backfill variants whose
    // payload did not carry one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::GetChannelError {
            meta: generic,
            kind: crate::error::GetChannelErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::GetChannelError {
            meta: generic,
            kind: crate::error::GetChannelErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::GetChannelError {
            meta: generic,
            kind: crate::error::GetChannelErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes fall through to the generic representation.
        _ => crate::error::GetChannelError::generic(generic),
    })
}
/// Parses a successful `GetChannel` HTTP response into a `GetChannelOutput`.
///
/// The JSON body is deserialized into the output builder; any
/// deserialization failure is surfaced as an `unhandled` error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_channel_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetChannelOutput, crate::error::GetChannelError> {
    let builder = crate::output::get_channel_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_channel(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetChannelError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `GetPlaybackKeyPair` HTTP response into a typed
/// `GetPlaybackKeyPairError`.
///
/// The generic JSON error envelope is parsed first; its error code selects the
/// concrete error kind, whose body is deserialized from the response payload.
/// Responses without a recognizable code become `unhandled`/`generic` errors.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_playback_key_pair_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::GetPlaybackKeyPairOutput,
    crate::error::GetPlaybackKeyPairError,
> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::GetPlaybackKeyPairError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::GetPlaybackKeyPairError::unhandled(generic)),
    };
    // Keep the envelope's top-level message so it can backfill variants whose
    // payload did not carry one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::GetPlaybackKeyPairError {
            meta: generic,
            kind: crate::error::GetPlaybackKeyPairErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetPlaybackKeyPairError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::GetPlaybackKeyPairError {
            meta: generic,
            kind: crate::error::GetPlaybackKeyPairErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetPlaybackKeyPairError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::GetPlaybackKeyPairError {
            meta: generic,
            kind: crate::error::GetPlaybackKeyPairErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetPlaybackKeyPairError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes fall through to the generic representation.
        _ => crate::error::GetPlaybackKeyPairError::generic(generic),
    })
}
/// Parses a successful `GetPlaybackKeyPair` HTTP response into a
/// `GetPlaybackKeyPairOutput`.
///
/// The JSON body is deserialized into the output builder; any
/// deserialization failure is surfaced as an `unhandled` error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_playback_key_pair_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::GetPlaybackKeyPairOutput,
    crate::error::GetPlaybackKeyPairError,
> {
    let builder = crate::output::get_playback_key_pair_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_playback_key_pair(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetPlaybackKeyPairError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `GetRecordingConfiguration` HTTP response into a typed
/// `GetRecordingConfigurationError`.
///
/// The generic JSON error envelope is parsed first; its error code selects the
/// concrete error kind, whose body is deserialized from the response payload.
/// Responses without a recognizable code become `unhandled`/`generic` errors.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_recording_configuration_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::GetRecordingConfigurationOutput,
    crate::error::GetRecordingConfigurationError,
> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::GetRecordingConfigurationError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => {
            return Err(crate::error::GetRecordingConfigurationError::unhandled(
                generic,
            ))
        }
    };
    // Keep the envelope's top-level message so it can backfill variants whose
    // payload did not carry one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::GetRecordingConfigurationError {
            meta: generic,
            kind: crate::error::GetRecordingConfigurationErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetRecordingConfigurationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "InternalServerException" => crate::error::GetRecordingConfigurationError {
            meta: generic,
            kind: crate::error::GetRecordingConfigurationErrorKind::InternalServerException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_server_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetRecordingConfigurationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::GetRecordingConfigurationError {
            meta: generic,
            kind: crate::error::GetRecordingConfigurationErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetRecordingConfigurationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::GetRecordingConfigurationError {
            meta: generic,
            kind: crate::error::GetRecordingConfigurationErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetRecordingConfigurationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes fall through to the generic representation.
        _ => crate::error::GetRecordingConfigurationError::generic(generic),
    })
}
/// Parses a successful `GetRecordingConfiguration` HTTP response into a
/// `GetRecordingConfigurationOutput`.
///
/// The JSON body is deserialized into the output builder; any
/// deserialization failure is surfaced as an `unhandled` error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_recording_configuration_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::GetRecordingConfigurationOutput,
    crate::error::GetRecordingConfigurationError,
> {
    let builder = crate::output::get_recording_configuration_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_recording_configuration(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetRecordingConfigurationError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `GetStream` HTTP response into a typed `GetStreamError`.
///
/// The generic JSON error envelope is parsed first; its error code selects the
/// concrete error kind, whose body is deserialized from the response payload.
/// Responses without a recognizable code become `unhandled`/`generic` errors.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_stream_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetStreamOutput, crate::error::GetStreamError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::GetStreamError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::GetStreamError::unhandled(generic)),
    };
    // Keep the envelope's top-level message so it can backfill variants whose
    // payload did not carry one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::GetStreamError {
            meta: generic,
            kind: crate::error::GetStreamErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetStreamError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ChannelNotBroadcasting" => crate::error::GetStreamError {
            meta: generic,
            kind: crate::error::GetStreamErrorKind::ChannelNotBroadcasting({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::channel_not_broadcasting::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_channel_not_broadcasting_json_err(response.body().as_ref(), output).map_err(crate::error::GetStreamError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::GetStreamError {
            meta: generic,
            kind: crate::error::GetStreamErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetStreamError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::GetStreamError {
            meta: generic,
            kind: crate::error::GetStreamErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetStreamError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes fall through to the generic representation.
        _ => crate::error::GetStreamError::generic(generic),
    })
}
/// Parses a successful `GetStream` HTTP response into a `GetStreamOutput`.
///
/// The JSON body is deserialized into the output builder; any
/// deserialization failure is surfaced as an `unhandled` error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_stream_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetStreamOutput, crate::error::GetStreamError> {
    let builder = crate::output::get_stream_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_stream(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetStreamError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `GetStreamKey` HTTP response into a typed
/// `GetStreamKeyError`.
///
/// The generic JSON error envelope is parsed first; its error code selects the
/// concrete error kind, whose body is deserialized from the response payload.
/// Responses without a recognizable code become `unhandled`/`generic` errors.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_stream_key_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetStreamKeyOutput, crate::error::GetStreamKeyError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::GetStreamKeyError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::GetStreamKeyError::unhandled(generic)),
    };
    // Keep the envelope's top-level message so it can backfill variants whose
    // payload did not carry one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::GetStreamKeyError {
            meta: generic,
            kind: crate::error::GetStreamKeyErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetStreamKeyError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::GetStreamKeyError {
            meta: generic,
            kind: crate::error::GetStreamKeyErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetStreamKeyError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::GetStreamKeyError {
            meta: generic,
            kind: crate::error::GetStreamKeyErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetStreamKeyError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes fall through to the generic representation.
        _ => crate::error::GetStreamKeyError::generic(generic),
    })
}
/// Parses a successful `GetStreamKey` HTTP response into a
/// `GetStreamKeyOutput`.
///
/// The JSON body is deserialized into the output builder; any
/// deserialization failure is surfaced as an `unhandled` error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_stream_key_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetStreamKeyOutput, crate::error::GetStreamKeyError> {
    let builder = crate::output::get_stream_key_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_stream_key(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetStreamKeyError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `GetStreamSession` HTTP response into a typed
/// `GetStreamSessionError`.
///
/// The generic JSON error envelope is parsed first; its error code selects the
/// concrete error kind, whose body is deserialized from the response payload.
/// Responses without a recognizable code become `unhandled`/`generic` errors.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_stream_session_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetStreamSessionOutput, crate::error::GetStreamSessionError>
{
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::GetStreamSessionError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::GetStreamSessionError::unhandled(generic)),
    };
    // Keep the envelope's top-level message so it can backfill variants whose
    // payload did not carry one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::GetStreamSessionError {
            meta: generic,
            kind: crate::error::GetStreamSessionErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetStreamSessionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::GetStreamSessionError {
            meta: generic,
            kind: crate::error::GetStreamSessionErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetStreamSessionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::GetStreamSessionError {
            meta: generic,
            kind: crate::error::GetStreamSessionErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetStreamSessionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes fall through to the generic representation.
        _ => crate::error::GetStreamSessionError::generic(generic),
    })
}
/// Parses a successful `GetStreamSession` HTTP response into a
/// `GetStreamSessionOutput`.
///
/// The JSON body is deserialized into the output builder; any
/// deserialization failure is surfaced as an `unhandled` error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_stream_session_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetStreamSessionOutput, crate::error::GetStreamSessionError>
{
    let builder = crate::output::get_stream_session_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_stream_session(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetStreamSessionError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `ImportPlaybackKeyPair` HTTP response into a typed
/// `ImportPlaybackKeyPairError`.
///
/// The generic JSON error envelope is parsed first; its error code selects the
/// concrete error kind, whose body is deserialized from the response payload.
/// Responses without a recognizable code become `unhandled`/`generic` errors.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_import_playback_key_pair_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ImportPlaybackKeyPairOutput,
    crate::error::ImportPlaybackKeyPairError,
> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::ImportPlaybackKeyPairError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::ImportPlaybackKeyPairError::unhandled(generic)),
    };
    // Keep the envelope's top-level message so it can backfill variants whose
    // payload did not carry one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::ImportPlaybackKeyPairError {
            meta: generic,
            kind: crate::error::ImportPlaybackKeyPairErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ImportPlaybackKeyPairError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::ImportPlaybackKeyPairError {
                meta: generic,
                kind: crate::error::ImportPlaybackKeyPairErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ImportPlaybackKeyPairError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "PendingVerification" => crate::error::ImportPlaybackKeyPairError {
            meta: generic,
            kind: crate::error::ImportPlaybackKeyPairErrorKind::PendingVerification({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::pending_verification::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_pending_verification_json_err(response.body().as_ref(), output).map_err(crate::error::ImportPlaybackKeyPairError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ServiceQuotaExceededException" => crate::error::ImportPlaybackKeyPairError {
            meta: generic,
            kind: crate::error::ImportPlaybackKeyPairErrorKind::ServiceQuotaExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output =
                        crate::error::service_quota_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_service_quota_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ImportPlaybackKeyPairError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::ImportPlaybackKeyPairError {
            meta: generic,
            kind: crate::error::ImportPlaybackKeyPairErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ImportPlaybackKeyPairError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes fall through to the generic representation.
        _ => crate::error::ImportPlaybackKeyPairError::generic(generic),
    })
}
/// Parses a successful `ImportPlaybackKeyPair` HTTP response into an
/// `ImportPlaybackKeyPairOutput`.
///
/// The JSON body is deserialized into the output builder; any
/// deserialization failure is surfaced as an `unhandled` error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_import_playback_key_pair_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ImportPlaybackKeyPairOutput,
    crate::error::ImportPlaybackKeyPairError,
> {
    let builder = crate::output::import_playback_key_pair_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_import_playback_key_pair(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::ImportPlaybackKeyPairError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `ListChannels` HTTP response into a typed
/// `ListChannelsError`.
///
/// The generic JSON error envelope is parsed first; its error code selects the
/// concrete error kind, whose body is deserialized from the response payload.
/// Responses without a recognizable code become `unhandled`/`generic` errors.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_channels_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListChannelsOutput, crate::error::ListChannelsError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::ListChannelsError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::ListChannelsError::unhandled(generic)),
    };
    // Keep the envelope's top-level message so it can backfill variants whose
    // payload did not carry one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "AccessDeniedException" => crate::error::ListChannelsError {
            meta: generic,
            kind: crate::error::ListChannelsErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListChannelsError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::ListChannelsError {
                meta: generic,
                kind: crate::error::ListChannelsErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListChannelsError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "ValidationException" => crate::error::ListChannelsError {
            meta: generic,
            kind: crate::error::ListChannelsErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListChannelsError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes fall through to the generic representation.
        _ => crate::error::ListChannelsError::generic(generic),
    })
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_channels_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListChannelsOutput, crate::error::ListChannelsError> {
    // Deserialize the JSON response body into the output builder, then
    // finalize the builder into the typed output.
    let builder = crate::output::list_channels_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_list_channels(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::ListChannelsError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_playback_key_pairs_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListPlaybackKeyPairsOutput,
crate::error::ListPlaybackKeyPairsError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::ListPlaybackKeyPairsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ListPlaybackKeyPairsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListPlaybackKeyPairsError {
meta: generic,
kind: crate::error::ListPlaybackKeyPairsErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListPlaybackKeyPairsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::ListPlaybackKeyPairsError {
meta: generic,
kind: crate::error::ListPlaybackKeyPairsErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListPlaybackKeyPairsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListPlaybackKeyPairsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_playback_key_pairs_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ListPlaybackKeyPairsOutput,
    crate::error::ListPlaybackKeyPairsError,
> {
    // Deserialize the JSON response body into the output builder, then
    // finalize the builder into the typed output.
    let builder = crate::output::list_playback_key_pairs_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_list_playback_key_pairs(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::ListPlaybackKeyPairsError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_recording_configurations_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListRecordingConfigurationsOutput,
crate::error::ListRecordingConfigurationsError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::ListRecordingConfigurationsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::ListRecordingConfigurationsError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListRecordingConfigurationsError {
meta: generic,
kind: crate::error::ListRecordingConfigurationsErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListRecordingConfigurationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::ListRecordingConfigurationsError {
meta: generic,
kind: crate::error::ListRecordingConfigurationsErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListRecordingConfigurationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::ListRecordingConfigurationsError {
meta: generic,
kind: crate::error::ListRecordingConfigurationsErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListRecordingConfigurationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListRecordingConfigurationsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_recording_configurations_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ListRecordingConfigurationsOutput,
    crate::error::ListRecordingConfigurationsError,
> {
    // Deserialize the JSON response body into the output builder, then
    // finalize the builder into the typed output.
    let builder = crate::output::list_recording_configurations_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_list_recording_configurations(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::ListRecordingConfigurationsError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_stream_keys_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListStreamKeysOutput, crate::error::ListStreamKeysError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::ListStreamKeysError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ListStreamKeysError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListStreamKeysError {
meta: generic,
kind: crate::error::ListStreamKeysErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListStreamKeysError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::ListStreamKeysError {
meta: generic,
kind: crate::error::ListStreamKeysErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListStreamKeysError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::ListStreamKeysError {
meta: generic,
kind: crate::error::ListStreamKeysErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListStreamKeysError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListStreamKeysError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_stream_keys_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListStreamKeysOutput, crate::error::ListStreamKeysError> {
    // Deserialize the JSON response body into the output builder, then
    // finalize the builder into the typed output.
    let builder = crate::output::list_stream_keys_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_list_stream_keys(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::ListStreamKeysError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_streams_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListStreamsOutput, crate::error::ListStreamsError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::ListStreamsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ListStreamsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListStreamsError {
meta: generic,
kind: crate::error::ListStreamsErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListStreamsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::ListStreamsError {
meta: generic,
kind: crate::error::ListStreamsErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListStreamsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListStreamsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_streams_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListStreamsOutput, crate::error::ListStreamsError> {
    // Deserialize the JSON response body into the output builder, then
    // finalize the builder into the typed output.
    let builder = crate::output::list_streams_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_list_streams(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::ListStreamsError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_stream_sessions_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListStreamSessionsOutput,
crate::error::ListStreamSessionsError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::ListStreamSessionsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ListStreamSessionsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListStreamSessionsError {
meta: generic,
kind: crate::error::ListStreamSessionsErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListStreamSessionsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::ListStreamSessionsError {
meta: generic,
kind: crate::error::ListStreamSessionsErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListStreamSessionsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::ListStreamSessionsError {
meta: generic,
kind: crate::error::ListStreamSessionsErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListStreamSessionsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListStreamSessionsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_stream_sessions_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ListStreamSessionsOutput,
    crate::error::ListStreamSessionsError,
> {
    // Deserialize the JSON response body into the output builder, then
    // finalize the builder into the typed output.
    let builder = crate::output::list_stream_sessions_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_list_stream_sessions(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::ListStreamSessionsError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_tags_for_resource_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListTagsForResourceOutput,
crate::error::ListTagsForResourceError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::ListTagsForResourceError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ListTagsForResourceError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalServerException" => crate::error::ListTagsForResourceError {
meta: generic,
kind: crate::error::ListTagsForResourceErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListTagsForResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::ListTagsForResourceError {
meta: generic,
kind: crate::error::ListTagsForResourceErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListTagsForResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::ListTagsForResourceError {
meta: generic,
kind: crate::error::ListTagsForResourceErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListTagsForResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListTagsForResourceError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_tags_for_resource_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ListTagsForResourceOutput,
    crate::error::ListTagsForResourceError,
> {
    // Deserialize the JSON response body into the output builder, then
    // finalize the builder into the typed output.
    let builder = crate::output::list_tags_for_resource_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_list_tags_for_resource(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::ListTagsForResourceError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_metadata_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutMetadataOutput, crate::error::PutMetadataError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::PutMetadataError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::PutMetadataError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::PutMetadataError {
meta: generic,
kind: crate::error::PutMetadataErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutMetadataError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ChannelNotBroadcasting" => crate::error::PutMetadataError {
meta: generic,
kind: crate::error::PutMetadataErrorKind::ChannelNotBroadcasting({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::channel_not_broadcasting::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_channel_not_broadcasting_json_err(response.body().as_ref(), output).map_err(crate::error::PutMetadataError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::PutMetadataError {
meta: generic,
kind: crate::error::PutMetadataErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutMetadataError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ThrottlingException" => crate::error::PutMetadataError {
meta: generic,
kind: crate::error::PutMetadataErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_throttling_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutMetadataError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::PutMetadataError {
meta: generic,
kind: crate::error::PutMetadataErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutMetadataError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::PutMetadataError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_metadata_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutMetadataOutput, crate::error::PutMetadataError> {
    // PutMetadata has no modeled response fields; the body is ignored.
    let _ = response;
    Ok(crate::output::put_metadata_output::Builder::default().build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_stop_stream_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::StopStreamOutput, crate::error::StopStreamError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::StopStreamError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::StopStreamError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::StopStreamError {
meta: generic,
kind: crate::error::StopStreamErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::StopStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ChannelNotBroadcasting" => crate::error::StopStreamError {
meta: generic,
kind: crate::error::StopStreamErrorKind::ChannelNotBroadcasting({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::channel_not_broadcasting::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_channel_not_broadcasting_json_err(response.body().as_ref(), output).map_err(crate::error::StopStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::StopStreamError {
meta: generic,
kind: crate::error::StopStreamErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::StopStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"StreamUnavailable" => {
crate::error::StopStreamError {
meta: generic,
kind: crate::error::StopStreamErrorKind::StreamUnavailable({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::stream_unavailable::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_stream_unavailable_json_err(response.body().as_ref(), output).map_err(crate::error::StopStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ValidationException" => crate::error::StopStreamError {
meta: generic,
kind: crate::error::StopStreamErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::StopStreamError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::StopStreamError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_stop_stream_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::StopStreamOutput, crate::error::StopStreamError> {
    // StopStream has no modeled response fields; the body is ignored.
    let _ = response;
    Ok(crate::output::stop_stream_output::Builder::default().build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_tag_resource_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::TagResourceOutput, crate::error::TagResourceError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::TagResourceError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::TagResourceError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalServerException" => crate::error::TagResourceError {
meta: generic,
kind: crate::error::TagResourceErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::TagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::TagResourceError {
meta: generic,
kind: crate::error::TagResourceErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::TagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::TagResourceError {
meta: generic,
kind: crate::error::TagResourceErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::TagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::TagResourceError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_tag_resource_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::TagResourceOutput, crate::error::TagResourceError> {
    // TagResource has no modeled response fields; the body is ignored.
    let _ = response;
    Ok(crate::output::tag_resource_output::Builder::default().build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_untag_resource_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UntagResourceOutput, crate::error::UntagResourceError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::UntagResourceError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::UntagResourceError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalServerException" => crate::error::UntagResourceError {
meta: generic,
kind: crate::error::UntagResourceErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_server_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UntagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::UntagResourceError {
meta: generic,
kind: crate::error::UntagResourceErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UntagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::UntagResourceError {
meta: generic,
kind: crate::error::UntagResourceErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UntagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::UntagResourceError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_untag_resource_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UntagResourceOutput, crate::error::UntagResourceError> {
    // UntagResource has no modeled response fields; the body is ignored.
    let _ = response;
    Ok(crate::output::untag_resource_output::Builder::default().build())
}
/// Maps a failed `UpdateChannel` HTTP response onto the operation's typed error.
///
/// The generic JSON error envelope is parsed first; its error code selects
/// which typed exception payload to deserialize. A response without a code,
/// or whose payload fails to deserialize, is surfaced as an "unhandled"
/// error; an unrecognized code becomes a generic error.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_channel_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UpdateChannelOutput, crate::error::UpdateChannelError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::UpdateChannelError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::UpdateChannelError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    // Dispatch on the service-provided error code. Each arm deserializes the
    // matching exception shape and backfills `message` from the generic
    // envelope when the typed payload omitted it.
    Err(match error_code {
        "AccessDeniedException" => crate::error::UpdateChannelError {
            meta: generic,
            kind: crate::error::UpdateChannelErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UpdateChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::UpdateChannelError {
                meta: generic,
                kind: crate::error::UpdateChannelErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UpdateChannelError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "PendingVerification" => crate::error::UpdateChannelError {
            meta: generic,
            kind: crate::error::UpdateChannelErrorKind::PendingVerification({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::pending_verification::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_pending_verification_json_err(response.body().as_ref(), output).map_err(crate::error::UpdateChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ResourceNotFoundException" => crate::error::UpdateChannelError {
            meta: generic,
            kind: crate::error::UpdateChannelErrorKind::ResourceNotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::resource_not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_resource_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UpdateChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ValidationException" => crate::error::UpdateChannelError {
            meta: generic,
            kind: crate::error::UpdateChannelErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::validation_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_validation_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UpdateChannelError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unknown error code: keep the generic envelope as-is.
        _ => crate::error::UpdateChannelError::generic(generic),
    })
}
/// Deserializes a successful `UpdateChannel` HTTP response into its output type.
///
/// The JSON body is decoded into the output builder; any deserialization
/// failure is surfaced as an "unhandled" `UpdateChannelError`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_channel_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UpdateChannelOutput, crate::error::UpdateChannelError> {
    let builder = crate::output::update_channel_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_update_channel(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::UpdateChannelError::unhandled)?;
    Ok(builder.build())
}
| 44.844834 | 229 | 0.566544 |
f4a116d350bbefa0cb4e0a9abeed681584c33dd6 | 3,259 | //! Device module.
//!
//! # Overview
//!
//! Device is an abstraction over output device which provides unified way of communication with
//! output device.
#[cfg(target_os = "windows")]
mod dsound;
#[cfg(target_os = "linux")]
mod alsa;
#[cfg(target_os = "macos")]
mod coreaudio;
// The dummy target works on all platforms
#[cfg(not(any(target_os = "windows", target_os = "linux", target_os = "macos", target_arch = "wasm32")))]
mod dummy;
#[cfg(target_arch = "wasm32")]
mod web;
/// A single interleaved stereo frame in the signed 16-bit format the OS audio
/// backends consume. `#[repr(C)]` keeps the layout stable for FFI.
///
/// `Default` is derived: both channels default to `0` (silence), which is
/// exactly what the previous hand-written `impl Default` produced.
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct NativeSample {
    /// Left channel amplitude.
    pub left: i16,
    /// Right channel amplitude.
    pub right: i16,
}
/// Callback invoked by the mixer to obtain the next batch of `(left, right)`
/// f32 samples; `Send` is required so it can run on the mixer thread.
pub type FeedCallback = dyn FnMut(&mut [(f32, f32)]) + Send;

/// Everything one `Device::mix` pass needs, borrowed from the device.
pub struct MixContext<'a> {
    // Scratch buffer the user callback fills with f32 stereo samples.
    mix_buffer: &'a mut [(f32, f32)],
    // Device-owned output buffer that receives the converted i16 samples.
    out_data: &'a mut [NativeSample],
    // User-supplied sample producer.
    callback: &'a mut FeedCallback,
}
/// Converts one `f32` sample in the nominal range `[-1.0, 1.0]` to `i16`.
///
/// Out-of-range values are clamped first, so the cast never wraps. The scale
/// factor is `i16::MAX`, so `-1.0` maps to `-32767` (not `i16::MIN`), keeping
/// the conversion symmetric around zero.
fn sample_to_i16(sample: f32) -> i16 {
    const SCALE: f32 = i16::MAX as f32;
    // `f32::clamp` replaces the hand-rolled if/else chain. NaN propagates
    // through `clamp` and the saturating `as i16` cast turns it into 0,
    // matching the original comparison-based behavior for NaN inputs too.
    (sample.clamp(-1.0, 1.0) * SCALE) as i16
}
/// Abstraction over a platform audio output device.
///
/// Implementors expose their staging buffers via `get_mix_context` and enter
/// their playback loop in `run`; the shared `mix` routine converts the user
/// callback's f32 samples into the native i16 format.
trait Device {
    /// Returns the buffers and user callback for one mixing pass, or `None`
    /// when the device currently has nowhere to put data.
    fn get_mix_context(&mut self) -> Option<MixContext>;

    /// Enters the device's playback/feed loop.
    fn run(&mut self);

    /// Pulls one batch of samples from the user callback and converts it into
    /// the device's native output buffer.
    fn mix(&mut self) {
        if let Some(context) = self.get_mix_context() {
            // Clear mixer buffer.
            for (left, right) in context.mix_buffer.iter_mut() {
                *left = 0.0;
                *right = 0.0;
            }

            // Fill it.
            (context.callback)(context.mix_buffer);

            // Convert to i16 - device expects samples in this format.
            // The buffers must stay the same length for the zip below to
            // cover every output sample.
            assert_eq!(context.mix_buffer.len(), context.out_data.len());
            for ((left, right), ref mut out_sample) in
                context.mix_buffer.iter().zip(context.out_data)
            {
                out_sample.left = sample_to_i16(*left);
                out_sample.right = sample_to_i16(*right);
            }
        }
    }
}
/// Transfer ownership of device to separate mixer thread. It will
/// call the callback with a specified rate to get data to send to a physical device.
///
/// `buffer_len_bytes` sizes the backend's playback buffer; `callback` produces
/// the f32 samples (see [`FeedCallback`]).
pub(in crate) fn run_device<F: FnMut(&mut [(f32, f32)]) + Send + 'static>(
    #[allow(unused_variables)] buffer_len_bytes: u32,
    #[allow(unused_variables)] callback: F,
) {
    // Native targets: hand the device off to a dedicated thread that blocks
    // in the platform feed loop.
    #[cfg(not(target_arch = "wasm32"))]
    {
        std::thread::spawn(move || {
            // Exactly one of the following bindings is compiled in, selected
            // by target OS; other platforms fall back to the dummy device.
            #[cfg(target_os = "windows")]
            let mut device = dsound::DirectSoundDevice::new(buffer_len_bytes, callback).unwrap();
            #[cfg(target_os = "linux")]
            let mut device = alsa::AlsaSoundDevice::new(buffer_len_bytes, callback).unwrap();
            #[cfg(target_os = "macos")]
            let mut device =
                coreaudio::CoreaudioSoundDevice::new(buffer_len_bytes, callback).unwrap();
            #[cfg(not(any(target_os = "windows", target_os = "linux", target_os = "macos")))]
            let mut device = dummy::DummySoundDevice::new(buffer_len_bytes, callback).unwrap();
            device.run()
        });
    }

    // Wasm target: no extra thread. The device is deliberately leaked via
    // `mem::forget` so it is never dropped — presumably to keep browser audio
    // callbacks alive for the page's lifetime; confirm in the `web` module.
    #[cfg(target_arch = "wasm32")]
    {
        let mut device = web::WebAudioDevice::new(buffer_len_bytes, callback);
        device.run();
        std::mem::forget(device);
    }
}
| 28.587719 | 105 | 0.582694 |
f7f674d36d582bb080ac4d9775a8e649e6a7c12c | 4,375 | #![allow(non_snake_case)]
/*
This file is part of Curv library
Copyright 2018 by Kzen Networks
(https://github.com/KZen-networks/curv)
License MIT: https://github.com/KZen-networks/curv/blob/master/LICENSE
*/
use super::ProofError;
use crate::cryptographic_primitives::hashing::hash_sha256::HSha256;
use crate::cryptographic_primitives::hashing::traits::Hash;
use crate::elliptic::curves::traits::*;
use crate::FE;
use crate::GE;
use zeroize::Zeroize;
#[cfg(feature="serde")]
use serde::ser::{Serialize, Serializer};
#[cfg(feature="serde")]
use serde::{Deserialize, Deserializer};
/// This is a proof of knowledge that a pair of group elements {D, E}
/// form a valid homomorphic ElGamal encryption (”in the exponent”) using public key Y .
/// (HEG is defined in B. Schoenmakers and P. Tuyls. Practical Two-Party Computation Based on the Conditional Gate)
/// Specifically, the witness is ω = (x, r), the statement is δ = (G, Y, Q, D, E).
/// The relation R outputs 1 if D = xG+rY , E = rG, Q = xG
///
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct HomoELGamalDlogProof {
    // Commitments to the prover's random masks (see `prove`).
    pub A1: GE,
    pub A2: GE,
    pub A3: GE,
    // Responses binding the masks to the witness via the challenge.
    pub z1: FE,
    pub z2: FE,
}

/// Secret witness ω = (x, r) known only to the prover.
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct HomoElGamalDlogWitness {
    pub r: FE,
    pub x: FE,
}

/// Public statement δ = (G, Y, Q, D, E) both parties agree on.
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct HomoElGamalDlogStatement {
    pub G: GE,
    pub Y: GE,
    pub Q: GE,
    pub D: GE,
    pub E: GE,
}
impl HomoELGamalDlogProof {
    /// Produces a non-interactive proof for witness `w = (x, r)` against
    /// statement `delta` via the Fiat–Shamir transform: random masks `s1, s2`
    /// yield commitments `A1 = s1*G`, `A2 = s2*Y`, `A3 = s2*G`; the challenge
    /// `e` is the SHA-256 hash of the commitments and `(G, Y, D, E)`; the
    /// responses are `z1 = s1 + e*x`, `z2 = s2 + e*r`.
    pub fn prove(
        w: &HomoElGamalDlogWitness,
        delta: &HomoElGamalDlogStatement,
    ) -> HomoELGamalDlogProof {
        let mut s1: FE = ECScalar::new_random();
        let mut s2: FE = ECScalar::new_random();
        let A1 = delta.G * s1;
        let A2 = delta.Y * s2;
        let A3 = delta.G * s2;
        let e =
            HSha256::create_hash_from_ge(&[&A1, &A2, &A3, &delta.G, &delta.Y, &delta.D, &delta.E]);
        let z1 = s1 + e * w.x;
        let z2 = s2 + e * w.r;
        // Wipe the secret masks as soon as the responses are computed:
        // anyone learning a mask could recover the witness from z = s + e*w.
        s1.zeroize();
        s2.zeroize();
        HomoELGamalDlogProof { A1, A2, A3, z1, z2 }
    }

    /// Verifies the proof by recomputing the Fiat–Shamir challenge and
    /// checking the three sigma-protocol equations:
    /// `z1*G == A1 + e*Q`, `z2*G == A3 + e*E`, `z2*Y == A2 + e*(D - Q)`.
    pub fn verify(&self, delta: &HomoElGamalDlogStatement) -> Result<(), ProofError> {
        let e = HSha256::create_hash_from_ge(&[
            &self.A1, &self.A2, &self.A3, &delta.G, &delta.Y, &delta.D, &delta.E,
        ]);
        let z1G = delta.G * self.z1;
        let z2Y = delta.Y * self.z2;
        let z2G = delta.G * self.z2;
        let A1_plus_eQ = self.A1 + delta.Q * e;
        let A3_plus_eE = self.A3 + delta.E * e;
        let D_minus_Q = delta.D.sub_point(&delta.Q.get_element());
        let A2_plus_eDmQ = self.A2 + D_minus_Q * e;
        if z1G == A1_plus_eQ && z2G == A3_plus_eE && z2Y == A2_plus_eDmQ {
            Ok(())
        } else {
            Err(ProofError)
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::cryptographic_primitives::proofs::sigma_correct_homomorphic_elgamal_encryption_of_dlog::*;
    use crate::{FE, GE};

    // Honest prover: a statement built exactly from the witness must verify.
    #[test]
    fn test_correct_homo_elgamal() {
        let witness = HomoElGamalDlogWitness {
            r: ECScalar::new_random(),
            x: ECScalar::new_random(),
        };
        let G: GE = ECPoint::generator();
        let y: FE = ECScalar::new_random();
        let Y = G.clone() * &y;
        let D = G.clone() * &witness.x + Y.clone() * &witness.r;
        let E = G.clone() * &witness.r;
        let Q = G.clone() * &witness.x;
        let delta = HomoElGamalDlogStatement { G, Y, Q, D, E };
        let proof = HomoELGamalDlogProof::prove(&witness, &delta);
        assert!(proof.verify(&delta).is_ok());
    }

    // TODO: add more fail scenarios
    // Perturbed statement (E and Q shifted by G): verification must fail,
    // making the final assert panic as required by #[should_panic].
    #[test]
    #[should_panic]
    fn test_wrong_homo_elgamal() {
        // test for Q = (x+1)G
        let witness = HomoElGamalDlogWitness {
            r: ECScalar::new_random(),
            x: ECScalar::new_random(),
        };
        let G: GE = ECPoint::generator();
        let y: FE = ECScalar::new_random();
        let Y = G.clone() * &y;
        let D = G.clone() * &witness.x + Y.clone() * &witness.r;
        let E = G.clone() * &witness.r + G.clone();
        let Q = G.clone() * &witness.x + G.clone();
        let delta = HomoElGamalDlogStatement { G, Y, Q, D, E };
        let proof = HomoELGamalDlogProof::prove(&witness, &delta);
        assert!(proof.verify(&delta).is_ok());
    }
}
| 33.143939 | 115 | 0.589029 |
2866a7e33cabc960d61632adf35f0b033f345168 | 1,897 | use serde_json::Value;
use domain::protocol_type::ProtocolTypes;
/// Top-level agency configuration aggregating all sections below.
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
    pub app: AppConfig,
    pub forward_agent: ForwardAgentConfig,
    pub server: ServerConfig,
    pub wallet_storage: WalletStorageConfig,
    // Protocol version to use; behavior when absent is decided at the load
    // site — presumably a crate default, confirm there.
    pub protocol_type: Option<ProtocolTypes>,
}

/// Identity, endpoint and wallet settings for the Forward Agent.
#[derive(Clone, Debug, Deserialize)]
pub struct ForwardAgentConfig {
    // Forward Agent DID
    pub did: String,
    // Seed for deterministic generation of Forward Agent did key
    pub did_seed: Option<String>,
    // Forward Agent Endpoint
    pub endpoint: String,
    // Forward Agent wallet id
    pub wallet_id: String,
    // Forward Agent wallet passphrase
    pub wallet_passphrase: String,
}

/// HTTP application settings.
#[derive(Clone, Debug, Deserialize)]
pub struct AppConfig {
    // Http application prefix
    pub prefix: String,
    // enable or disable http api for fetching information about agency status
    pub enable_admin_api: Option<bool>
}

/// HTTP server settings.
#[derive(Clone, Debug, Deserialize)]
pub struct ServerConfig {
    // List of ip:port to bind
    pub addresses: Vec<String>,
    // Amount of http workers (instances of app). By default amount of logical CPU cores.
    pub workers: Option<usize>,
}

/// Storage backend settings for agents' wallets.
#[derive(Clone, Debug, Deserialize)]
pub struct WalletStorageConfig {
    // Wallet storage type for agents wallets
    #[serde(rename = "type")]
    pub xtype: Option<String>,
    // Optional to override default library path. Default value is determined based on value of
    // xtype and OS
    pub plugin_library_path: Option<String>,
    // Optional to override default storage initialization function. Default value is determined
    // based on value of xtype and OS
    pub plugin_init_function: Option<String>,
    // Wallet storage config for agents wallets
    pub config: Option<Value>,
    // Wallet storage credentials for agents wallets
    pub credentials: Option<Value>,
}
| 32.152542 | 97 | 0.717449 |
14dd2a1ac9b587208c3426338331c8c498808f72 | 21,456 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Thread local storage
//!
//! This module provides an implementation of thread local storage for Rust
//! programs. Thread local storage is a method of storing data into a global
//! variable which each thread in the program will have its own copy of.
//! Threads do not share this data, so accesses do not need to be synchronized.
//!
//! At a high level, this module provides two variants of storage:
//!
//! * Owning thread local storage. This is a type of thread local key which
//! owns the value that it contains, and will destroy the value when the
//! thread exits. This variant is created with the `thread_local!` macro and
//! can contain any value which is `'static` (no borrowed pointers.
//!
//! * Scoped thread local storage. This type of key is used to store a reference
//! to a value into local storage temporarily for the scope of a function
//! call. There are no restrictions on what types of values can be placed
//! into this key.
//!
//! Both forms of thread local storage provide an accessor function, `with`,
//! which will yield a shared reference to the value to the specified
//! closure. Thread local keys only allow shared access to values as there is no
//! way to guarantee uniqueness if a mutable borrow was allowed. Most values
//! will want to make use of some form of **interior mutability** through the
//! `Cell` or `RefCell` types.
#![macro_escape]
#![experimental]
use prelude::*;
use cell::UnsafeCell;
// Sure wish we had macro hygiene, no?
#[doc(hidden)] pub use self::imp::Key as KeyInner;
#[doc(hidden)] pub use self::imp::destroy_value;
#[doc(hidden)] pub use sys_common::thread_local::INIT_INNER as OS_INIT_INNER;
#[doc(hidden)] pub use sys_common::thread_local::StaticKey as OsStaticKey;
pub mod scoped;
/// A thread local storage key which owns its contents.
///
/// This key uses the fastest possible implementation available to it for the
/// target platform. It is instantiated with the `thread_local!` macro and the
/// primary method is the `with` method.
///
/// The `with` method yields a reference to the contained value which cannot be
/// sent across tasks or escape the given closure.
///
/// # Initialization and Destruction
///
/// Initialization is dynamically performed on the first call to `with()`
/// within a thread, and values support destructors which will be run when a
/// thread exits.
///
/// # Example
///
/// ```
/// use std::cell::RefCell;
/// use std::thread::Thread;
///
/// thread_local!(static FOO: RefCell<uint> = RefCell::new(1));
///
/// FOO.with(|f| {
/// assert_eq!(*f.borrow(), 1);
/// *f.borrow_mut() = 2;
/// });
///
/// // each thread starts out with the initial value of 1
/// Thread::spawn(move|| {
/// FOO.with(|f| {
/// assert_eq!(*f.borrow(), 1);
/// *f.borrow_mut() = 3;
/// });
/// }).detach();
///
/// // we retain our original value of 2 despite the child thread
/// FOO.with(|f| {
/// assert_eq!(*f.borrow(), 2);
/// });
/// ```
pub struct Key<T> {
    // The key itself may be tagged with #[thread_local], and this `Key` is
    // stored as a `static`, and it's not valid for a static to reference the
    // address of another thread_local static. For this reason we kinda wonkily
    // work around this by generating a shim function which will give us the
    // address of the inner TLS key at runtime.
    //
    // This is trivially devirtualizable by LLVM because we never store anything
    // to this field and rustc can declare the `static` as constant as well.
    #[doc(hidden)]
    pub inner: fn() -> &'static KeyInner<UnsafeCell<Option<T>>>,

    // Initialization routine invoked lazily, once per thread, to create the
    // value on first access (see `Key::with`).
    #[doc(hidden)]
    pub init: fn() -> T,
}
/// Declare a new thread local storage key of type `std::thread_local::Key`.
///
/// Two arms: private and `pub` statics. Both expand to a hidden per-thread
/// `__KEY` slot (via `__thread_local_inner!`), a lazy initializer `__init`,
/// and a shim `__getit` returning the slot's address at runtime (see the
/// comment on `Key::inner` for why the shim is needed).
#[macro_export]
#[doc(hidden)]
macro_rules! thread_local {
    (static $name:ident: $t:ty = $init:expr) => (
        static $name: ::std::thread_local::Key<$t> = {
            use std::cell::UnsafeCell as __UnsafeCell;
            use std::thread_local::KeyInner as __KeyInner;
            use std::option::Option as __Option;
            use std::option::Option::None as __None;

            __thread_local_inner!(static __KEY: __UnsafeCell<__Option<$t>> = {
                __UnsafeCell { value: __None }
            });
            fn __init() -> $t { $init }
            fn __getit() -> &'static __KeyInner<__UnsafeCell<__Option<$t>>> {
                &__KEY
            }
            ::std::thread_local::Key { inner: __getit, init: __init }
        };
    );
    // Identical to the arm above except the generated static is `pub`.
    (pub static $name:ident: $t:ty = $init:expr) => (
        pub static $name: ::std::thread_local::Key<$t> = {
            use std::cell::UnsafeCell as __UnsafeCell;
            use std::thread_local::KeyInner as __KeyInner;
            use std::option::Option as __Option;
            use std::option::Option::None as __None;

            __thread_local_inner!(static __KEY: __UnsafeCell<__Option<$t>> = {
                __UnsafeCell { value: __None }
            });
            fn __init() -> $t { $init }
            fn __getit() -> &'static __KeyInner<__UnsafeCell<__Option<$t>>> {
                &__KEY
            }
            ::std::thread_local::Key { inner: __getit, init: __init }
        };
    );
}
// Macro pain #4586:
//
// When cross compiling, rustc will load plugins and macros from the *host*
// platform before search for macros from the target platform. This is primarily
// done to detect, for example, plugins. Ideally the macro below would be
// defined once per module below, but unfortunately this means we have the
// following situation:
//
// 1. We compile libstd for x86_64-unknown-linux-gnu, this thread_local!() macro
//    will inject #[thread_local] statics.
// 2. We then try to compile a program for arm-linux-androideabi
// 3. The compiler has a host of linux and a target of android, so it loads
//    macros from the *linux* libstd.
// 4. The macro generates a #[thread_local] field, but the android libstd does
//    not use #[thread_local]
// 5. Compile error about structs with wrong fields.
//
// To get around this, we're forced to inject the #[cfg] logic into the macro
// itself. Woohoo.
#[macro_export]
macro_rules! __thread_local_inner {
    (static $name:ident: $t:ty = $init:expr) => (
        #[cfg_attr(any(target_os = "macos", target_os = "linux"), thread_local)]
        static $name: ::std::thread_local::KeyInner<$t> =
            __thread_local_inner!($init, $t);
    );
    (pub static $name:ident: $t:ty = $init:expr) => (
        #[cfg_attr(any(target_os = "macos", target_os = "linux"), thread_local)]
        pub static $name: ::std::thread_local::KeyInner<$t> =
            __thread_local_inner!($init, $t);
    );
    // Expression arm: builds the platform-specific KeyInner initializer.
    ($init:expr, $t:ty) => ({
        // macos/linux: compiler-supported `#[thread_local]` layout with
        // destructor bookkeeping flags.
        #[cfg(any(target_os = "macos", target_os = "linux"))]
        const INIT: ::std::thread_local::KeyInner<$t> = {
            ::std::thread_local::KeyInner {
                inner: ::std::cell::UnsafeCell { value: $init },
                dtor_registered: ::std::cell::UnsafeCell { value: false },
                dtor_running: ::std::cell::UnsafeCell { value: false },
            }
        };

        // Other targets: OS-based TLS key with a per-type destructor shim
        // registered up front.
        #[cfg(all(not(any(target_os = "macos", target_os = "linux"))))]
        const INIT: ::std::thread_local::KeyInner<$t> = {
            unsafe extern fn __destroy(ptr: *mut u8) {
                ::std::thread_local::destroy_value::<$t>(ptr);
            }

            ::std::thread_local::KeyInner {
                inner: ::std::cell::UnsafeCell { value: $init },
                os: ::std::thread_local::OsStaticKey {
                    inner: ::std::thread_local::OS_INIT_INNER,
                    dtor: ::std::option::Option::Some(__destroy as unsafe extern fn(*mut u8)),
                },
            }
        };

        INIT
    });
}
impl<T: 'static> Key<T> {
    /// Acquire a reference to the value in this TLS key.
    ///
    /// This will lazily initialize the value if this thread has not referenced
    /// this key yet.
    ///
    /// # Panics
    ///
    /// This function will `panic!()` if the key currently has its
    /// destructor running, and it **may** panic if the destructor has
    /// previously been run for this thread.
    pub fn with<F, R>(&'static self, f: F) -> R
        where F: FnOnce(&T) -> R {
        let slot = (self.inner)();
        unsafe {
            // `get` returns None while/after the destructor runs — see the
            // platform `imp` modules.
            let slot = slot.get().expect("cannot access a TLS value during or \
                                          after it is destroyed");
            f(match *slot.get() {
                Some(ref inner) => inner,
                None => self.init(slot),
            })
        }
    }

    // Cold path: runs the user-supplied initializer once per thread and
    // stores the result in the slot.
    unsafe fn init(&self, slot: &UnsafeCell<Option<T>>) -> &T {
        *slot.get() = Some((self.init)());
        (*slot.get()).as_ref().unwrap()
    }

    /// Test this TLS key to determine whether its value has been destroyed for
    /// the current thread or not.
    ///
    /// This will not initialize the key if it is not already initialized.
    pub fn destroyed(&'static self) -> bool {
        unsafe { (self.inner)().get().is_none() }
    }
}
// Fast-path implementation backed by the compiler's `#[thread_local]`
// attribute on macOS and Linux.
#[cfg(any(target_os = "macos", target_os = "linux"))]
mod imp {
    use prelude::*;

    use cell::UnsafeCell;
    use intrinsics;
    use ptr;

    #[doc(hidden)]
    pub struct Key<T> {
        // Place the inner bits in an `UnsafeCell` to currently get around the
        // "only Sync statics" restriction. This allows any type to be placed in
        // the cell.
        //
        // Note that all access requires `T: 'static` so it can't be a type with
        // any borrowed pointers still.
        pub inner: UnsafeCell<T>,

        // Metadata to keep track of the state of the destructor. Remember that
        // these variables are thread-local, not global.
        pub dtor_registered: UnsafeCell<bool>, // should be Cell
        pub dtor_running: UnsafeCell<bool>, // should be Cell
    }

    unsafe impl<T> ::kinds::Sync for Key<T> { }

    #[doc(hidden)]
    impl<T> Key<T> {
        pub unsafe fn get(&'static self) -> Option<&'static T> {
            // Once the destructor has started, the value is gone for good.
            if intrinsics::needs_drop::<T>() && *self.dtor_running.get() {
                return None
            }
            self.register_dtor();
            Some(&*self.inner.get())
        }

        unsafe fn register_dtor(&self) {
            // Only types with destructors need cleanup, and only once.
            if !intrinsics::needs_drop::<T>() || *self.dtor_registered.get() {
                return
            }

            register_dtor(self as *const _ as *mut u8,
                          destroy_value::<T>);
            *self.dtor_registered.get() = true;
        }
    }

    // Since what appears to be glibc 2.18 this symbol has been shipped which
    // GCC and clang both use to invoke destructors in thread_local globals, so
    // let's do the same!
    //
    // Note, however, that we run on lots older linuxes, as well as cross
    // compiling from a newer linux to an older linux, so we also have a
    // fallback implementation to use as well.
    //
    // Due to rust-lang/rust#18804, make sure this is not generic!
    #[cfg(target_os = "linux")]
    unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
        use mem;
        use libc;
        use sys_common::thread_local as os;

        extern {
            static __dso_handle: *mut u8;
            #[linkage = "extern_weak"]
            static __cxa_thread_atexit_impl: *const ();
        }
        if !__cxa_thread_atexit_impl.is_null() {
            type F = unsafe extern fn(dtor: unsafe extern fn(*mut u8),
                                      arg: *mut u8,
                                      dso_handle: *mut u8) -> libc::c_int;
            mem::transmute::<*const (), F>(__cxa_thread_atexit_impl)
                (dtor, t, __dso_handle);
            return
        }

        // The fallback implementation uses a vanilla OS-based TLS key to track
        // the list of destructors that need to be run for this thread. The key
        // then has its own destructor which runs all the other destructors.
        //
        // The destructor for DTORS is a little special in that it has a `while`
        // loop to continuously drain the list of registered destructors. It
        // *should* be the case that this loop always terminates because we
        // provide the guarantee that a TLS key cannot be set after it is
        // flagged for destruction.
        static DTORS: os::StaticKey = os::StaticKey {
            inner: os::INIT_INNER,
            dtor: Some(run_dtors as unsafe extern "C" fn(*mut u8)),
        };
        type List = Vec<(*mut u8, unsafe extern fn(*mut u8))>;
        if DTORS.get().is_null() {
            let v: Box<List> = box Vec::new();
            DTORS.set(mem::transmute(v));
        }
        let list: &mut List = &mut *(DTORS.get() as *mut List);
        list.push((t, dtor));

        unsafe extern fn run_dtors(mut ptr: *mut u8) {
            while !ptr.is_null() {
                let list: Box<List> = mem::transmute(ptr);
                for &(ptr, dtor) in list.iter() {
                    dtor(ptr);
                }
                ptr = DTORS.get();
                DTORS.set(0 as *mut _);
            }
        }
    }

    // OSX's analog of the above linux function is this _tlv_atexit function.
    // The disassembly of thread_local globals in C++ (at least produced by
    // clang) will have this show up in the output.
    #[cfg(target_os = "macos")]
    unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
        extern {
            fn _tlv_atexit(dtor: unsafe extern fn(*mut u8),
                           arg: *mut u8);
        }
        _tlv_atexit(dtor, t);
    }

    #[doc(hidden)]
    pub unsafe extern fn destroy_value<T>(ptr: *mut u8) {
        let ptr = ptr as *mut Key<T>;
        // Right before we run the user destructor be sure to flag the
        // destructor as running for this thread so calls to `get` will return
        // `None`.
        *(*ptr).dtor_running.get() = true;
        // `ptr::read` moves the value out of the slot; the temporary is
        // dropped at the end of the statement, running T's destructor.
        ptr::read((*ptr).inner.get() as *const T);
    }
}
// Fallback implementation for targets without `#[thread_local]` support,
// built on an OS-provided TLS key.
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
mod imp {
    use prelude::*;

    use cell::UnsafeCell;
    use mem;
    use sys_common::thread_local::StaticKey as OsStaticKey;

    #[doc(hidden)]
    pub struct Key<T> {
        // Statically allocated initialization expression, using an `UnsafeCell`
        // for the same reasons as above.
        pub inner: UnsafeCell<T>,

        // OS-TLS key that we'll use to key off.
        pub os: OsStaticKey,
    }

    unsafe impl<T> ::kinds::Sync for Key<T> { }

    // Heap-allocated per-thread payload; `key` lets the destructor find its
    // owning Key again.
    struct Value<T: 'static> {
        key: &'static Key<T>,
        value: T,
    }

    #[doc(hidden)]
    impl<T> Key<T> {
        pub unsafe fn get(&'static self) -> Option<&'static T> {
            self.ptr().map(|p| &*p)
        }

        unsafe fn ptr(&'static self) -> Option<*mut T> {
            let ptr = self.os.get() as *mut Value<T>;
            if !ptr.is_null() {
                // 1 is the sentinel written by `destroy_value` while the
                // destructor is running: the value is gone.
                if ptr as uint == 1 {
                    return None
                }
                return Some(&mut (*ptr).value as *mut T);
            }

            // If the lookup returned null, we haven't initialized our own local
            // copy, so do that now.
            //
            // Also note that this transmute_copy should be ok because the value
            // `inner` is already validated to be a valid `static` value, so we
            // should be able to freely copy the bits.
            let ptr: Box<Value<T>> = box Value {
                key: self,
                value: mem::transmute_copy(&self.inner),
            };
            let ptr: *mut Value<T> = mem::transmute(ptr);
            self.os.set(ptr as *mut u8);
            Some(&mut (*ptr).value as *mut T)
        }
    }

    #[doc(hidden)]
    pub unsafe extern fn destroy_value<T: 'static>(ptr: *mut u8) {
        // The OS TLS ensures that this key contains a NULL value when this
        // destructor starts to run. We set it back to a sentinel value of 1 to
        // ensure that any future calls to `get` for this thread will return
        // `None`.
        //
        // Note that to prevent an infinite loop we reset it back to null right
        // before we return from the destructor ourselves.
        let ptr: Box<Value<T>> = mem::transmute(ptr);
        let key = ptr.key;
        key.os.set(1 as *mut u8);
        drop(ptr);
        key.os.set(0 as *mut u8);
    }
}
#[cfg(test)]
mod tests {
    use prelude::*;

    use cell::UnsafeCell;
    use thread::Thread;

    // Drop notifier: sends on its channel when destroyed, so tests can
    // observe TLS destructor execution.
    struct Foo(Sender<()>);

    impl Drop for Foo {
        fn drop(&mut self) {
            let Foo(ref s) = *self;
            s.send(());
        }
    }

    // Values are per-thread: a write in one thread is invisible to another.
    #[test]
    fn smoke_no_dtor() {
        thread_local!(static FOO: UnsafeCell<int> = UnsafeCell { value: 1 });

        FOO.with(|f| unsafe {
            assert_eq!(*f.get(), 1);
            *f.get() = 2;
        });
        let (tx, rx) = channel();
        spawn(move|| {
            FOO.with(|f| unsafe {
                assert_eq!(*f.get(), 1);
            });
            tx.send(());
        });
        rx.recv();

        FOO.with(|f| unsafe {
            assert_eq!(*f.get(), 2);
        });
    }

    // The destructor runs at thread exit (observed via Foo's channel send).
    #[test]
    fn smoke_dtor() {
        thread_local!(static FOO: UnsafeCell<Option<Foo>> = UnsafeCell {
            value: None
        });

        let (tx, rx) = channel();
        spawn(move|| unsafe {
            let mut tx = Some(tx);
            FOO.with(|f| {
                *f.get() = Some(Foo(tx.take().unwrap()));
            });
        });
        rx.recv();
    }

    // Destructors may re-populate *other* keys; HITS counts the drops to pin
    // down the expected re-entrancy order.
    #[test]
    fn circular() {
        struct S1;
        struct S2;
        thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell {
            value: None
        });
        thread_local!(static K2: UnsafeCell<Option<S2>> = UnsafeCell {
            value: None
        });
        static mut HITS: uint = 0;

        impl Drop for S1 {
            fn drop(&mut self) {
                unsafe {
                    HITS += 1;
                    if K2.destroyed() {
                        assert_eq!(HITS, 3);
                    } else {
                        if HITS == 1 {
                            K2.with(|s| *s.get() = Some(S2));
                        } else {
                            assert_eq!(HITS, 3);
                        }
                    }
                }
            }
        }
        impl Drop for S2 {
            fn drop(&mut self) {
                unsafe {
                    HITS += 1;
                    assert!(!K1.destroyed());
                    assert_eq!(HITS, 2);
                    K1.with(|s| *s.get() = Some(S1));
                }
            }
        }

        Thread::spawn(move|| {
            drop(S1);
        }).join();
    }

    // A key is reported destroyed from within its own value's destructor.
    #[test]
    fn self_referential() {
        struct S1;
        thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell {
            value: None
        });

        impl Drop for S1 {
            fn drop(&mut self) {
                assert!(K1.destroyed());
            }
        }

        Thread::spawn(move|| unsafe {
            K1.with(|s| *s.get() = Some(S1));
        }).join();
    }

    // A destructor may initialize another key whose value itself has a
    // destructor — chained destruction must still complete.
    #[test]
    fn dtors_in_dtors_in_dtors() {
        struct S1(Sender<()>);
        thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell {
            value: None
        });
        thread_local!(static K2: UnsafeCell<Option<Foo>> = UnsafeCell {
            value: None
        });

        impl Drop for S1 {
            fn drop(&mut self) {
                let S1(ref tx) = *self;
                unsafe {
                    if !K2.destroyed() {
                        K2.with(|s| *s.get() = Some(Foo(tx.clone())));
                    }
                }
            }
        }

        let (tx, rx) = channel();
        spawn(move|| unsafe {
            let mut tx = Some(tx);
            K1.with(|s| *s.get() = Some(S1(tx.take().unwrap())));
        });
        rx.recv();
    }
}
// Tests exercising non-trivial (dynamically computed) initializers.
#[cfg(test)]
mod dynamic_tests {
    use prelude::*;

    use cell::RefCell;
    use collections::HashMap;

    // Initializer may call arbitrary functions.
    #[test]
    fn smoke() {
        fn square(i: int) -> int { i * i }
        thread_local!(static FOO: int = square(3));

        FOO.with(|f| {
            assert_eq!(*f, 9);
        });
    }

    // Initializer may build owned, heap-allocated state.
    #[test]
    fn hashmap() {
        fn map() -> RefCell<HashMap<int, int>> {
            let mut m = HashMap::new();
            m.insert(1, 2);
            RefCell::new(m)
        }
        thread_local!(static FOO: RefCell<HashMap<int, int>> = map());

        FOO.with(|map| {
            assert_eq!(map.borrow()[1], 2);
        });
    }

    // Interior mutability through RefCell works across `with` calls.
    #[test]
    fn refcell_vec() {
        thread_local!(static FOO: RefCell<Vec<uint>> = RefCell::new(vec![1, 2, 3]));

        FOO.with(|vec| {
            assert_eq!(vec.borrow().len(), 3);
            vec.borrow_mut().push(4);
            assert_eq!(vec.borrow()[3], 4);
        });
    }
}
| 33.420561 | 94 | 0.544976 |
1d264c100264700ae0e70f75e4e4b9364eb1c169 | 1,441 | pub enum WrapMode {
ClampToBorder,
ClampToEdge,
MirrorRepeat,
Repeat,
}
/// Texel filtering mode used for magnification, minification and mip
/// selection (see `Sampler`).
pub enum FilterMode {
    Linear,
    Nearest,
}
/// Immutable texture sampler state; constructed from a `SamplerDescriptor`
/// and read back through the getters on `impl Sampler`.
pub struct Sampler {
    mag_filter: FilterMode,
    min_filter: FilterMode,
    mipmap_filter: FilterMode,
    wrap_u: WrapMode,
    wrap_v: WrapMode,
    wrap_w: WrapMode,
}

/// Creation parameters for `Sampler::new`; `Default` (below) selects linear
/// filtering and clamp-to-edge wrapping on all axes.
pub struct SamplerDescriptor {
    pub mag_filter: FilterMode,
    pub min_filter: FilterMode,
    pub mipmap_filter: FilterMode,
    pub wrap_u: WrapMode,
    pub wrap_v: WrapMode,
    pub wrap_w: WrapMode,
}
impl Default for SamplerDescriptor {
    /// Linear filtering on all filter slots and clamp-to-edge wrapping on all
    /// three axes — a conventional baseline sampler configuration.
    fn default() -> Self {
        Self {
            mag_filter: FilterMode::Linear,
            min_filter: FilterMode::Linear,
            mipmap_filter: FilterMode::Linear,
            wrap_u: WrapMode::ClampToEdge,
            wrap_v: WrapMode::ClampToEdge,
            wrap_w: WrapMode::ClampToEdge,
        }
    }
}
impl Sampler {
pub fn new(desc: SamplerDescriptor) -> Self {
// @TODO: Fix default parameters
Sampler {
mag_filter: desc.mag_filter,
min_filter: desc.min_filter,
mipmap_filter: desc.mipmap_filter,
wrap_u: desc.wrap_u,
wrap_v: desc.wrap_v,
wrap_w: desc.wrap_w,
}
}
pub fn mag_filter(&self) -> &FilterMode {
&self.mag_filter
}
pub fn min_filter(&self) -> &FilterMode {
&self.min_filter
}
pub fn mipmap_filter(&self) -> &FilterMode {
&self.mipmap_filter
}
pub fn wrap_u(&self) -> &WrapMode {
&self.wrap_u
}
pub fn wrap_v(&self) -> &WrapMode {
&self.wrap_v
}
pub fn wrap_w(&self) -> &WrapMode {
&self.wrap_w
}
}
| 17.790123 | 46 | 0.700208 |
8a3746ca82c8cdd28717843587eee245b829e05e | 639 | use std::str;
use minitt::ast::Expression;
use minitt::parser::{expression_to_expression, parse_str, Tok};
use minitt_util::io::read_file;
pub fn parse_file(file_arg: &str, print_lexical_json: bool) -> Option<Expression> {
// If cannot read input, return.
let file_content = read_file(file_arg)?;
// Read file
let file_content_utf8 = str::from_utf8(file_content.as_slice()).unwrap();
// Parse
let tok: Tok = parse_str(file_content_utf8)
.map_err(|err| eprintln!("{}", err))
.ok()?;
if print_lexical_json {
println!("{}", tok.to_json());
}
Some(expression_to_expression(tok))
}
| 30.428571 | 83 | 0.661972 |
642a62c87aaa3f8ad54c898a85957dca224195d2 | 149 | mod center_mass;
mod constant_acceleration;
pub use center_mass::CenterMass;
pub use constant_acceleration::ConstantAcceleration;
use super::core;
| 18.625 | 52 | 0.832215 |
14afaa7d111cb93e2547e3f500a72864dae36296 | 2,699 | /*!
This is a Snappy compression algorithm library written in Rust.
It is ported from [Go implementation](https://github.com/golang/snappy) with similar interface.
Provide basic interfaces:
- `max_encode_len(src_len)`: Get the max length of encoded data.
- `encode(dst, src)`: Encode `src` to `dst`.
- `decode_len(src)`: Get the exact length of decoded data.
- `decode_len(dst, src)`: Decode `src` to `dst`.
# Examples:
Compress:
```rust
use xsnappy::{max_encode_len, encode};
let src = "Jingle bell, jingle bell, jingle bell rock \
Jingle bells swing and jingle bells ring".as_bytes();
let mut dst = vec![0; max_encode_len(src.len())];
let size = encode(&mut dst, src);
dst.resize(size, 0);
println!("{:?}", dst);
```
Decompress:
```rust
use xsnappy::{decode_len, decode};
use std::str::from_utf8;
let src = vec![83, 52, 74, 105, 110, 103, 108, 101, 32, 98, 101, 108, 108, 44, 32, 106, 90,
13, 0, 20, 32, 114, 111, 99, 107, 32, 29, 43, 40, 115, 32, 115, 119, 105, 110,
103, 32, 97, 110, 100, 46, 53, 0, 20, 115, 32, 114, 105, 110, 103];
let dec_len = match decode_len(&src) {
Ok(len) => len,
Err(err) => panic!("{}", err)
};
let mut dst = vec![0; dec_len];
match decode(&mut dst, &src) {
Ok(len) => {},
Err(err) => panic!("{}", err)
}
println!("{}", from_utf8(&dst).unwrap());
```
*/
mod binary;
mod encode;
mod decode;
pub mod error;
use error::SnappyError;
/// Encode `src` into `dst`, returning the exact length of the encoded data.
///
/// `dst` must be pre-allocated with at least `max_encode_len(src.len())`
/// bytes; only the first returned-length bytes are meaningful afterwards.
///
/// # Examples:
///
/// ```rust
/// use xsnappy::{max_encode_len, encode};
///
/// let src = b"hello world! hello world!";
/// let mut dst = vec![0; max_encode_len(src.len())];
/// let len = encode(&mut dst, src);
/// dst.resize(len, 0); // resize to exact length
/// ```
/// # Panics:
/// Panics if the length of `dst` is less than `max_encode_len(src.len())`
pub fn encode(dst: &mut [u8], src: &[u8]) -> usize {
    encode::encode(dst, src)
}
/// Decode `src` into `dst`, returning the exact length of the decoded data.
///
/// `dst` must be pre-allocated with `decode_len(src)` bytes.
///
/// # Examples:
///
/// ```rust
/// use xsnappy::{decode_len, decode};
///
/// let src: &[u8] = &[0]; // a compressed empty payload
/// let dec_len = decode_len(src).unwrap();
/// let mut dst = vec![0; dec_len];
/// decode(&mut dst, src).unwrap();
/// ```
pub fn decode(dst: &mut [u8], src: &[u8]) -> Result<usize, SnappyError> {
    decode::decode(dst, src)
}
/// Return the maximum possible length of the encoded form of `src_len`
/// input bytes. Use this to size the `dst` buffer passed to [`encode`].
pub fn max_encode_len(src_len: usize) -> usize {
    encode::max_encode_len(src_len)
}
/// Return the exact length of the decoded data, or a [`SnappyError`] if the
/// length cannot be determined from `src`. Use this to size the `dst`
/// buffer passed to [`decode`].
pub fn decode_len(src: &[u8]) -> Result<usize, SnappyError> {
    decode::decode_len(src)
}
71859fdf34333057ea2247732b13533ab50bd580 | 3,166 | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use super::*;
use diem_types::{block_info::BlockInfo, epoch_state::EpochState};
/// Build a deterministic `HashValue` whose last 8 bytes are the big-endian
/// encoding of `index` (all other bytes are zero).
fn id(index: u64) -> HashValue {
    let mut raw = [0; HashValue::LENGTH];
    raw[HashValue::LENGTH - 8..].copy_from_slice(&index.to_be_bytes());
    HashValue::new(raw)
}
/// Produce a minimal block tuple: the given id, no transactions, and a VM
/// output over empty executed trees.
fn gen_block(id: HashValue) -> (HashValue, Vec<Transaction>, ProcessedVMOutput) {
    let txns: Vec<Transaction> = Vec::new();
    let output = ProcessedVMOutput::new(vec![], ExecutedTrees::new_empty(), None);
    (id, txns, output)
}
/// Build a `LedgerInfo` for `block_id` at epoch 1; when `reconfig` is set,
/// the block carries a (empty) next epoch state, marking a reconfiguration.
fn gen_ledger_info(block_id: HashValue, reconfig: bool) -> LedgerInfo {
    let next_epoch_state = if reconfig {
        Some(EpochState::empty())
    } else {
        None
    };
    let block_info = BlockInfo::new(1, 0, block_id, HashValue::zero(), 0, 0, next_epoch_state);
    LedgerInfo::new(block_info, HashValue::zero())
}
fn create_cache() -> SpeculationCache {
// * ---> 1 ---> 2
// | |
// | └----> 3 ---> 4
// | |
// | └----> 5
// |
// └----> 6 ---> 7 ---> 8
// |
// └----> 9 ---> 10
// |
// └----> 11
// *: PRE_GENESIS_BLOCK_ID
let mut cache = SpeculationCache::new();
cache
.add_block(*PRE_GENESIS_BLOCK_ID, gen_block(id(1)))
.unwrap();
cache.add_block(id(1), gen_block(id(2))).unwrap();
cache.add_block(id(1), gen_block(id(3))).unwrap();
cache.add_block(id(3), gen_block(id(4))).unwrap();
cache.add_block(id(3), gen_block(id(5))).unwrap();
cache
.add_block(*PRE_GENESIS_BLOCK_ID, gen_block(id(6)))
.unwrap();
cache.add_block(id(6), gen_block(id(7))).unwrap();
cache.add_block(id(7), gen_block(id(8))).unwrap();
cache.add_block(id(6), gen_block(id(9))).unwrap();
cache.add_block(id(9), gen_block(id(10))).unwrap();
cache.add_block(id(9), gen_block(id(11))).unwrap();
cache
}
#[test]
fn test_branch() {
    let mut cache = create_cache();
    // Count blocks in a separate statement to avoid a core dump if the
    // assertion fails.
    let blocks_before_prune = cache.block_map.lock().len();
    assert_eq!(blocks_before_prune, 11);
    // Prune to block 9 (not a reconfiguration): only 9's subtree survives.
    cache
        .prune(&gen_ledger_info(id(9), false), vec![], vec![])
        .unwrap();
    let blocks_after_prune = cache.block_map.lock().len();
    assert_eq!(blocks_after_prune, 2);
    assert_eq!(cache.committed_block_id, id(9));
}
#[test]
fn test_reconfig_id_update() {
    let mut cache = create_cache();
    // Prune at block 1 with a reconfiguration ledger info.
    cache
        .prune(&gen_ledger_info(id(1), true), vec![], vec![])
        .unwrap();
    // Four blocks remain, and the committed id is NOT set to block 1 itself.
    let remaining = cache.block_map.lock().len();
    assert_eq!(remaining, 4);
    assert_ne!(cache.committed_block_id, id(1));
}
#[test]
fn test_add_duplicate_block() {
    let mut cache = create_cache();
    // Inserting the same (parent, block) pair twice must succeed both times
    // (both calls unwrap without panicking).
    cache.add_block(id(1), gen_block(id(7))).unwrap();
    cache.add_block(id(1), gen_block(id(7))).unwrap();
}
#[test]
fn test_add_block_missing_parent() {
    let mut cache = create_cache();
    // id(99) was never inserted, so adding a child under it must error.
    assert!(cache.add_block(id(99), gen_block(id(100))).is_err());
}
| 28.267857 | 81 | 0.549589 |
2988d80346a0d75202aa949384efa639b190ceec | 3,342 | // Copyright 2013-2019, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
// TODO: Introduction
#![cfg_attr(feature = "cargo-clippy", allow(let_unit_value))]
#![cfg_attr(feature = "cargo-clippy", allow(new_without_default))]
#![cfg_attr(feature = "cargo-clippy", allow(type_complexity))]
#![cfg_attr(feature = "cargo-clippy", allow(transmute_ptr_to_ref))]
#![cfg_attr(feature = "cargo-clippy", allow(trivially_copy_pass_by_ref))]
#![cfg_attr(feature = "cargo-clippy", allow(derive_hash_xor_eq))]
#![allow(deprecated)]
extern crate libc;
#[macro_use]
extern crate bitflags;
extern crate once_cell;
extern crate atk_sys;
extern crate cairo_sys;
extern crate gdk4_sys as gdk_sys;
extern crate gdk_pixbuf_sys;
extern crate gio_sys;
extern crate glib_sys;
extern crate gobject_sys;
extern crate graphene_sys;
extern crate gsk4_sys as gsk_sys;
extern crate gtk4_sys as gtk_sys;
extern crate pango_sys;
#[macro_use]
extern crate glib;
extern crate atk;
extern crate cairo;
extern crate gdk4 as gdk;
extern crate gdk_pixbuf;
extern crate gio;
extern crate graphene;
extern crate gsk4 as gsk;
extern crate pango;
/// Re-export of `GTK_STYLE_PROVIDER_PRIORITY_FALLBACK` as `u32`.
pub const STYLE_PROVIDER_PRIORITY_FALLBACK: u32 =
    gtk_sys::GTK_STYLE_PROVIDER_PRIORITY_FALLBACK as u32;
/// Re-export of `GTK_STYLE_PROVIDER_PRIORITY_THEME` as `u32`.
pub const STYLE_PROVIDER_PRIORITY_THEME: u32 = gtk_sys::GTK_STYLE_PROVIDER_PRIORITY_THEME as u32;
/// Re-export of `GTK_STYLE_PROVIDER_PRIORITY_SETTINGS` as `u32`.
pub const STYLE_PROVIDER_PRIORITY_SETTINGS: u32 =
    gtk_sys::GTK_STYLE_PROVIDER_PRIORITY_SETTINGS as u32;
/// Re-export of `GTK_STYLE_PROVIDER_PRIORITY_APPLICATION` as `u32`.
pub const STYLE_PROVIDER_PRIORITY_APPLICATION: u32 =
    gtk_sys::GTK_STYLE_PROVIDER_PRIORITY_APPLICATION as u32;
/// Re-export of `GTK_STYLE_PROVIDER_PRIORITY_USER` as `u32`.
pub const STYLE_PROVIDER_PRIORITY_USER: u32 = gtk_sys::GTK_STYLE_PROVIDER_PRIORITY_USER as u32;
#[macro_use]
mod rt;
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
#[cfg_attr(feature = "cargo-clippy", allow(match_same_arms))]
#[cfg_attr(feature = "cargo-clippy", allow(let_and_return))]
#[cfg_attr(feature = "cargo-clippy", allow(many_single_char_names))]
#[cfg_attr(feature = "cargo-clippy", allow(wrong_self_convention))]
#[allow(unused_imports)]
mod auto;
mod functions;
mod signal;
#[macro_use]
pub mod subclass;
pub mod prelude;
pub use auto::functions::*;
pub use auto::*;
pub use functions::*;
pub use rt::*;
mod accel_key;
mod accel_map;
mod actionable;
mod application;
mod border;
mod builder;
mod color_chooser;
mod combo_box;
mod css_location;
mod dialog;
mod editable;
mod entry_buffer;
mod entry_completion;
mod file_chooser_dialog;
mod file_filter_info;
mod im_context_simple;
mod list_store;
mod message_dialog;
mod notebook;
mod overlay;
mod pad_action_entry;
mod pad_controller;
mod page_range;
mod recent_data;
mod requisition;
mod response_type;
mod shortcuts_section;
mod snapshot;
mod spin_button;
mod text;
mod text_buffer;
mod tree_model_filter;
mod tree_model_sort;
mod tree_path;
mod tree_sortable;
mod tree_store;
mod widget;
pub use accel_key::AccelKey;
pub use border::Border;
pub use css_location::CssLocation;
pub use file_filter_info::FileFilterInfo;
pub use pad_action_entry::PadActionEntry;
pub use page_range::PageRange;
pub use recent_data::RecentData;
pub use requisition::Requisition;
pub use response_type::ResponseType;
pub use tree_sortable::SortColumn;
pub use widget::TickCallbackId;
| 26.951613 | 97 | 0.794135 |
eb908dc53027a21a9f1b48dbe943b4893ea0ede4 | 3,349 | use super::Pure;
use crate::mode::Mode;
use swc_common::{util::take::Take, Spanned};
use swc_ecma_ast::*;
use swc_ecma_utils::{ExprExt, Value};
impl<M> Pure<'_, M>
where
M: Mode,
{
///
/// - `while(test);` => `for(;;test);
/// - `do; while(true)` => `for(;;);
pub(super) fn loop_to_for_stmt(&mut self, s: &mut Stmt) {
if !self.options.loops {
return;
}
match s {
Stmt::While(stmt) => {
self.changed = true;
tracing::debug!("loops: Converting a while loop to a for loop");
*s = Stmt::For(ForStmt {
span: stmt.span,
init: None,
test: Some(stmt.test.take()),
update: None,
body: stmt.body.take(),
});
}
Stmt::DoWhile(stmt) => {
let val = stmt.test.as_pure_bool();
if let Value::Known(true) = val {
self.changed = true;
tracing::debug!("loops: Converting an always-true do-while loop to a for loop");
*s = Stmt::For(ForStmt {
span: stmt.span,
init: None,
test: Some(stmt.test.take()),
update: None,
body: stmt.body.take(),
});
}
}
_ => {}
}
}
    /// Fold a loop body of the form `if (test) break;` into the `for` test.
    ///
    /// # Input
    ///
    /// ```js
    /// for(; size--;)
    ///     if (!(result = eq(a[size], b[size], aStack, bStack)))
    ///         break;
    /// ```
    ///
    ///
    /// # Output
    ///
    /// ```js
    /// for (; size-- && (result = eq(a[size], b[size], aStack, bStack)););
    /// ```
    pub(super) fn merge_for_if_break(&mut self, s: &mut ForStmt) {
        // Only fires when the whole body is a single unlabeled `if`/`break`.
        if let Stmt::If(IfStmt {
            test,
            cons,
            alt: None,
            ..
        }) = &mut *s.body
        {
            if let Stmt::Break(BreakStmt { label: None, .. }) = &**cons {
                // We only care about instant breaks.
                //
                // Note: As the minifier of swc is very fast, we don't
                // care about block statements with a single break as a
                // body.
                //
                // If it's optimizable, other pass for if statements
                // will remove block and with the next pass we can apply
                // this pass.
                self.changed = true;
                tracing::debug!("loops: Compressing for-if-break into a for statement");
                // We negate because this `test` is used as a condition for `break`.
                self.negate(test, true, false);
                // Combine any existing loop test with the negated break
                // condition via `&&`.
                match s.test.take() {
                    Some(left) => {
                        // NOTE(review): `s.test` was just `take`n, so this reads
                        // the span of a `None` option — presumably a dummy span;
                        // confirm this is intended.
                        s.test = Some(Box::new(Expr::Bin(BinExpr {
                            span: s.test.span(),
                            op: op!("&&"),
                            left,
                            right: test.take(),
                        })));
                    }
                    None => {
                        s.test = Some(test.take());
                    }
                }
                // Remove body
                s.body.take();
            }
        }
    }
}
| 31.009259 | 100 | 0.385488 |
89c5e60e3431430611ffd6c0e49bff211575b6b8 | 884 | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(optin_builtin_traits)]
// An opt-in built-in trait ("OIBIT", later renamed "auto trait") with a
// default impl for all types, plus a negative impl to exercise how rustdoc
// renders the impl's generics, `!`, and `where` clause.
pub trait AnOibit {}
impl AnOibit for .. {}
pub struct Foo<T> { field: T }
// NOTE(review): the `@has` lines are htmldocck directives consumed by
// rustdoc's test harness; their quoted text must match the rendered HTML
// exactly — keep them in sync with the negative impl below.
// @has impl_parts/struct.Foo.html '//*[@class="impl"]//code' \
//     "impl<T: Clone> !AnOibit for Foo<T> where T: Sync"
// @has impl_parts/trait.AnOibit.html '//*[@class="item-list"]//code' \
//     "impl<T: Clone> !AnOibit for Foo<T> where T: Sync"
impl<T: Clone> !AnOibit for Foo<T> where T: Sync {}
56247bc350791b313b8f9c051934715301de4db2 | 1,093 | // Copyright 2020 Tetrate
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! `Unit Test Framework` accompanying `Rust` SDK for WebAssembly-based `Envoy` extensions.
//!
//! ## Supported "fakes"
//!
//! * [`FakeClock`]
//! * [`FakeHttpClient`]
//! * [`FakeStats`]
//! * [`FakeStreamInfo`]
//!
//! [`FakeClock`]: host/time/index.html
//! [`FakeHttpClient`]: host/http/client/index.html
//! [`FakeStats`]: host/stats/index.html
//! [`FakeStreamInfo`]: host/stream_info/index.html
#![doc(html_root_url = "https://docs.rs/envoy-sdk-test/0.0.1")]
pub use self::host::*;
pub mod host;
| 32.147059 | 91 | 0.695334 |
e6b08e4f1ce623dc1afdbb8942290c386459c4e2 | 7,618 | //! B+-tree node pool.
use super::{Forest, Node, NodeData};
use entity::PrimaryMap;
#[cfg(test)]
use std::fmt;
use std::ops::{Index, IndexMut};
/// A pool of nodes, including a free list.
pub(super) struct NodePool<F: Forest> {
nodes: PrimaryMap<Node, NodeData<F>>,
freelist: Option<Node>,
}
impl<F: Forest> NodePool<F> {
    /// Create an empty node pool with an empty free list.
    pub fn new() -> Self {
        Self {
            nodes: PrimaryMap::new(),
            freelist: None,
        }
    }

    /// Drop all nodes and reset the free list.
    pub fn clear(&mut self) {
        self.nodes.clear();
        self.freelist = None;
    }

    /// Allocate a node holding `data`, reusing a free-list slot when one exists.
    pub fn alloc_node(&mut self, data: NodeData<F>) -> Node {
        debug_assert!(!data.is_free(), "can't allocate free node");
        if let Some(node) = self.freelist {
            // Pop `node` off the free list and reuse its slot.
            self.freelist = match self.nodes[node] {
                NodeData::Free { next } => next,
                _ => panic!("Invalid {} on free list", node),
            };
            self.nodes[node] = data;
            node
        } else {
            // Free list exhausted: grow the underlying map.
            self.nodes.push(data)
        }
    }

    /// Return `node` to the free list.
    pub fn free_node(&mut self, node: Node) {
        // Quick check for a double free.
        debug_assert!(!self.nodes[node].is_free(), "{} is already free", node);
        self.nodes[node] = NodeData::Free {
            next: self.freelist,
        };
        self.freelist = Some(node);
    }

    /// Recursively free `node` and every node reachable below it.
    pub fn free_tree(&mut self, node: Node) {
        if let NodeData::Inner { size, tree, .. } = self[node] {
            // `tree` is captured by value above to avoid borrow checker
            // trouble while mutating `self`. Recursion depth is bounded by
            // the tree height (`MAX_PATH`), and most trees are tiny, so this
            // beats heap-allocating an iterative traversal.
            #[cfg_attr(feature = "cargo-clippy", allow(needless_range_loop))]
            for i in 0..usize::from(size + 1) {
                self.free_tree(tree[i]);
            }
        }
        self.free_node(node);
    }
}
#[cfg(test)]
impl<F: Forest> NodePool<F> {
    /// Verify the consistency of the tree rooted at `node`.
    ///
    /// Walks the tree iteratively with an explicit work list, checking that
    /// no node is reachable twice, that occupancy invariants hold, and that
    /// keys are strictly increasing within the propagated bounds. Panics
    /// (via `assert!`) on the first violation found.
    pub fn verify_tree(&self, node: Node, comp: &F::Comparator)
    where
        NodeData<F>: fmt::Display,
        F::Key: fmt::Display,
    {
        use super::Comparator;
        use entity::SparseSet;
        use std::borrow::Borrow;
        use std::cmp::Ordering;
        use std::vec::Vec;
        // The root node can't be an inner node with just a single sub-tree. It should have been
        // pruned.
        if let &NodeData::Inner { size, .. } = &self[node] {
            assert!(size > 0, "Root must have more than one sub-tree");
        }
        let mut done = SparseSet::new();
        let mut todo = Vec::new();
        // Todo-list entries are:
        // 1. Optional LHS key which must be <= all node entries.
        // 2. The node reference.
        // 3. Optional RHS key which must be > all node entries.
        todo.push((None, node, None));
        while let Some((lkey, node, rkey)) = todo.pop() {
            // `insert` returns the previous member, if any; seeing one means
            // the same node is linked from two places.
            assert_eq!(
                done.insert(node),
                None,
                "Node appears more than once in tree"
            );
            let mut lower = lkey;
            match self[node] {
                NodeData::Inner { size, keys, tree } => {
                    let size = size as usize;
                    let capacity = tree.len();
                    let keys = &keys[0..size];
                    // Verify occupancy.
                    // Right-most nodes can be small, but others must be at least half full.
                    assert!(
                        rkey.is_none() || (size + 1) * 2 >= capacity,
                        "Only {}/{} entries in {}:{}, upper={}",
                        size + 1,
                        capacity,
                        node,
                        self[node],
                        rkey.unwrap()
                    );
                    // Queue up the sub-trees, checking for duplicates.
                    // There are `size + 1` sub-trees but only `size` keys;
                    // the last iteration gets `upper = rkey`.
                    for i in 0..size + 1 {
                        // Get an upper bound for node[i].
                        let upper = keys.get(i).cloned().or(rkey);
                        // Check that keys are strictly monotonic.
                        if let (Some(a), Some(b)) = (lower, upper) {
                            assert_eq!(
                                comp.cmp(a, b),
                                Ordering::Less,
                                "Key order {} < {} failed in {}: {}",
                                a,
                                b,
                                node,
                                self[node]
                            );
                        }
                        // Queue up the sub-tree.
                        todo.push((lower, tree[i], upper));
                        // Set a lower bound for the next tree.
                        lower = upper;
                    }
                }
                NodeData::Leaf { size, keys, .. } => {
                    let size = size as usize;
                    let capacity = keys.borrow().len();
                    let keys = &keys.borrow()[0..size];
                    // Verify occupancy.
                    // Right-most nodes can be small, but others must be at least half full.
                    assert!(size > 0, "Leaf {} is empty", node);
                    assert!(
                        rkey.is_none() || size * 2 >= capacity,
                        "Only {}/{} entries in {}:{}, upper={}",
                        size,
                        capacity,
                        node,
                        self[node],
                        rkey.unwrap()
                    );
                    // The extra iteration (i == size) compares the last key
                    // against the upper bound `rkey`.
                    for i in 0..size + 1 {
                        let upper = keys.get(i).cloned().or(rkey);
                        // Check that keys are strictly monotonic.
                        if let (Some(a), Some(b)) = (lower, upper) {
                            // NOTE(review): at i == 0 the first leaf key must
                            // *equal* the propagated lower bound — presumably
                            // the parent's separator key is duplicated as the
                            // leaf's first key; confirm against insertion logic.
                            let wanted = if i == 0 {
                                Ordering::Equal
                            } else {
                                Ordering::Less
                            };
                            assert_eq!(
                                comp.cmp(a, b),
                                wanted,
                                "Key order for {} - {} failed in {}: {}",
                                a,
                                b,
                                node,
                                self[node]
                            );
                        }
                        // Set a lower bound for the next key.
                        lower = upper;
                    }
                }
                NodeData::Free { .. } => panic!("Free {} reached", node),
            }
        }
    }
}
impl<F: Forest> Index<Node> for NodePool<F> {
    type Output = NodeData<F>;

    /// Immutable node lookup, delegating to the underlying `PrimaryMap`.
    fn index(&self, index: Node) -> &Self::Output {
        &self.nodes[index]
    }
}
impl<F: Forest> IndexMut<Node> for NodePool<F> {
    /// Mutable node lookup, delegating to the underlying `PrimaryMap`.
    fn index_mut(&mut self, index: Node) -> &mut Self::Output {
        &mut self.nodes[index]
    }
}
| 34.944954 | 98 | 0.419139 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.