hexsha
stringlengths 40
40
| size
int64 4
1.05M
| content
stringlengths 4
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
9c5f35ebd9e80b0af73757d5b9e5b0a1ed25d2d4 | 2,450 | #[doc = r" Value read from the register"]
pub struct R {
    // Snapshot of the register's raw bits, captured when `read()` was called.
    bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
    // Raw bits that will be committed to the hardware register by
    // `write()` / `modify()`.
    bits: u32,
}
impl super::C0V {
    #[doc = r" Modifies the contents of the register"]
    #[inline]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        // Read-modify-write: seed the writer with the current hardware value
        // so fields the closure does not touch are preserved.
        let bits = self.register.get();
        // Field-init shorthand (`bits` instead of `bits: bits`), consistent
        // with `VALR { bits }` used elsewhere in this file.
        let r = R { bits };
        let mut w = W { bits };
        f(&r, &mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Reads the contents of the register"]
    #[inline]
    pub fn read(&self) -> R {
        R { bits: self.register.get() }
    }
    #[doc = r" Writes to the register"]
    #[inline]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Unlike `modify`, `write` starts from the reset value: fields the
        // closure does not set are written back as their reset defaults.
        let mut w = W::reset_value();
        f(&mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Writes the reset value to the register"]
    #[inline]
    pub fn reset(&self) {
        self.write(|w| w)
    }
}
#[doc = r" Value of the field"]
pub struct VALR {
    // Extracted 16-bit field value (bits 0:15 of the register).
    bits: u16,
}
impl VALR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u16 {
        // Plain accessor; `bits` was already masked/shifted during extraction.
        self.bits
    }
}
#[doc = r" Proxy"]
pub struct _VALW<'a> {
    // Borrow of the writer being built; returned when the proxy is consumed.
    w: &'a mut W,
}
impl<'a> _VALW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        // Field occupies bits 0:15 of the 32-bit register.
        const MASK: u16 = 0xffff;
        const OFFSET: u8 = 0;
        // Clear the field's bit range, then merge in the new (masked) value.
        let cleared = self.w.bits & !((MASK as u32) << OFFSET);
        self.w.bits = cleared | (((value & MASK) as u32) << OFFSET);
        self.w
    }
}
impl R {
    #[doc = r" Value of the register as raw bits"]
    #[inline]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    #[doc = "Bits 0:15 - Channel Value"]
    #[inline]
    pub fn val(&self) -> VALR {
        // Extract the 16-bit field starting at bit offset 0.
        const MASK: u16 = 0xffff;
        const OFFSET: u8 = 0;
        let bits = ((self.bits >> OFFSET) & MASK as u32) as u16;
        VALR { bits }
    }
}
impl W {
    #[doc = r" Reset value of the register"]
    #[inline]
    pub fn reset_value() -> W {
        // C0V resets to all-zeroes.
        W { bits: 0 }
    }
    #[doc = r" Writes raw bits to the register"]
    #[inline]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        // Unsafe: the caller must guarantee `bits` is a valid register value.
        self.bits = bits;
        self
    }
    #[doc = "Bits 0:15 - Channel Value"]
    #[inline]
    pub fn val(&mut self) -> _VALW {
        // Returns a proxy that writes the VAL field into this writer.
        _VALW { w: self }
    }
}
| 23.557692 | 59 | 0.493061 |
0196a10fd3fd5c12699ff458277296fce0b745e1 | 1,912 | #[doc = "Register `CACHE_STATE` reader"]
pub struct R(crate::R<CACHE_STATE_SPEC>);
impl core::ops::Deref for R {
    type Target = crate::R<CACHE_STATE_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        // Delegate to the generic reader so its helpers (e.g. `bits`) are
        // usable directly on this register-specific wrapper.
        &self.0
    }
}
impl From<crate::R<CACHE_STATE_SPEC>> for R {
    #[inline(always)]
    fn from(reader: crate::R<CACHE_STATE_SPEC>) -> Self {
        // Wrap the generic register reader in this register's newtype.
        R(reader)
    }
}
#[doc = "Field `ICACHE_STATE` reader - The bit is used to indicate whether icache main fsm is in idle state or not. 1: in idle state, 0: not in idle state"]
pub struct ICACHE_STATE_R(crate::FieldReader<u16, u16>);
impl ICACHE_STATE_R {
    #[inline(always)]
    pub(crate) fn new(bits: u16) -> Self {
        // `bits` is expected to already be masked to the field's width by the
        // caller (see `R::icache_state`).
        ICACHE_STATE_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for ICACHE_STATE_R {
    type Target = crate::FieldReader<u16, u16>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        // Expose the generic `FieldReader` API (e.g. `bits`) on the field type.
        &self.0
    }
}
impl R {
    #[doc = "Bits 0:11 - The bit is used to indicate whether icache main fsm is in idle state or not. 1: in idle state, 0: not in idle state"]
    #[inline(always)]
    pub fn icache_state(&self) -> ICACHE_STATE_R {
        // 0x0fff keeps the low 12 bits (0:11) declared for this field.
        ICACHE_STATE_R::new((self.bits & 0x0fff) as u16)
    }
}
#[doc = "This description will be updated in the near future.\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [cache_state](index.html) module"]
pub struct CACHE_STATE_SPEC;
impl crate::RegisterSpec for CACHE_STATE_SPEC {
    // The register is accessed as a 32-bit word.
    type Ux = u32;
}
#[doc = "`read()` method returns [cache_state::R](R) reader structure"]
impl crate::Readable for CACHE_STATE_SPEC {
    // Only a `Readable` impl is provided here; no `Writable` impl is visible
    // for this register in this file.
    type Reader = R;
}
#[doc = "`reset()` method sets CACHE_STATE to value 0x01"]
impl crate::Resettable for CACHE_STATE_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        // Reset state has the low bit set (per the doc attribute above).
        0x01
    }
}
| 35.407407 | 266 | 0.657427 |
d9ab4a3255f37a8dfa0ad59fba6ce8d3f75961b5 | 138 | use serde::Deserialize;
use super::SessionId;
#[derive(Deserialize, Debug)]
pub struct CreateSessionResponse {
    // Identifier of the newly created session, as deserialized from the
    // server's response payload.
    pub id: SessionId,
}
| 15.333333 | 34 | 0.73913 |
76485322aa6b5facb82322bd9866fec3038752eb | 8,055 | // Copyright 2016 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//! Functionality shared by operations on private keys (ECC keygen and
//! ECDSA signing).
use {ec, error, rand};
use super::ops::*;
use super::verify_affine_point_is_on_the_curve;
use untrusted;
/// Generates a random private scalar for `ops`'s curve in the range [1, n)
/// by rejection-sampling candidates drawn from `rng`.
pub fn generate_private_key(ops: &PrivateKeyOps, rng: &rand::SecureRandom)
                            -> Result<ec::PrivateKey, error::Unspecified> {
    // [NSA Suite B Implementer's Guide to ECDSA] Appendix A.1.2, and
    // [NSA Suite B Implementer's Guide to NIST SP 800-56A] Appendix B.2,
    // "Key Pair Generation by Testing Candidates".
    //
    // [NSA Suite B Implementer's Guide to ECDSA]: doc/ecdsa.pdf.
    // [NSA Suite B Implementer's Guide to NIST SP 800-56A]: doc/ecdh.pdf.

    // TODO: The NSA guide also suggests, in appendix B.1, another mechanism
    // that would avoid the need to use `rng.fill()` more than once. It works
    // by generating an extra 64 bits of random bytes and then reducing the
    // output (mod n). Supposedly, this removes enough of the bias towards
    // small values from the modular reduction, but it isn't obvious that it is
    // sufficient. TODO: Figure out what we can do to mitigate the bias issue
    // and switch to the other mechanism.

    let num_limbs = ops.common.num_limbs;

    // XXX: The value 100 was chosen to match OpenSSL due to uncertainty of
    // what specific value would be better, but it seems bad to try 100 times.
    for _ in 0..100 {
        // Zero-initialized buffer sized for the largest supported curve; only
        // the first `num_limbs * LIMB_BYTES` bytes are filled and validated.
        let mut candidate = [0; ec::SCALAR_MAX_BYTES];

        {
            // NSA Guide Steps 1, 2, and 3.
            //
            // Since we calculate the length ourselves, it is pointless to check
            // it, since we can only check it by doing the same calculation.

            let candidate = &mut candidate[..(num_limbs * LIMB_BYTES)];

            // NSA Guide Step 4.
            //
            // The requirement that the random number generator has the
            // requested security strength is delegated to `rng`.
            rng.fill(candidate)?;

            // NSA Guide Steps 5, 6, and 7.
            // Rejection sampling: an out-of-range candidate is discarded and a
            // fresh one is drawn, avoiding modular-reduction bias.
            if check_scalar_big_endian_bytes(ops, candidate).is_err() {
                continue;
            }
        }

        // NSA Guide Step 8 is done in `public_from_private()`.

        // NSA Guide Step 9.
        return Ok(ec::PrivateKey {
            bytes: candidate,
        });
    }

    // Reached only if 100 consecutive candidates were out of range.
    Err(error::Unspecified)
}
// The underlying X25519 and Ed25519 code uses an [u8; 32] to store the private
// key. To make the ECDH and ECDSA code similar to that, we also store the
// private key that way, which means we have to convert it to a Scalar whenever
// we need to use it.
#[inline]
pub fn private_key_as_scalar(ops: &PrivateKeyOps,
                             private_key: &ec::PrivateKey) -> Scalar {
    let scalar_len = ops.common.num_limbs * LIMB_BYTES;
    // Infallible in practice: the stored bytes were validated when the
    // private key was created, so `unwrap()` cannot panic here.
    scalar_from_big_endian_bytes(ops, &private_key.bytes[..scalar_len])
        .unwrap()
}
pub fn check_scalar_big_endian_bytes(ops: &PrivateKeyOps, bytes: &[u8])
-> Result<(), error::Unspecified> {
debug_assert_eq!(bytes.len(), ops.common.num_limbs * LIMB_BYTES);
scalar_from_big_endian_bytes(ops, bytes).map(|_| ())
}
// Parses a fixed-length (zero-padded) big-endian-encoded scalar in the range
// [1, n). This is constant-time with respect to the actual value *only if* the
// value is actually in range. In other words, this won't leak anything about a
// valid value, but it might leak small amounts of information about an invalid
// value (which constraint it failed).
pub fn scalar_from_big_endian_bytes(ops: &PrivateKeyOps, bytes: &[u8])
                                    -> Result<Scalar, error::Unspecified> {
    // [NSA Suite B Implementer's Guide to ECDSA] Appendix A.1.2, and
    // [NSA Suite B Implementer's Guide to NIST SP 800-56A] Appendix B.2,
    // "Key Pair Generation by Testing Candidates".
    //
    // [NSA Suite B Implementer's Guide to ECDSA]: doc/ecdsa.pdf.
    // [NSA Suite B Implementer's Guide to NIST SP 800-56A]: doc/ecdh.pdf.
    //
    // Steps 5, 6, and 7.
    //
    // XXX: The NSA guide says that we should verify that the random scalar is
    // in the range [0, n - 1) and then add one to it so that it is in the range
    // [1, n). Instead, we verify that the scalar is in the range [1, n). This
    // way, we avoid needing to compute or store the value (n - 1), we avoid the
    // need to implement a function to add one to a scalar, and we avoid needing
    // to convert the scalar back into an array of bytes.

    // The constant-time parser enforces both the fixed length and the
    // [1, n) range check described above.
    scalar_parse_big_endian_fixed_consttime(
        ops.common, untrusted::Input::from(bytes))
}
/// Computes the public key corresponding to `my_private_key` and writes its
/// uncompressed encoding (tag byte `4`, then x, then y) into `public_out`.
pub fn public_from_private(ops: &PrivateKeyOps, public_out: &mut [u8],
                           my_private_key: &ec::PrivateKey)
                           -> Result<(), error::Unspecified> {
    let elem_and_scalar_bytes = ops.common.num_limbs * LIMB_BYTES;
    // Output layout: 1 tag byte + x coordinate + y coordinate.
    debug_assert_eq!(public_out.len(), 1 + (2 * elem_and_scalar_bytes));
    let my_private_key = private_key_as_scalar(ops, my_private_key);
    // Public key = private scalar times the curve's base point.
    let my_public_key = ops.point_mul_base(&my_private_key);
    public_out[0] = 4; // Uncompressed encoding.
    let (x_out, y_out) =
        (&mut public_out[1..]).split_at_mut(elem_and_scalar_bytes);

    // `big_endian_affine_from_jacobian` verifies that the point is not at
    // infinity and is on the curve.
    big_endian_affine_from_jacobian(ops, Some(x_out), Some(y_out),
                                    &my_public_key)
}
/// Converts the Jacobian point `p` to affine coordinates, verifies it is a
/// valid point on the curve, and writes the big-endian encodings of the
/// requested coordinates into `x_out` / `y_out`.
pub fn big_endian_affine_from_jacobian(ops: &PrivateKeyOps,
                                       x_out: Option<&mut [u8]>,
                                       y_out: Option<&mut [u8]>, p: &Point)
                                       -> Result<(), error::Unspecified> {
    let z = ops.common.point_z(p);

    // Since we restrict our private key to the range [1, n), the curve has
    // prime order, and we verify that the peer's point is on the curve,
    // there's no way that the result can be at infinity. But, use `assert!`
    // instead of `debug_assert!` anyway.
    assert!(ops.common.elem_verify_is_not_zero(&z).is_ok());

    let x = ops.common.point_x(p);
    let y = ops.common.point_y(p);

    // z^-2, used below to recover the affine coordinates.
    let zz_inv = ops.elem_inverse_squared(&z);

    // x_affine = x / z^2.
    let x_aff = ops.common.elem_product(&x, &zz_inv);

    // `y_aff` is needed to validate the point is on the curve. It is also
    // needed in the non-ECDH case where we need to output it.
    // y_affine = y / z^3, computed here as y * (z * (z^-2)^2) = y * z^-3.
    let y_aff = {
        let zzzz_inv = ops.common.elem_squared(&zz_inv);
        let zzz_inv = ops.common.elem_product(&z, &zzzz_inv);
        ops.common.elem_product(&y, &zzz_inv)
    };

    // If we validated our inputs correctly and then computed (x, y, z), then
    // (x, y, z) will be on the curve. See
    // `verify_affine_point_is_on_the_curve_scaled` for the motivation.
    verify_affine_point_is_on_the_curve(ops.common, (&x_aff, &y_aff))?;

    let num_limbs = ops.common.num_limbs;
    if let Some(x_out) = x_out {
        let x = ops.common.elem_unencoded(&x_aff);
        big_endian_from_limbs_padded(&x.limbs[..num_limbs], x_out);
    }
    if let Some(y_out) = y_out {
        let y = ops.common.elem_unencoded(&y_aff);
        big_endian_from_limbs_padded(&y.limbs[..num_limbs], y_out);
    }

    Ok(())
}
| 44.016393 | 80 | 0.650031 |
91f8735d4ef55a5959fdbaaa7b32f4f2bbf96615 | 22,066 | #[cfg(test)]
mod tests {
use amethyst::{
core::TransformBundle,
ecs::WorldExt,
renderer::{sprite::SpriteRender, types::DefaultBackend, RenderEmptyBundle},
Error,
};
use amethyst_test::AmethystApplication;
use approx::assert_relative_eq;
use asset_gfx_gen::{ColourSpriteSheetGen, ColourSpriteSheetGenData, ColourSpriteSheetParams};
#[test]
fn solid_returns_sprite_render() -> Result<(), Error> {
const RED: [f32; 4] = [1., 0.2, 0.1, 1.];
AmethystApplication::blank()
.with_bundle(TransformBundle::new())
.with_bundle_event_fn(|event_loop| RenderEmptyBundle::<DefaultBackend>::new(event_loop))
.with_effect(|world| {
let sprite_render = {
let colour_sprite_gen_data = world.system_data::<ColourSpriteSheetGenData>();
ColourSpriteSheetGen::solid(&colour_sprite_gen_data, RED)
};
world.insert(sprite_render);
})
.with_assertion(|world| {
let sprite_render = &*world.read_resource::<SpriteRender>();
let ColourSpriteSheetGenData {
texture_assets,
sprite_sheet_assets,
..
} = world.system_data::<ColourSpriteSheetGenData>();
assert_eq!(0, sprite_render.sprite_number);
let sprite_sheet = sprite_sheet_assets.get(&sprite_render.sprite_sheet);
assert!(sprite_sheet.is_some());
let sprite_sheet = sprite_sheet.expect("Expected `SpriteSheet` to exist.");
assert!(texture_assets.get(&sprite_sheet.texture).is_some());
})
.run_winit_loop()
}
#[test]
fn gradient_returns_sprite_render() -> Result<(), Error> {
const COLOUR_BEGIN: [f32; 4] = [1., 0., 0., 0.5];
const COLOUR_END: [f32; 4] = [0., 1., 0., 1.];
AmethystApplication::blank()
.with_bundle(TransformBundle::new())
.with_bundle_event_fn(|event_loop| RenderEmptyBundle::<DefaultBackend>::new(event_loop))
.with_effect(|world| {
let sprite_render = {
let colour_sprite_gen_data = world.system_data::<ColourSpriteSheetGenData>();
ColourSpriteSheetGen::gradient(
&colour_sprite_gen_data,
COLOUR_BEGIN,
COLOUR_END,
5,
)
};
world.insert(sprite_render);
})
.with_assertion(|world| {
let sprite_render = &*world.read_resource::<SpriteRender>();
let ColourSpriteSheetGenData {
texture_assets,
sprite_sheet_assets,
..
} = world.system_data::<ColourSpriteSheetGenData>();
assert_eq!(0, sprite_render.sprite_number);
let sprite_sheet = sprite_sheet_assets.get(&sprite_render.sprite_sheet);
assert!(sprite_sheet.is_some());
let sprite_sheet = sprite_sheet.expect("Expected `SpriteSheet` to exist.");
assert!(texture_assets.get(&sprite_sheet.texture).is_some());
})
.run_winit_loop()
}
#[test]
fn gradient_colours_generates_pixel_data_1x1_sprite_padded() {
let colour_sprite_sheet_params = ColourSpriteSheetParams {
sprite_w: 1,
sprite_h: 1,
padded: true,
row_count: 2,
column_count: 3,
};
let colour_begin = [1., 0.2, 0., 0.6];
let colour_end = [0.2, 1., 0., 1.];
let sprite_count = 5;
let (colours, _, _) = ColourSpriteSheetGen::gradient_colours(
colour_sprite_sheet_params,
colour_begin,
colour_end,
sprite_count,
);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[0]);
assert_relative_eq!([0.; 4][..], colours[1]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[2]);
assert_relative_eq!([0.; 4][..], colours[3]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[4]);
assert_relative_eq!([0.; 4][..], colours[5]);
// Padding row.
// row_length
// = (1 sprite_pixel + 1 padding_pixel) * column_count * 4 channels
// = 2 * 3 * 4
// = 24
// 1 padding pixel * row_length
assert_relative_eq!([0.; 4][..], colours[6]);
assert_relative_eq!([0.; 4][..], colours[7]);
assert_relative_eq!([0.; 4][..], colours[8]);
assert_relative_eq!([0.; 4][..], colours[9]);
assert_relative_eq!([0.; 4][..], colours[10]);
assert_relative_eq!([0.; 4][..], colours[11]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[12]);
assert_relative_eq!([0.; 4][..], colours[13]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[14]);
assert_relative_eq!([0.; 4][..], colours[15]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[16]);
assert_relative_eq!([0.; 4][..], colours[17]);
assert_relative_eq!([0.; 4][..], colours[18]);
assert_relative_eq!([0.; 4][..], colours[19]);
assert_relative_eq!([0.; 4][..], colours[20]);
assert_relative_eq!([0.; 4][..], colours[21]);
assert_relative_eq!([0.; 4][..], colours[22]);
assert_relative_eq!([0.; 4][..], colours[23]);
}
#[test]
fn gradient_colours_generates_pixel_data_2x1_sprite_padded() {
let colour_sprite_sheet_params = ColourSpriteSheetParams {
sprite_w: 2,
sprite_h: 1,
padded: true,
row_count: 2,
column_count: 3,
};
let colour_begin = [1., 0.2, 0., 0.6];
let colour_end = [0.2, 1., 0., 1.];
let sprite_count = 5;
let (colours, _, _) = ColourSpriteSheetGen::gradient_colours(
colour_sprite_sheet_params,
colour_begin,
colour_end,
sprite_count,
);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[0]);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[1]);
assert_relative_eq!([0.; 4][..], colours[2]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[3]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[4]);
assert_relative_eq!([0.; 4][..], colours[5]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[6]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[7]);
assert_relative_eq!([0.; 4][..], colours[8]);
// Padding row.
// row_length
// = (2 sprite_pixels + 1 padding_pixel) * column_count * 4 channels
// = 3 * 3 * 4
// = 36
// 1 padding pixel * row_length
assert_relative_eq!([0.; 4][..], colours[9]);
assert_relative_eq!([0.; 4][..], colours[10]);
assert_relative_eq!([0.; 4][..], colours[11]);
assert_relative_eq!([0.; 4][..], colours[12]);
assert_relative_eq!([0.; 4][..], colours[13]);
assert_relative_eq!([0.; 4][..], colours[14]);
assert_relative_eq!([0.; 4][..], colours[15]);
assert_relative_eq!([0.; 4][..], colours[16]);
assert_relative_eq!([0.; 4][..], colours[17]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[18]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[19]);
assert_relative_eq!([0.; 4][..], colours[20]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[21]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[22]);
assert_relative_eq!([0.; 4][..], colours[23]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[24]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[25]);
assert_relative_eq!([0.; 4][..], colours[26]);
assert_relative_eq!([0.; 4][..], colours[27]);
assert_relative_eq!([0.; 4][..], colours[28]);
assert_relative_eq!([0.; 4][..], colours[29]);
assert_relative_eq!([0.; 4][..], colours[30]);
assert_relative_eq!([0.; 4][..], colours[31]);
assert_relative_eq!([0.; 4][..], colours[32]);
assert_relative_eq!([0.; 4][..], colours[33]);
assert_relative_eq!([0.; 4][..], colours[34]);
assert_relative_eq!([0.; 4][..], colours[35]);
}
#[test]
fn gradient_colours_generates_pixel_data_1x2_sprite_padded() {
let colour_sprite_sheet_params = ColourSpriteSheetParams {
sprite_w: 1,
sprite_h: 2,
padded: true,
row_count: 2,
column_count: 3,
};
let colour_begin = [1., 0.2, 0., 0.6];
let colour_end = [0.2, 1., 0., 1.];
let sprite_count = 5;
let (colours, _, _) = ColourSpriteSheetGen::gradient_colours(
colour_sprite_sheet_params,
colour_begin,
colour_end,
sprite_count,
);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[0]);
assert_relative_eq!([0.; 4][..], colours[1]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[2]);
assert_relative_eq!([0.; 4][..], colours[3]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[4]);
assert_relative_eq!([0.; 4][..], colours[5]);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[6]);
assert_relative_eq!([0.; 4][..], colours[7]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[8]);
assert_relative_eq!([0.; 4][..], colours[9]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[10]);
assert_relative_eq!([0.; 4][..], colours[11]);
// Padding row.
// row_length
// = (1 sprite_pixel + 1 padding_pixel) * column_count
// = 2 * 3
// = 6
// 1 padding pixel * row_length
assert_relative_eq!([0.; 4][..], colours[12]);
assert_relative_eq!([0.; 4][..], colours[13]);
assert_relative_eq!([0.; 4][..], colours[14]);
assert_relative_eq!([0.; 4][..], colours[15]);
assert_relative_eq!([0.; 4][..], colours[16]);
assert_relative_eq!([0.; 4][..], colours[17]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[18]);
assert_relative_eq!([0.; 4][..], colours[19]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[20]);
assert_relative_eq!([0.; 4][..], colours[21]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[22]);
assert_relative_eq!([0.; 4][..], colours[23]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[24]);
assert_relative_eq!([0.; 4][..], colours[25]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[26]);
assert_relative_eq!([0.; 4][..], colours[27]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[28]);
assert_relative_eq!([0.; 4][..], colours[29]);
assert_relative_eq!([0.; 4][..], colours[30]);
assert_relative_eq!([0.; 4][..], colours[31]);
assert_relative_eq!([0.; 4][..], colours[32]);
assert_relative_eq!([0.; 4][..], colours[33]);
assert_relative_eq!([0.; 4][..], colours[34]);
assert_relative_eq!([0.; 4][..], colours[35]);
}
#[test]
fn gradient_colours_generates_pixel_data_2x2_sprite_padded() {
let colour_sprite_sheet_params = ColourSpriteSheetParams {
sprite_w: 2,
sprite_h: 2,
padded: true,
row_count: 2,
column_count: 3,
};
let colour_begin = [1., 0.2, 0., 0.6];
let colour_end = [0.2, 1., 0., 1.];
let sprite_count = 5;
let (colours, _, _) = ColourSpriteSheetGen::gradient_colours(
colour_sprite_sheet_params,
colour_begin,
colour_end,
sprite_count,
);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[0]);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[1]);
assert_relative_eq!([0.; 4][..], colours[2]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[3]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[4]);
assert_relative_eq!([0.; 4][..], colours[5]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[6]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[7]);
assert_relative_eq!([0.; 4][..], colours[8]);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[9]);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[10]);
assert_relative_eq!([0.; 4][..], colours[11]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[12]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[13]);
assert_relative_eq!([0.; 4][..], colours[14]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[15]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[16]);
assert_relative_eq!([0.; 4][..], colours[17]);
// Padding row.
// row_length
// = (2 sprite_pixels + 1 padding_pixel) * column_count * 4 channels
// = 3 * 3 * 4
// = 36
// 1 padding pixel * row_length
assert_relative_eq!([0.; 4][..], colours[18]);
assert_relative_eq!([0.; 4][..], colours[19]);
assert_relative_eq!([0.; 4][..], colours[20]);
assert_relative_eq!([0.; 4][..], colours[21]);
assert_relative_eq!([0.; 4][..], colours[22]);
assert_relative_eq!([0.; 4][..], colours[23]);
assert_relative_eq!([0.; 4][..], colours[24]);
assert_relative_eq!([0.; 4][..], colours[25]);
assert_relative_eq!([0.; 4][..], colours[26]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[27]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[28]);
assert_relative_eq!([0.; 4][..], colours[29]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[30]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[31]);
assert_relative_eq!([0.; 4][..], colours[32]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[33]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[34]);
assert_relative_eq!([0.; 4][..], colours[35]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[36]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[37]);
assert_relative_eq!([0.; 4][..], colours[38]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[39]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[40]);
assert_relative_eq!([0.; 4][..], colours[41]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[42]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[43]);
assert_relative_eq!([0.; 4][..], colours[44]);
assert_relative_eq!([0.; 4][..], colours[45]);
assert_relative_eq!([0.; 4][..], colours[46]);
assert_relative_eq!([0.; 4][..], colours[47]);
assert_relative_eq!([0.; 4][..], colours[48]);
assert_relative_eq!([0.; 4][..], colours[49]);
assert_relative_eq!([0.; 4][..], colours[50]);
assert_relative_eq!([0.; 4][..], colours[51]);
assert_relative_eq!([0.; 4][..], colours[52]);
assert_relative_eq!([0.; 4][..], colours[53]);
}
#[test]
fn gradient_colours_generates_pixel_data_1x1_sprite_unpadded() {
let colour_sprite_sheet_params = ColourSpriteSheetParams {
sprite_w: 1,
sprite_h: 1,
padded: false,
row_count: 2,
column_count: 3,
};
let colour_begin = [1., 0.2, 0., 0.6];
let colour_end = [0.2, 1., 0., 1.];
let sprite_count = 5;
let (colours, _, _) = ColourSpriteSheetGen::gradient_colours(
colour_sprite_sheet_params,
colour_begin,
colour_end,
sprite_count,
);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[0]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[1]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[2]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[3]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[4]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[5]);
}
#[test]
fn gradient_colours_generates_pixel_data_2x1_sprite_unpadded() {
let colour_sprite_sheet_params = ColourSpriteSheetParams {
sprite_w: 2,
sprite_h: 1,
padded: false,
row_count: 2,
column_count: 3,
};
let colour_begin = [1., 0.2, 0., 0.6];
let colour_end = [0.2, 1., 0., 1.];
let sprite_count = 5;
let (colours, _, _) = ColourSpriteSheetGen::gradient_colours(
colour_sprite_sheet_params,
colour_begin,
colour_end,
sprite_count,
);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[0]);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[1]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[2]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[3]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[4]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[5]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[6]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[7]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[8]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[9]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[10]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[11]);
}
#[test]
fn gradient_colours_generates_pixel_data_1x2_sprite_unpadded() {
let colour_sprite_sheet_params = ColourSpriteSheetParams {
sprite_w: 1,
sprite_h: 2,
padded: false,
row_count: 2,
column_count: 3,
};
let colour_begin = [1., 0.2, 0., 0.6];
let colour_end = [0.2, 1., 0., 1.];
let sprite_count = 5;
let (colours, _, _) = ColourSpriteSheetGen::gradient_colours(
colour_sprite_sheet_params,
colour_begin,
colour_end,
sprite_count,
);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[0]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[1]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[2]);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[3]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[4]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[5]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[6]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[7]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[8]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[9]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[10]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[11]);
}
#[test]
fn gradient_colours_generates_pixel_data_2x2_sprite_unpadded() {
let colour_sprite_sheet_params = ColourSpriteSheetParams {
sprite_w: 2,
sprite_h: 2,
padded: false,
row_count: 2,
column_count: 3,
};
let colour_begin = [1., 0.2, 0., 0.6];
let colour_end = [0.2, 1., 0., 1.];
let sprite_count = 5;
let (colours, _, _) = ColourSpriteSheetGen::gradient_colours(
colour_sprite_sheet_params,
colour_begin,
colour_end,
sprite_count,
);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[0]);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[1]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[2]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[3]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[4]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[5]);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[6]);
assert_relative_eq!([1.0, 0.2, 0.0, 0.6][..], colours[7]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[8]);
assert_relative_eq!([0.8, 0.4, 0.0, 0.7][..], colours[9]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[10]);
assert_relative_eq!([0.6, 0.6, 0.0, 0.8][..], colours[11]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[12]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[13]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[14]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[15]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[16]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[17]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[18]);
assert_relative_eq!([0.4, 0.8, 0.0, 0.9][..], colours[19]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[20]);
assert_relative_eq!([0.2, 1.0, 0.0, 1.0][..], colours[21]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[22]);
assert_relative_eq!([0.0, 0.0, 0.0, 0.0][..], colours[23]);
}
#[test]
fn channel_steps_calculates_step_correctly() {
let sprite_count = 6;
let colour_begin = [1., 0., 0., 0.5];
let colour_end = [0., 1., 0., 1.];
assert_eq!(
[-0.2, 0.2, 0., 0.1],
ColourSpriteSheetGen::channel_steps(sprite_count, colour_begin, colour_end,)
)
}
}
| 41.870968 | 100 | 0.522841 |
18ca284811080625e20397f26a8a8e3bf52fa0f6 | 13,889 | //! This module contains the machinery necessary to detect infinite loops
//! during const-evaluation by taking snapshots of the state of the interpreter
//! at regular intervals.
// This lives in `interpret` because it needs access to all sorts of private state. However,
// it is not used by the general miri engine, just by CTFE.
use std::hash::{Hash, Hasher};
use std::mem;
use rustc::ich::{StableHashingContext, StableHashingContextProvider};
use rustc::mir;
use rustc::mir::interpret::{
AllocId, Pointer, Scalar, ScalarMaybeUndef,
Relocations, Allocation, UndefMask,
EvalResult, EvalErrorKind,
};
use rustc::ty::{self, TyCtxt};
use rustc::ty::layout::Align;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult};
use syntax::ast::Mutability;
use syntax::source_map::Span;
use super::eval_context::{LocalValue, StackPopCleanup};
use super::{Frame, Memory, Operand, MemPlace, Place, Value};
use const_eval::CompileTimeInterpreter;
#[derive(Default)]
pub(crate) struct InfiniteLoopDetector<'a, 'mir, 'tcx: 'a + 'mir> {
    /// The set of all `EvalSnapshot` *hashes* observed by this detector.
    ///
    /// When a collision occurs in this table, we store the full snapshot in
    /// `snapshots`.
    hashes: FxHashSet<u64>, // `u64` matches the `StableHasher::<u64>` output used below.
    /// The set of all `EvalSnapshot`s observed by this detector.
    ///
    /// An `EvalSnapshot` will only be fully cloned once it has caused a
    /// collision in `hashes`. As a result, the detector must observe at least
    /// *two* full cycles of an infinite loop before it triggers.
    snapshots: FxHashSet<EvalSnapshot<'a, 'mir, 'tcx>>,
}
impl<'a, 'mir, 'tcx> InfiniteLoopDetector<'a, 'mir, 'tcx>
{
    /// Records the interpreter state described by `memory` and `stack`, and
    /// returns `Err(InfiniteLoop)` once the exact same full snapshot is seen
    /// a second time.
    pub fn observe_and_analyze<'b>(
        &mut self,
        tcx: &TyCtxt<'b, 'tcx, 'tcx>,
        span: Span,
        memory: &Memory<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>,
        stack: &[Frame<'mir, 'tcx>],
    ) -> EvalResult<'tcx, ()> {
        // Compute stack's hash before copying anything
        let mut hcx = tcx.get_stable_hashing_context();
        let mut hasher = StableHasher::<u64>::new();
        stack.hash_stable(&mut hcx, &mut hasher);
        let hash = hasher.finish();

        // Check if we know that hash already
        if self.hashes.is_empty() {
            // First observation ever for this detector — warn that the
            // evaluation may be long-running.
            // FIXME(#49980): make this warning a lint
            tcx.sess.span_warn(span,
                "Constant evaluating a complex constant, this might take some time");
        }

        if self.hashes.insert(hash) {
            // No collision — first time this hash has been seen; cheap path.
            return Ok(())
        }

        // We need to make a full copy; this is where things start to get
        // really expensive.
        info!("snapshotting the state of the interpreter");

        if self.snapshots.insert(EvalSnapshot::new(memory, stack)) {
            // Spurious collision or first cycle
            return Ok(())
        }

        // Second cycle: the identical full snapshot was observed before, so
        // the interpreter is in an infinite loop.
        Err(EvalErrorKind::InfiniteLoop.into())
    }
}
/// Resolves `AllocId`s to the allocations they refer to while a snapshot is
/// being taken.
trait SnapshotContext<'a> {
    fn resolve(&'a self, id: &AllocId) -> Option<&'a Allocation>;
}
/// Taking a snapshot of the evaluation context produces a view of
/// the state of the interpreter that is invariant to `AllocId`s.
trait Snapshot<'a, Ctx: SnapshotContext<'a>> {
    /// The `AllocId`-independent representation of `Self`.
    type Item;
    fn snapshot(&self, ctx: &'a Ctx) -> Self::Item;
}
// Expands to the snapshot of one field: by default the field's own
// `snapshot(ctx)`, or the supplied `$delegate` expression when one is given
// (used by `impl_snapshot_for!` for plain-data fields copied verbatim).
macro_rules! __impl_snapshot_field {
    ($field:ident, $ctx:expr) => ($field.snapshot($ctx));
    ($field:ident, $ctx:expr, $delegate:expr) => ($delegate);
}
// Generates a `Snapshot` impl for a type generic over its `AllocId`
// representation. Each listed field either recurses into `snapshot`, or —
// with the `-> expr` form — is produced by the given delegate expression.
macro_rules! impl_snapshot_for {
    // FIXME(mark-i-m): Some of these should be `?` rather than `*`.
    (enum $enum_name:ident {
        $( $variant:ident $( ( $($field:ident $(-> $delegate:expr)*),* ) )* ),* $(,)*
    }) => {
        impl<'a, Ctx> self::Snapshot<'a, Ctx> for $enum_name
            where Ctx: self::SnapshotContext<'a>,
        {
            type Item = $enum_name<AllocIdSnapshot<'a>>;
            #[inline]
            fn snapshot(&self, __ctx: &'a Ctx) -> Self::Item {
                // One match arm per variant; tuple fields are snapshotted
                // (or delegated) element-wise.
                match *self {
                    $(
                        $enum_name::$variant $( ( $(ref $field),* ) )* =>
                            $enum_name::$variant $(
                                ( $( __impl_snapshot_field!($field, __ctx $(, $delegate)*) ),* ),
                            )*
                    )*
                }
            }
        }
    };
    // FIXME(mark-i-m): same here.
    (struct $struct_name:ident { $($field:ident $(-> $delegate:expr)*),* $(,)* }) => {
        impl<'a, Ctx> self::Snapshot<'a, Ctx> for $struct_name
            where Ctx: self::SnapshotContext<'a>,
        {
            type Item = $struct_name<AllocIdSnapshot<'a>>;
            #[inline]
            fn snapshot(&self, __ctx: &'a Ctx) -> Self::Item {
                // Exhaustive destructuring: adding a field without updating
                // the macro invocation fails to compile.
                let $struct_name {
                    $(ref $field),*
                } = *self;
                $struct_name {
                    $( $field: __impl_snapshot_field!($field, __ctx $(, $delegate)*) ),*
                }
            }
        }
    };
}
impl<'a, Ctx, T> Snapshot<'a, Ctx> for Option<T>
    where Ctx: SnapshotContext<'a>,
          T: Snapshot<'a, Ctx>
{
    type Item = Option<<T as Snapshot<'a, Ctx>>::Item>;
    /// Snapshots the inner value if present, preserving `None`.
    fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
        self.as_ref().map(|inner| inner.snapshot(ctx))
    }
}
/// Snapshot form of an `AllocId`: the allocation it resolved to (if any)
/// rather than the numeric id, so comparisons are id-invariant.
#[derive(Eq, PartialEq)]
struct AllocIdSnapshot<'a>(Option<AllocationSnapshot<'a>>);
impl<'a, Ctx> Snapshot<'a, Ctx> for AllocId
    where Ctx: SnapshotContext<'a>,
{
    type Item = AllocIdSnapshot<'a>;
    fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
        // Ids the context cannot resolve become `AllocIdSnapshot(None)`.
        AllocIdSnapshot(ctx.resolve(self).map(|alloc| alloc.snapshot(ctx)))
    }
}
// A `Pointer` is snapshotted by resolving its `alloc_id`; the offset is
// plain data.
impl_snapshot_for!(struct Pointer {
    alloc_id,
    offset -> *offset, // just copy offset verbatim
});
impl<'a, Ctx> Snapshot<'a, Ctx> for Scalar
    where Ctx: SnapshotContext<'a>,
{
    type Item = Scalar<AllocIdSnapshot<'a>>;
    fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
        match self {
            // Pointers embed an `AllocId`, so they must be snapshotted.
            Scalar::Ptr(p) => Scalar::Ptr(p.snapshot(ctx)),
            // Raw bits contain no `AllocId`; copy them verbatim.
            Scalar::Bits{ size, bits } => Scalar::Bits {
                size: *size,
                bits: *bits,
            },
        }
    }
}
// `Undef` carries no data; only the `Scalar` payload needs snapshotting.
impl_snapshot_for!(enum ScalarMaybeUndef {
    Scalar(s),
    Undef,
});
// Hash all three `MemPlace` fields; stable hashing has no `AllocId` issue.
impl_stable_hash_for!(struct ::interpret::MemPlace {
    ptr,
    align,
    extra,
});
impl_snapshot_for!(struct MemPlace {
    ptr,
    extra,
    align -> *align, // just copy alignment verbatim
});
// Can't use the macro here because that does not support named enum fields.
impl<'a> HashStable<StableHashingContext<'a>> for Place {
    fn hash_stable<W: StableHasherResult>(
        &self, hcx: &mut StableHashingContext<'a>,
        hasher: &mut StableHasher<W>)
    {
        // Hash the discriminant first so variants with coincidentally equal
        // payload bits cannot collide.
        mem::discriminant(self).hash_stable(hcx, hasher);
        match self {
            Place::Ptr(mem_place) => mem_place.hash_stable(hcx, hasher),
            Place::Local { frame, local } => {
                frame.hash_stable(hcx, hasher);
                local.hash_stable(hcx, hasher);
            },
        }
    }
}
impl<'a, Ctx> Snapshot<'a, Ctx> for Place
    where Ctx: SnapshotContext<'a>,
{
    type Item = Place<AllocIdSnapshot<'a>>;
    fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
        match self {
            Place::Ptr(p) => Place::Ptr(p.snapshot(ctx)),
            // Locals are identified by frame and local indices only — there
            // is no `AllocId` to replace, so they are copied as-is.
            Place::Local{ frame, local } => Place::Local{
                frame: *frame,
                local: *local,
            },
        }
    }
}
// Hashing and snapshotting for the remaining interpreter value enums; all
// payloads are scalars or places handled by the impls above.
impl_stable_hash_for!(enum ::interpret::Value {
    Scalar(x),
    ScalarPair(x, y),
});
impl_snapshot_for!(enum Value {
    Scalar(s),
    ScalarPair(s, t),
});
impl_stable_hash_for!(enum ::interpret::Operand {
    Immediate(x),
    Indirect(x),
});
impl_snapshot_for!(enum Operand {
    Immediate(v),
    Indirect(m),
});
impl_stable_hash_for!(enum ::interpret::LocalValue {
    Dead,
    Live(x),
});
impl_snapshot_for!(enum LocalValue {
    Live(v),
    Dead,
});
impl<'a, Ctx> Snapshot<'a, Ctx> for Relocations
    where Ctx: SnapshotContext<'a>,
{
    type Item = Relocations<AllocIdSnapshot<'a>>;
    fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
        // `map` preserves iteration order, so the result can be rebuilt
        // with `from_presorted`.
        Relocations::from_presorted(self.iter()
            .map(|(size, id)| (*size, id.snapshot(ctx)))
            .collect())
    }
}
/// `AllocId`-invariant view of an `Allocation`: everything is borrowed
/// except the relocations, whose ids are replaced by their targets.
#[derive(Eq, PartialEq)]
struct AllocationSnapshot<'a> {
    bytes: &'a [u8],
    relocations: Relocations<AllocIdSnapshot<'a>>,
    undef_mask: &'a UndefMask,
    align: &'a Align,
    mutability: &'a Mutability,
}
impl<'a, Ctx> Snapshot<'a, Ctx> for &'a Allocation
    where Ctx: SnapshotContext<'a>,
{
    type Item = AllocationSnapshot<'a>;
    fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
        // Exhaustive destructuring: a newly added `Allocation` field breaks
        // this impl at compile time instead of being silently dropped.
        let Allocation { bytes, relocations, undef_mask, align, mutability } = self;
        AllocationSnapshot {
            bytes,
            undef_mask,
            align,
            mutability,
            relocations: relocations.snapshot(ctx),
        }
    }
}
// Can't use the macro here because that does not support named enum fields.
impl<'a> HashStable<StableHashingContext<'a>> for StackPopCleanup {
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'a>,
        hasher: &mut StableHasher<W>)
    {
        // Discriminant first, then the variant payload.
        mem::discriminant(self).hash_stable(hcx, hasher);
        match self {
            StackPopCleanup::Goto(ref block) => block.hash_stable(hcx, hasher),
            StackPopCleanup::None { cleanup } => cleanup.hash_stable(hcx, hasher),
        }
    }
}
/// `AllocId`-invariant view of a stack `Frame`. The MIR body itself is not
/// part of the snapshot (see the `Snapshot` impl for `&Frame` below).
#[derive(Eq, PartialEq)]
struct FrameSnapshot<'a, 'tcx: 'a> {
    instance: &'a ty::Instance<'tcx>,
    span: &'a Span,
    return_to_block: &'a StackPopCleanup,
    return_place: Place<AllocIdSnapshot<'a>>,
    locals: IndexVec<mir::Local, LocalValue<AllocIdSnapshot<'a>>>,
    block: &'a mir::BasicBlock,
    stmt: usize,
}
// Not using the macro because that does not support types depending on two lifetimes
impl<'a, 'mir, 'tcx: 'mir> HashStable<StableHashingContext<'a>> for Frame<'mir, 'tcx> {
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'a>,
        hasher: &mut StableHasher<W>) {
        // Exhaustive destructuring: adding a field to `Frame` without
        // updating this impl is a compile error, not a missing hash input.
        let Frame {
            mir,
            instance,
            span,
            return_to_block,
            return_place,
            locals,
            block,
            stmt,
        } = self;
        (mir, instance, span, return_to_block).hash_stable(hcx, hasher);
        (return_place, locals, block, stmt).hash_stable(hcx, hasher);
    }
}
impl<'a, 'mir, 'tcx, Ctx> Snapshot<'a, Ctx> for &'a Frame<'mir, 'tcx>
    where Ctx: SnapshotContext<'a>,
{
    type Item = FrameSnapshot<'a, 'tcx>;
    fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
        // `mir` is deliberately skipped here (unlike in `hash_stable`) —
        // presumably because the MIR body does not change during
        // evaluation; TODO confirm.
        let Frame {
            mir: _,
            instance,
            span,
            return_to_block,
            return_place,
            locals,
            block,
            stmt,
        } = self;
        FrameSnapshot {
            instance,
            span,
            return_to_block,
            block,
            stmt: *stmt,
            return_place: return_place.snapshot(ctx),
            locals: locals.iter().map(|local| local.snapshot(ctx)).collect(),
        }
    }
}
impl<'a, 'b, 'mir, 'tcx: 'a+'mir> SnapshotContext<'b>
    for Memory<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>
{
    // Lookup failures are folded into `None`; the snapshot records the id
    // as unresolved rather than propagating the error.
    fn resolve(&'b self, id: &AllocId) -> Option<&'b Allocation> {
        self.get(*id).ok()
    }
}
/// The virtual machine state during const-evaluation at a given point in time.
/// We assume the `CompileTimeInterpreter` has no interesting extra state that
/// is worth considering here.
struct EvalSnapshot<'a, 'mir, 'tcx: 'a + 'mir> {
    // Full clones of the interpreter's memory and call stack (see `new`).
    memory: Memory<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>,
    stack: Vec<Frame<'mir, 'tcx>>,
}
impl<'a, 'mir, 'tcx: 'a + 'mir> EvalSnapshot<'a, 'mir, 'tcx>
{
fn new(
memory: &Memory<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>,
stack: &[Frame<'mir, 'tcx>]
) -> Self {
EvalSnapshot {
memory: memory.clone(),
stack: stack.into(),
}
}
// Used to compare two snapshots
fn snapshot(&'b self)
-> Vec<FrameSnapshot<'b, 'tcx>>
{
// Start with the stack, iterate and recursively snapshot
self.stack.iter().map(|frame| frame.snapshot(&self.memory)).collect()
}
}
/// `Hash` delegates to the stable hash so it stays consistent with the
/// `PartialEq` impl below (`k1 == k2` implies `hash(k1) == hash(k2)`).
impl<'a, 'mir, 'tcx> Hash for EvalSnapshot<'a, 'mir, 'tcx>
{
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Implement in terms of hash stable, so that k1 == k2 -> hash(k1) == hash(k2)
        let mut hcx = self.memory.tcx.get_stable_hashing_context();
        let mut hasher = StableHasher::<u64>::new();
        self.hash_stable(&mut hcx, &mut hasher);
        hasher.finish().hash(state)
    }
}
// Not using the macro because we need special handling for `memory`, which the macro
// does not support at the same time as the extra bounds on the type.
impl<'a, 'b, 'mir, 'tcx> HashStable<StableHashingContext<'b>>
    for EvalSnapshot<'a, 'mir, 'tcx>
{
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'b>,
        hasher: &mut StableHasher<W>)
    {
        // Only the stack is hashed; hashing all of memory on every
        // observation would be far too expensive during execution.
        let EvalSnapshot{ memory: _, stack } = self;
        stack.hash_stable(hcx, hasher);
    }
}
impl<'a, 'mir, 'tcx> Eq for EvalSnapshot<'a, 'mir, 'tcx>
{}
/// Equality compares `AllocId`-invariant views, so two states that differ
/// only in allocation ids still compare equal.
impl<'a, 'mir, 'tcx> PartialEq for EvalSnapshot<'a, 'mir, 'tcx>
{
    fn eq(&self, other: &Self) -> bool {
        // FIXME: This looks to be a *ridicolously expensive* comparison operation.
        // Doesn't this make tons of copies? Either `snapshot` is very badly named,
        // or it does!
        self.snapshot() == other.snapshot()
    }
}
| 29.804721 | 97 | 0.578731 |
5b8cbb325b76fa540d1d2a4dd34c8ec4e5f3a334 | 29,378 | use crate::bytecode::Instruction;
use crate::vm::*;
use crate::*;
/// Bytecode virtual machine: an operand stack, a call stack, the loaded
/// program and the registries built while executing declaration opcodes.
pub struct VM {
    // Operand stack of live object references.
    stack: Stack<Arc<Object>>,
    call_stack: CallStack,
    // Loaded instruction stream; `pc` indexes into it.
    program: Vec<Instruction>,
    pc: usize,
    // Classes keyed by the pc of their `DeclareClass` instruction.
    classes: HashMap<u64, Arc<Class>>,
    // Methods keyed by their bytecode offset.
    methods: HashMap<u64, Arc<Method>>,
    variables: HashMap<u64, Arc<Variable>>,
    globals: HashMap<u64, Arc<Object>>,
    // Id of the class currently receiving `Use*` declarations.
    declaring_class: u64,
    // Keeps constant objects (e.g. the TRUE/FALSE singletons) alive while
    // raw pointers to them are held elsewhere.
    constant_holder: Vec<Arc<Object>>,
}
impl VM {
pub fn new() -> VM {
VM {
stack: Stack::new(),
call_stack: CallStack::new(),
program: Vec::new(),
pc: 0,
classes: HashMap::new(),
methods: HashMap::new(),
variables: HashMap::new(),
globals: HashMap::new(),
declaring_class: 0,
constant_holder: vec![],
}
}
    /// Aborts evaluation with `message`, attaching the current call stack
    /// for the error report.
    pub fn panic<T>(&mut self, message: String) -> VMResult<T> {
        VMResult::Panic(message, self.call_stack.detach())
    }
    /// Logs the operand stack; compiled in only for debug builds.
    #[inline]
    fn log_stack(&self) {
        #[cfg(debug_assertions)]
        log::info!("{:?}", self.stack);
    }
    /// Read-only access to the operand stack.
    pub fn stack(&self) -> &Stack<Arc<Object>> {
        &self.stack
    }
    /// Dumps the operand stack to stdout (also used by `Instruction::DumpStack`).
    pub fn print_stack(&self) {
        println!("{:?}", self.stack);
    }
    /// Raw pointer to the class registered at `address`; panics the VM if
    /// no class is registered there. The pointer is only valid while the
    /// class stays in `self.classes`.
    #[inline]
    fn raw_class_ptr(&mut self, address: u64) -> VMResult<*const Class> {
        VMResult::Ok(
            expect!(
                self,
                self.classes.get(&address),
                "no class found at {:X}",
                address
            )
            .as_ref() as *const _,
        )
    }
    /// Pushes `object` and advances past the current instruction.
    #[inline]
    fn load_object(&mut self, object: Arc<Object>) {
        self.push(object);
        self.pc += 1;
    }
fn do_eval<M: Runtime>(&mut self) -> VMResult<()> {
loop {
match self.program[self.pc] {
Instruction::Noop => {
self.pc += 1;
}
Instruction::Halt => {
break;
}
Instruction::Panic => {
return VMResult::Panic(
format!(
"{:?}",
self.stack
.pop()
.as_ref()
.map(ToString::to_string)
.unwrap_or(String::new())
),
self.call_stack.detach(),
)
}
Instruction::DumpStack => {
self.print_stack();
self.pc += 1;
}
Instruction::DeclareClass(ref name) => {
let class = Class::new(name.clone(), self.pc);
let class_id = self.pc as u64;
self.classes.insert(class_id, class);
self.declaring_class = class_id;
self.pc += 1;
}
Instruction::DeclareVariable(ref name, id, getter_id, setter_id) => {
self.pc += 1;
let variable = Arc::new(Variable {
name: name.clone(),
id,
getter_id,
setter_id,
});
self.variables.insert(id, variable);
}
Instruction::UseVariable(id) => {
let variable =
expect!(self, self.variables.get(&id), "cannot use unknown variable");
let class = expect!(
self,
self.classes.get_mut(&self.declaring_class),
"variable outside class"
);
let class = expect!(self, Arc::get_mut(class), "class in use");
class.variables.insert(id, variable.clone());
class
.variable_setters
.insert(variable.setter_id, variable.clone());
class
.variable_getters
.insert(variable.getter_id, variable.clone());
self.pc += 1;
}
Instruction::DeclareMethod(ref name, offset) => {
let method = Arc::new(Method {
name: name.clone(),
offset: offset as usize,
});
self.methods.insert(offset, method);
self.pc += 1;
}
Instruction::UseMethod(offset) => {
let method =
expect!(self, self.methods.get(&offset), "cannot use unknown method");
let class = expect!(
self,
self.classes.get_mut(&self.declaring_class),
"method outside class"
);
let class = expect!(self, Arc::get_mut(class), "class in use");
class.methods.insert(offset, method.clone());
self.pc += 1;
}
Instruction::OverrideMethod(source_offset, target_offset) => {
let method = expect!(
self,
self.methods.get(&target_offset),
"cannot use unknown method"
);
let class = expect!(
self,
self.classes.get_mut(&self.declaring_class),
"method outside class"
);
let class = expect!(self, Arc::get_mut(class), "class in use");
class.methods.insert(source_offset, method.clone());
self.pc += 1;
}
Instruction::LoadObject(offset) => {
let class = expect!(self, self.classes.get(&offset), "unknown class");
let object = Object::new(class);
self.push(object);
self.pc += 1;
}
// TODO: Optimize this so Instruction doesn't have to be cloned
ref i @ Instruction::CallMethod(_, _, _, _) => {
if let Instruction::CallMethod(ref offset, ref uri, line, character) = i.clone()
{
let top = expect!(self, self.stack.top(), "empty stack").clone();
let receiver = match self.eval_lazy::<M>(top) {
None => continue,
Some(r) => r,
};
let class = expect!(
self,
&receiver.class,
"cannot call method on object without class"
);
if let Some(var) = class.variable_getters.get(offset) {
let receiver = unwrap!(self, self.pop_eval::<M>());
let value = expect!(
self,
receiver.get_variable(var),
"{} has no value in variable {}",
receiver,
var.name
);
self.push(value);
self.pc += 1;
continue;
}
if let Some(var) = class.variable_setters.get(offset) {
let receiver = unwrap!(self, self.pop());
let value = unwrap!(self, self.pop());
self.push(receiver.set_variable(var, value));
self.pc += 1;
continue;
}
let method = expect!(
self,
class.methods.get(offset),
"message #{} not understood by {}",
self.methods
.get(offset)
.map(|m| &m.name)
.unwrap_or(&"?".into()),
receiver
)
.clone();
let return_address = self.pc + 1;
self.pc = method.offset;
self.call_stack.push(
receiver,
method,
return_address,
SourceCodeLocation(uri.clone(), line, character),
);
}
}
Instruction::CallNative(ref method) => {
let method = method.clone();
unwrap!(self, M::call(self, method));
self.pc += 1;
}
Instruction::LoadLocal(index) => {
let local = expect!(
self,
self.stack.at(index as usize),
"not enough locals on the stack"
)
.clone();
self.push(local);
self.pc += 1;
}
Instruction::DropLocal(index) => {
self.stack.drop(index as usize);
self.pc += 1;
}
Instruction::StoreGlobal(offset) => {
self.globals.insert(
offset,
expect!(self, self.stack.pop(), "nothing on stack to store"),
);
self.pc += 1;
}
Instruction::LoadGlobal(offset) => {
self.push(expect!(self, self.globals.get(&offset), "global not found").clone());
self.pc += 1;
}
Instruction::LoadLazy(arity, offset) => {
let mut dependencies = vec![];
for _ in 0..arity {
dependencies.push(unwrap!(self, self.pop()));
}
self.push(Object::lazy(offset, self.call_stack.clone(), dependencies));
self.pc += 1;
}
Instruction::Return(arity) => {
let result = unwrap!(self, self.pop());
for _ in 0..arity {
unwrap!(self, self.pop());
}
self.push(result);
self.pc = expect!(self, self.call_stack.ret(), "empty call stack");
}
Instruction::ReturnLazy(arity) => {
let result = unwrap!(self, self.pop());
for _ in 0..arity {
unwrap!(self, self.pop());
}
self.push(result);
break;
}
Instruction::MarkClassTrue(id) => unsafe {
let true_class = expect!(self, self.classes.get(&id), "True not loaded");
self.constant_holder.push(Object::new(true_class));
TRUE = self.constant_holder.last().unwrap() as *const _;
self.pc += 1;
},
Instruction::MarkClassFalse(id) => unsafe {
let false_class = expect!(self, self.classes.get(&id), "False not loaded");
self.constant_holder.push(Object::new(false_class));
FALSE = self.constant_holder.last().unwrap() as *const _;
self.pc += 1;
},
Instruction::MarkClassString(id) => unsafe {
STRING_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassCharacter(id) => unsafe {
CHARACTER_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassSymbol(id) => unsafe {
SYMBOL_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassU8(id) => unsafe {
U8_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassU16(id) => unsafe {
U16_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassU32(id) => unsafe {
U32_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassU64(id) => unsafe {
U64_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassU128(id) => unsafe {
U128_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassUBig(id) => unsafe {
UBIG_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassI8(id) => unsafe {
I8_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassI16(id) => unsafe {
I16_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassI32(id) => unsafe {
I32_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassI64(id) => unsafe {
I64_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassI128(id) => unsafe {
I128_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassIBig(id) => unsafe {
IBIG_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassF32(id) => unsafe {
F32_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassF64(id) => unsafe {
F64_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::MarkClassFBig(id) => unsafe {
FBIG_CLASS = unwrap!(self, self.raw_class_ptr(id));
self.pc += 1;
},
Instruction::LoadConstString(ref value) => {
let value = value.clone();
self.load_object(Object::box_string(value))
}
Instruction::LoadConstCharacter(ref value) => {
let value = value.clone();
self.load_object(Object::box_character(value))
}
Instruction::LoadConstSymbol(ref value) => {
let value = value.clone();
self.load_object(Object::box_symbol(value))
}
Instruction::LoadConstU8(ref value) => {
let value = value.clone();
self.load_object(Object::box_u8(value))
}
Instruction::LoadConstU16(ref value) => {
let value = value.clone();
self.load_object(Object::box_u16(value))
}
Instruction::LoadConstU32(ref value) => {
let value = value.clone();
self.load_object(Object::box_u32(value))
}
Instruction::LoadConstU64(ref value) => {
let value = value.clone();
self.load_object(Object::box_u64(value))
}
Instruction::LoadConstU128(ref value) => {
let value = value.clone();
self.load_object(Object::box_u128(value))
}
Instruction::LoadConstUBig(ref value) => {
let value = value.clone();
self.load_object(Object::box_ubig(value))
}
Instruction::LoadConstI8(ref value) => {
let value = value.clone();
self.load_object(Object::box_i8(value))
}
Instruction::LoadConstI16(ref value) => {
let value = value.clone();
self.load_object(Object::box_i16(value))
}
Instruction::LoadConstI32(ref value) => {
let value = value.clone();
self.load_object(Object::box_i32(value))
}
Instruction::LoadConstI64(ref value) => {
let value = value.clone();
self.load_object(Object::box_i64(value))
}
Instruction::LoadConstI128(ref value) => {
let value = value.clone();
self.load_object(Object::box_i128(value))
}
Instruction::LoadConstIBig(ref value) => {
let value = value.clone();
self.load_object(Object::box_ibig(value))
}
Instruction::LoadConstF32(ref value) => {
let value = value.clone();
self.load_object(Object::box_f32(value))
}
Instruction::LoadConstF64(ref value) => {
let value = value.clone();
self.load_object(Object::box_f64(value))
}
Instruction::LoadConstFBig(ref value) => {
let value = value.clone();
self.load_object(Object::box_fbig(value))
}
}
}
VMResult::Ok(())
}
    /// Forces a (possibly lazy) object to a concrete value. Lazy objects
    /// are evaluated by pushing their dependencies, running their bytecode
    /// with a swapped-in call stack, and repeating until the result is no
    /// longer lazy. Returns `None` if evaluation panicked.
    #[inline]
    fn eval_lazy<M: Runtime>(&mut self, mut object: Arc<Object>) -> Option<Arc<Object>> {
        loop {
            match object.const_value {
                ConstValue::Lazy(offset, ref call_stack, ref dependencies) => {
                    // Dependencies are pushed in reverse so the first one
                    // ends up on top of the stack.
                    for dep in dependencies.iter().cloned().rev() {
                        self.push(dep);
                    }
                    // Save pc and call stack, run the lazy body, restore.
                    let return_offset = self.pc;
                    let call_stack = std::mem::replace(&mut self.call_stack, call_stack.clone());
                    self.pc = offset as usize;
                    self.do_eval::<M>().report::<M>()?;
                    let result = self.pop().report::<M>()?;
                    self.pc = return_offset;
                    self.call_stack = call_stack;
                    object = result;
                }
                _ => return Some(object),
            }
        }
    }
    /// Appends `instructions` to the program and runs them; returns `true`
    /// when evaluation did not complete normally.
    fn eval_catch<M: Runtime>(&mut self, instructions: Vec<Instruction>) -> bool {
        self.pc = self.program.len();
        self.program.extend(instructions);
        self.do_eval::<M>().report::<M>().is_none()
    }
    /// Runs `instructions`, discarding any result.
    pub fn eval<M: Runtime>(&mut self, instructions: Vec<Instruction>) {
        self.eval_catch::<M>(instructions);
    }
    /// Runs `instructions` and returns the forced top-of-stack result, or
    /// `None` on failure.
    pub fn eval_pop<M: Runtime>(&mut self, instructions: Vec<Instruction>) -> Option<Arc<Object>> {
        if self.eval_catch::<M>(instructions) {
            None
        } else {
            let result = self.stack.pop();
            // A non-empty stack after evaluation indicates leftover values;
            // log it in debug builds.
            if self.stack.size() > 0 {
                self.log_stack()
            }
            self.eval_lazy::<M>(result?)
        }
    }
    /// Pops the top of the operand stack; panics the VM if it is empty.
    pub fn pop(&mut self) -> VMResult<Arc<Object>> {
        VMResult::Ok(expect!(self, self.stack.pop(), "tried to pop empty stack"))
    }
    /// Pops and forces the top of the stack to a concrete (non-lazy) value.
    pub fn pop_eval<M: Runtime>(&mut self) -> VMResult<Arc<Object>> {
        let o = unwrap!(self, self.pop());
        let o = expect!(self, self.eval_lazy::<M>(o), "failed to eval lazy");
        VMResult::Ok(o)
    }
    /// Peeks at the top of the operand stack without popping.
    #[inline]
    pub fn top(&mut self) -> VMResult<&Arc<Object>> {
        VMResult::Ok(expect!(
            self,
            self.stack.top(),
            "tried to peek at top of empty stack"
        ))
    }
    /// Pushes `object` onto the operand stack.
    pub fn push(&mut self, object: Arc<Object>) {
        self.stack.push(object);
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::assembly::*;
use crate::bytecode::{BytecodeEncoding, Instruction as BytecodeInstruction};
fn assert_evaluates(input: &str) {
let assembly = Parser::new().parse(input).unwrap();
let mut vm = VM::new();
let instructions: Vec<BytecodeInstruction> = assembly.into();
vm.eval::<()>(instructions.rotate().unwrap());
}
fn assert_evaluates_to(input: &str, expected: &str) {
let assembly = Parser::new().parse(input).unwrap();
let mut vm = VM::new();
let instructions: Vec<BytecodeInstruction> = assembly.into();
let result = vm.eval_pop::<()>(instructions.rotate().unwrap()).unwrap();
assert_eq!(result.to_string(), expected);
assert_eq!(vm.stack().size(), 0, "stack should be empty");
}
#[test]
fn noops_and_halt() {
assert_evaluates(
r#"
Noop
Noop
Halt
"#,
);
}
#[test]
fn declare_and_instantiate_class() {
assert_evaluates_to(
r#"
@SomeClass$methods
DeclareMethod "someMethod" @SomeClass#someMethod
@SomeClass
DeclareClass "SomeClass"
UseMethod @SomeClass#someMethod
LoadObject @SomeClass
CallMethod @SomeClass#someMethod "call site" 1 1
Halt
@SomeClass#someMethod
LoadLocal 0
Return 1
"#,
"SomeClass",
);
}
#[test]
fn inherited_method() {
assert_evaluates_to(
r#"
@B$methods
DeclareMethod "a" @A#a
@A$methods
DeclareMethod "a" @A#a
@B
DeclareClass "B"
UseMethod @A#a
@A
DeclareClass "A"
UseMethod @A#a
LoadObject @B
CallMethod @A#a "call site" 1 1
Halt
@A#a
LoadLocal 0
Return 1
"#,
"B",
);
}
#[test]
fn all_consts() {
assert_evaluates_to(
r#"
@String
DeclareClass "String"
MarkClassString @String
LoadConstString "Hello"
Halt
"#,
"Hello",
);
assert_evaluates_to(
r#"
@Character
DeclareClass "Character"
MarkClassCharacter @Character
LoadConstCharacter 'x'
Halt
"#,
"x",
);
assert_evaluates_to(
r#"
@Symbol
DeclareClass "Symbol"
MarkClassSymbol @Symbol
LoadConstSymbol #hello
Halt
"#,
"#hello",
);
fn assert_number_evaluates(name: &str, literal: &str) {
assert_evaluates_to(
format!(
r#"
@Class
DeclareClass "Class"
MarkClass{} @Class
LoadConst{} {}
Halt
"#,
name, name, literal
)
.as_ref(),
literal,
);
}
assert_number_evaluates("U8", "255");
assert_number_evaluates("U16", "1024");
assert_number_evaluates("U32", "1024");
assert_number_evaluates("U64", "1024");
assert_number_evaluates("U128", "1024");
assert_number_evaluates("UBig", "1024");
assert_number_evaluates("I8", "25");
assert_number_evaluates("I16", "1024");
assert_number_evaluates("I32", "1024");
assert_number_evaluates("I64", "1024");
assert_number_evaluates("I128", "1024");
assert_number_evaluates("IBig", "1024");
assert_number_evaluates("I8", "-25");
assert_number_evaluates("I16", "-1024");
assert_number_evaluates("I32", "-1024");
assert_number_evaluates("I64", "-1024");
assert_number_evaluates("I128", "-1024");
assert_number_evaluates("IBig", "-1024");
}
#[test]
fn globals() {
assert_evaluates_to(
r#"
@String
DeclareClass "String"
MarkClassString @String
@global
LoadConstString "global value"
StoreGlobal @global
LoadGlobal @global
Halt
"#,
"global value",
);
}
#[test]
fn native_method() {
assert_evaluates_to(
r#"
@UInt8
DeclareClass "UInt8"
MarkClassU8 @UInt8
LoadConstU8 12
LoadConstU8 13
CallNative Loa/Number#+
Halt
"#,
"25",
);
}
#[test]
fn binary_call() {
assert_evaluates_to(
r#"
@A$methods
DeclareMethod "+" @A#+
@A
DeclareClass "A"
UseMethod @A#+
@B
DeclareClass "B"
; Right-hand operand
LoadObject @B
; Left-hand operand (receiver)
LoadObject @A
CallMethod @A#+ "call site" 42 42
Halt
@A#+
LoadLocal 1
Return 2
"#,
"B",
);
}
#[test]
fn lazy_object_with_no_dependencies() {
assert_evaluates_to(
r#"
@SomeClass
DeclareClass "SomeClass"
LoadLazy 0 @lazy
Halt
@lazy
LoadObject @SomeClass
ReturnLazy 0
"#,
"SomeClass",
);
}
#[test]
fn lazy_object_with_dependencies() {
assert_evaluates_to(
r#"
@UInt8
DeclareClass "UInt8"
MarkClassU8 @UInt8
LoadConstU8 1
LoadConstU8 2
LoadLazy 2 @lazy
Halt
@lazy
LoadLocal 1
LoadLocal 1
CallNative Loa/Number#+
ReturnLazy 2
"#,
"3",
);
}
#[test]
fn initializer() {
assert_evaluates_to(
r#"
@A$class$methods
DeclareMethod "new" @A$class#new
@A$class
DeclareClass "A class"
UseMethod @A$class#new
@A
DeclareClass "A"
LoadObject @A$class
CallMethod @A$class#new "test:" 0 0
Halt
@A$class#new
LoadObject @A
Return 1
"#,
"A",
);
}
#[test]
fn single_variable() {
assert_evaluates_to(
r#"
@N/A$class$methods
DeclareMethod "new:" @N/A$class#new:
@N/A$methods
DeclareVariable "b" @N/A(b) @N/A#b @N/A#b:
@N/A$class
DeclareClass "N/A class"
UseMethod @N/A$class#new:
@N/A
DeclareClass "N/A"
UseVariable @N/A(b)
@N/B
DeclareClass "N/B"
LoadObject @N/B
LoadObject @N/A$class
CallMethod @N/A$class#new: "test:" 0 0
Halt
@N/A$class#new:
LoadLocal 1
LoadObject @N/A
CallMethod @N/A#b: "test:" 6 30
Return 2
@N/A(b)
Noop
@N/A#b
Noop
@N/A#b:
Noop
"#,
"a N/A(b=N/B)",
);
}
#[test]
fn native_eq_method() {
assert_evaluates_to(
r#"
@A
DeclareClass "A"
@B
DeclareClass "B"
@True
DeclareClass "True"
MarkClassTrue @True
@False
DeclareClass "False"
MarkClassFalse @False
LoadObject @A
LoadObject @B
CallNative Loa/Object#==
Halt
"#,
"False",
);
}
}
| 32.212719 | 100 | 0.417387 |
1827d64a0c0659f64158a461b2224a388e92cb4e | 1,922 |
/// Yew component rendering the Material Design "contact support" icon as an
/// inline SVG, styled via the shared `crate::Props`.
pub struct IconContactSupport {
    props: crate::Props,
}
impl yew::Component for IconContactSupport {
    type Properties = crate::Props;
    type Message = ();

    fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self
    {
        Self { props }
    }

    /// No messages are ever sent (`Message = ()`), so this is effectively
    /// unreachable; returning `true` is harmless.
    fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender
    {
        true
    }

    /// Fix: store the new props and re-render. The original discarded the
    /// incoming props and always returned `false`, so the icon could never
    /// reflect prop changes (size, color, …) made by its parent.
    fn change(&mut self, props: Self::Properties) -> yew::prelude::ShouldRender
    {
        self.props = props;
        true
    }

    /// Renders the icon, falling back to Material-style defaults for any
    /// unset prop (24px size, `currentColor` stroke, round caps/joins).
    fn view(&self) -> yew::prelude::Html
    {
        yew::prelude::html! {
            <svg
                class=self.props.class.unwrap_or("")
                width=self.props.size.unwrap_or(24).to_string()
                height=self.props.size.unwrap_or(24).to_string()
                viewBox="0 0 24 24"
                fill=self.props.fill.unwrap_or("none")
                stroke=self.props.color.unwrap_or("currentColor")
                stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
                stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
                stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
            >
            <svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0V0z" fill="none"/><path d="M11.5 2C6.81 2 3 5.81 3 10.5S6.81 19 11.5 19h.5v3c4.86-2.34 8-7 8-11.5C20 5.81 16.19 2 11.5 2zm1 14.5h-2v-2h2v2zm.4-4.78c-.01.01-.02.03-.03.05-.05.08-.1.16-.14.24-.02.03-.03.07-.04.11-.03.07-.06.14-.08.21-.07.21-.1.43-.1.68H10.5c0-.51.08-.94.2-1.3 0-.01 0-.02.01-.03.01-.04.04-.06.05-.1.06-.16.13-.3.22-.44.03-.05.07-.1.1-.15.03-.04.05-.09.08-.12l.01.01c.84-1.1 2.21-1.44 2.32-2.68.09-.98-.61-1.93-1.57-2.13-1.04-.22-1.98.39-2.3 1.28-.14.36-.47.65-.88.65h-.2c-.6 0-1.04-.59-.87-1.17.55-1.82 2.37-3.09 4.43-2.79 1.69.25 3.04 1.64 3.33 3.33.44 2.44-1.63 3.03-2.53 4.35z"/></svg>
        </svg>
        }
    }
}
| 41.782609 | 719 | 0.575963 |
ac1baf49fc8b860aa9e75b729500db7fca39cacb | 3,950 |
use std::env;
use std::process;
/// Command-line options parsed from `env::args` (see `show_usage` for the
/// accepted values of each field).
pub struct Options {
    /// `-i`: input filename (aligned Multi-FASTA), required.
    pub input : String,
    /// `-o`: output filename, required.
    pub output : String,
    /// `-w`: sequence weighting method, "hen" or "va".
    pub weight : String,
    /// `-t`: tolerate non-standard amino acids, "yes" or "no".
    pub tolerate : String,
    /// `-b`: background distribution for the relative entropy.
    pub bgdist : String,
    /// `-c`: colorize amino acids on the terminal, "yes" or "no".
    pub colorize : String,
}
impl Options {
    /// Parses command-line options. Calls `show_usage` (which exits the
    /// process) on any missing or invalid argument.
    pub fn new() -> Options
    {
        let argv : Vec<String> = env::args().collect();
        let argc : usize = argv.len();
        // Defaults; -i and -o are required and therefore start empty.
        let mut arg_i : &String = &String::new();
        let mut arg_o : &String = &String::new();
        let mut arg_w : &String = &String::from( "hen" );
        let mut arg_t : &String = &String::from( "yes" );
        let mut arg_b : &String = &String::from( "blosum62" );
        let mut arg_c : &String = &String::from( "no" );
        // At least "-i <file> -o <file>" must be present.
        if argc < 5 { show_usage( &argv[ 0 ] ) };
        let mut i : usize = 1;
        while i < argc {
            // -h takes no value and may appear anywhere, even last.
            if argv[ i ] == "-h" { show_usage( &argv[ 0 ] ); }
            // Every other flag consumes the following argument. Fix: the
            // original indexed `argv[ i ]` after `i += 1` without a bounds
            // check, panicking with an out-of-bounds index whenever a flag
            // was the final argument (e.g. `prog -i`). Show usage instead.
            if i + 1 >= argc { show_usage( &argv[ 0 ] ); }
            match argv[ i ].as_str() {
                "-i" => { i += 1; arg_i = &argv[ i ]; }
                "-o" => { i += 1; arg_o = &argv[ i ]; }
                "-w" => { i += 1; arg_w = &argv[ i ]; }
                "-t" => { i += 1; arg_t = &argv[ i ]; }
                "-b" => { i += 1; arg_b = &argv[ i ]; }
                "-c" => { i += 1; arg_c = &argv[ i ]; }
                _    => { show_usage( &argv[ 0 ] ); }
            }
            i += 1;
        }
        // Validate each option's value.
        match arg_w.as_str() {
            "hen" | "va" => (),
            _ => show_usage( &argv[ 0 ] ),
        }
        match arg_t.as_str() {
            "yes" | "no" => (),
            _ => show_usage( &argv[ 0 ] ),
        }
        match arg_b.as_str() {
            "blosum62" | "swissprot" | "extra" | "membrane" |
            "intra" | "jtt" | "wag" | "lg" | "equal" => (),
            _ => show_usage( &argv[ 0 ] ),
        }
        match arg_c.as_str() {
            "yes" | "no" => (),
            _ => show_usage( &argv[ 0 ] ),
        }
        Options {
            input    : arg_i.to_string(),
            output   : arg_o.to_string(),
            weight   : arg_w.to_string(),
            tolerate : arg_t.to_string(),
            bgdist   : arg_b.to_string(),
            colorize : arg_c.to_string(),
        }
    }
    /// Prints the parsed parameter set to standard output.
    pub fn show_parameter( &self )
    {
        println!( "\nParameter set :" );
        println!( "===========================================" );
        println!( "Input filename : {}", self.input );
        // Fix: label typo "Onput" -> "Output".
        println!( "Output filename : {}", self.output );
        println!( "Weighting method : {}", self.weight );
        println!( "Non-standard AA : {}", self.tolerate );
        println!( "B.G. distribution : {}", self.bgdist );
        println!( "Colorize AA : {}", self.colorize );
        println!( "===========================================" );
    }
}
/// Prints usage information for `arg` (the program name) to stdout and
/// terminates the process with exit status 1; never returns.
fn show_usage( arg : &String )
{
    println!( "Usage: {} [Options] \n\nOptions :\n\n", *arg );
    println!( "   -i    Input filename in aligned Multi-FASTA format, REQUIRED." );
    println!( "   -o    Onput filename, REQUIRED." );
    println!( "   -w    Method of sequence weighting ('hen' or 'va', default 'hen').\n            hen : Position-Based method by Henikoff and Henikoff\n            va  : Distance-Based method by Vingron and Argos" );
    println!( "   -t    Tolerate non-standard AA types (such as B, Z and X) in input file ('yes' or 'no', default 'yes').\n            yes : All non-standard AAs are converted to gaps.\n            no  : The program halts if the input file includes non-standard AA types." );
    println!( "   -b    Back ground distribution in the relative entropy (default 'blosum62').
            blosum62  : BLOSUM62
            swissprot : Swiss-Prot
            extra     : AA composition in extracellular proteins
            membrane  : AA composition in membrane proteins
            intra     : AA composition in intracellular proteins
            jtt       : JTT
            wag       : WAG
            lg        : LG
            equal     : No background distribution, equal rate (= 0.05)" );
    println!( "   -c    Colorize each AA displayed on the terminal based on their stereochemical properties ('yes' or 'no', default 'no')." );
    println!( "   -h    Print this help, ignore all other arguments." );
    println!( "\n" );
    process::exit( 1 );
}
| 33.193277 | 278 | 0.50481 |
62dcca4d73d9a36717ff958c781ed4a6604488e8 | 4,909 | use super::command::ZmqCommand;
use super::error::CodecError;
use super::greeting::ZmqGreeting;
use super::Message;
use crate::ZmqMessage;
use asynchronous_codec::{Decoder, Encoder};
use bytes::{Buf, BufMut, Bytes, BytesMut};
use std::convert::TryFrom;
/// Flags decoded from a single ZMTP frame-header byte.
#[derive(Debug, Clone, Copy)]
struct Frame {
    /// Bit 2: the frame carries a command rather than a message part.
    command: bool,
    /// Bit 1: the length field is 8 bytes (u64) instead of 1 byte.
    long: bool,
    /// Bit 0: more frames of the same multipart message follow.
    more: bool,
}
/// What the decoder expects to read next from the wire.
#[derive(Debug)]
enum DecoderState {
    /// The 64-byte connection greeting.
    Greeting,
    /// A one-byte frame flags header.
    FrameHeader,
    /// The frame length (1 or 8 bytes, depending on the parsed flags).
    FrameLen(Frame),
    /// The frame payload itself.
    Frame(Frame),
}
/// Codec translating between raw bytes and high-level ZMQ `Message` values,
/// implementing both `Decoder` and `Encoder`.
#[derive(Debug)]
pub struct ZmqCodec {
    state: DecoderState,
    waiting_for: usize, // Number of bytes needed to decode frame
    // Needed to store incoming multipart message
    // This allows to encapsulate its processing inside the codec and not
    // expose internal details to higher levels
    buffered_message: Option<ZmqMessage>,
}
impl ZmqCodec {
    /// Creates a codec positioned at the very start of the protocol,
    /// i.e. waiting for the peer's ZMTP greeting.
    pub fn new() -> Self {
        // The greeting frame is fixed at 64 bytes on the wire.
        let greeting_len = 64;
        Self {
            state: DecoderState::Greeting,
            waiting_for: greeting_len,
            buffered_message: None,
        }
    }
}
impl Default for ZmqCodec {
fn default() -> Self {
Self::new()
}
}
impl Decoder for ZmqCodec {
    type Error = CodecError;
    type Item = Message;
    /// Incrementally decodes the next `Message` from `src`.
    ///
    /// Returns `Ok(None)` while more bytes are required; `self.waiting_for`
    /// tracks how many bytes the current protocol element still needs, and
    /// the buffer is grown to that size so the next read can complete it.
    /// Message frames are accumulated in `self.buffered_message` until a
    /// frame without the MORE flag completes the multipart message.
    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        // Not enough bytes buffered yet for the current element.
        if src.len() < self.waiting_for {
            src.reserve(self.waiting_for - src.len());
            return Ok(None);
        }
        match self.state {
            DecoderState::Greeting => {
                // Every valid greeting starts with the 0xff signature byte.
                if src[0] != 0xff {
                    return Err(CodecError::Decode("Bad first byte of greeting"));
                }
                self.state = DecoderState::FrameHeader;
                self.waiting_for = 1;
                Ok(Some(Message::Greeting(ZmqGreeting::try_from(
                    src.split_to(64).freeze(),
                )?)))
            }
            DecoderState::FrameHeader => {
                // Flags octet: bit 2 = command, bit 1 = long, bit 0 = more.
                let flags = src.get_u8();
                let frame = Frame {
                    command: (flags & 0b0000_0100) != 0,
                    long: (flags & 0b0000_0010) != 0,
                    more: (flags & 0b0000_0001) != 0,
                };
                self.state = DecoderState::FrameLen(frame);
                self.waiting_for = if frame.long { 8 } else { 1 };
                // Recurse: the length bytes may already be buffered.
                self.decode(src)
            }
            DecoderState::FrameLen(frame) => {
                self.state = DecoderState::Frame(frame);
                // Long frames carry a u64 length, short frames a single byte.
                self.waiting_for = if frame.long {
                    src.get_u64() as usize
                } else {
                    src.get_u8() as usize
                };
                self.decode(src)
            }
            DecoderState::Frame(frame) => {
                let data = src.split_to(self.waiting_for);
                // Whatever follows the payload is another frame header.
                self.state = DecoderState::FrameHeader;
                self.waiting_for = 1;
                if frame.command {
                    return Ok(Some(Message::Command(ZmqCommand::try_from(data)?)));
                }
                // process incoming message frame
                match &mut self.buffered_message {
                    Some(v) => v.push_back(data.freeze()),
                    None => self.buffered_message = Some(ZmqMessage::from(data.freeze())),
                }
                if frame.more {
                    // More frames belong to this message; keep decoding.
                    self.decode(src)
                } else {
                    // Final frame: hand out the accumulated multipart message.
                    Ok(Some(Message::Message(
                        self.buffered_message
                            .take()
                            .expect("Corrupted decoder state"),
                    )))
                }
            }
        }
    }
}
impl ZmqCodec {
    /// Writes one frame to `dst`: a flags octet, a short (u8) or long (u64)
    /// length depending on the payload size, then the payload bytes.
    /// `more` sets the MORE bit, announcing that further frames follow.
    fn _encode_frame(&mut self, frame: &Bytes, dst: &mut BytesMut, more: bool) {
        let len = frame.len();
        // Payloads longer than 255 bytes need the 8-byte length encoding.
        let long = len > 255;
        let mut flags: u8 = 0;
        if more {
            flags |= 0b0000_0001;
        }
        if long {
            flags |= 0b0000_0010;
        }
        // Header is 1 flag byte plus either 8 or 1 length bytes.
        let header = if long { 9 } else { 2 };
        dst.reserve(len + header);
        dst.put_u8(flags);
        if long {
            dst.put_u64(len as u64);
        } else {
            dst.put_u8(len as u8);
        }
        dst.extend_from_slice(frame.as_ref());
    }
}
impl Encoder for ZmqCodec {
    type Error = CodecError;
    type Item = Message;
    /// Serializes `message` into `dst`. Greetings and commands supply their
    /// own pre-built byte buffers; multipart messages are written frame by
    /// frame with the MORE flag set on every frame except the last.
    fn encode(&mut self, message: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> {
        match message {
            Message::Greeting(greeting) => dst.unsplit(greeting.into()),
            Message::Command(cmd) => dst.unsplit(cmd.into()),
            Message::Message(msg) => {
                // Index of the final frame; every earlier frame carries MORE.
                let last = msg.len() - 1;
                for (idx, frame) in msg.iter().enumerate() {
                    let more = idx != last;
                    self._encode_frame(frame, dst, more);
                }
            }
        }
        Ok(())
    }
}
| 30.116564 | 94 | 0.497046 |
7a0503d68f348d04a350d608bd928b025ee1383d | 68,844 | use std::cmp::Reverse;
use std::ptr;
use rustc_ast::util::lev_distance::find_best_match_for_name;
use rustc_ast::{self as ast, Path};
use rustc_ast_pretty::pprust;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder};
use rustc_feature::BUILTIN_ATTRIBUTES;
use rustc_hir::def::Namespace::{self, *};
use rustc_hir::def::{self, CtorKind, CtorOf, DefKind, NonMacroAttrKind};
use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_middle::bug;
use rustc_middle::ty::{self, DefIdTree};
use rustc_session::Session;
use rustc_span::hygiene::MacroKind;
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{BytePos, MultiSpan, Span};
use tracing::debug;
use crate::imports::{Import, ImportKind, ImportResolver};
use crate::path_names_to_string;
use crate::{AmbiguityError, AmbiguityErrorMisc, AmbiguityKind};
use crate::{
BindingError, CrateLint, HasGenericParams, MacroRulesScope, Module, ModuleOrUniformRoot,
};
use crate::{NameBinding, NameBindingKind, PrivacyError, VisResolutionError};
use crate::{ParentScope, PathResult, ResolutionError, Resolver, Scope, ScopeSet, Segment};
/// Shorthand for resolutions keyed by AST node id, as used throughout this module.
type Res = def::Res<ast::NodeId>;
/// A vector of spans and replacements, a message and applicability.
crate type Suggestion = (Vec<(Span, String)>, String, Applicability);
/// Potential candidate for an undeclared or out-of-scope label - contains the ident of a
/// similarly named label and whether or not it is reachable.
crate type LabelSuggestion = (Ident, bool);
/// A near-miss name offered when resolution fails: the candidate symbol and
/// the resolution it would map to.
crate struct TypoSuggestion {
    pub candidate: Symbol,
    pub res: Res,
}
impl TypoSuggestion {
    /// Pairs a candidate name with the resolution it would refer to.
    crate fn from_res(candidate: Symbol, res: Res) -> TypoSuggestion {
        Self { candidate, res }
    }
}
/// A free importable item suggested in case of resolution failure.
crate struct ImportSuggestion {
    pub did: Option<DefId>,        // definition the path resolves to, when known
    pub descr: &'static str,       // human-readable kind, e.g. "struct", "function"
    pub path: Path,                // full path the user could `use`
    pub accessible: bool,          // whether the item is visible from the error site
}
/// Shrinks an `impl` item's span down to just the `impl` keyword: first cut
/// at the opening `<` (`"impl<T> Iterator for A<T> {}" -> "impl"`), then cut
/// at the first whitespace (`"impl Iterator for A" -> "impl"`).
///
/// *Attention*: the method used is very fragile since it essentially
/// duplicates the work of the parser. If you need to use this function or
/// something similar, please consider updating the `source_map` functions
/// and this function to something more robust.
fn reduce_impl_span_to_impl_keyword(sm: &SourceMap, impl_span: Span) -> Span {
    sm.span_until_whitespace(sm.span_until_char(impl_span, '<'))
}
impl<'a> Resolver<'a> {
crate fn add_module_candidates(
&mut self,
module: Module<'a>,
names: &mut Vec<TypoSuggestion>,
filter_fn: &impl Fn(Res) -> bool,
) {
for (key, resolution) in self.resolutions(module).borrow().iter() {
if let Some(binding) = resolution.borrow().binding {
let res = binding.res();
if filter_fn(res) {
names.push(TypoSuggestion::from_res(key.ident.name, res));
}
}
}
}
/// Combines an error with provided span and emits it.
///
/// This takes the error provided, combines it with the span and any additional spans inside the
/// error and emits it.
crate fn report_error(&self, span: Span, resolution_error: ResolutionError<'_>) {
self.into_struct_error(span, resolution_error).emit();
}
    /// Builds (but does not emit) the structured diagnostic corresponding to
    /// `resolution_error`, anchored at `span`. Each arm maps one
    /// `ResolutionError` variant to its error code, labels and suggestions.
    crate fn into_struct_error(
        &self,
        span: Span,
        resolution_error: ResolutionError<'_>,
    ) -> DiagnosticBuilder<'_> {
        match resolution_error {
            ResolutionError::GenericParamsFromOuterFunction(outer_res, has_generic_params) => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0401,
                    "can't use generic parameters from outer function",
                );
                err.span_label(span, "use of generic parameter from outer function".to_string());
                let sm = self.session.source_map();
                match outer_res {
                    Res::SelfTy(maybe_trait_defid, maybe_impl_defid) => {
                        // Point at the `impl` keyword that implicitly introduced `Self`.
                        if let Some(impl_span) =
                            maybe_impl_defid.and_then(|def_id| self.opt_span(def_id))
                        {
                            err.span_label(
                                reduce_impl_span_to_impl_keyword(sm, impl_span),
                                "`Self` type implicitly declared here, by this `impl`",
                            );
                        }
                        match (maybe_trait_defid, maybe_impl_defid) {
                            (Some(_), None) => {
                                err.span_label(span, "can't use `Self` here");
                            }
                            (_, Some(_)) => {
                                err.span_label(span, "use a type here instead");
                            }
                            (None, None) => bug!("`impl` without trait nor type?"),
                        }
                        return err;
                    }
                    Res::Def(DefKind::TyParam, def_id) => {
                        if let Some(span) = self.opt_span(def_id) {
                            err.span_label(span, "type parameter from outer function");
                        }
                    }
                    Res::Def(DefKind::ConstParam, def_id) => {
                        if let Some(span) = self.opt_span(def_id) {
                            err.span_label(span, "const parameter from outer function");
                        }
                    }
                    _ => {
                        bug!(
                            "GenericParamsFromOuterFunction should only be used with Res::SelfTy, \
                            DefKind::TyParam"
                        );
                    }
                }
                if has_generic_params == HasGenericParams::Yes {
                    // Try to retrieve the span of the function signature and generate a new
                    // message with a local type or const parameter.
                    let sugg_msg = "try using a local generic parameter instead";
                    if let Some((sugg_span, snippet)) = sm.generate_local_type_param_snippet(span) {
                        // Suggest the modification to the user
                        err.span_suggestion(
                            sugg_span,
                            sugg_msg,
                            snippet,
                            Applicability::MachineApplicable,
                        );
                    } else if let Some(sp) = sm.generate_fn_name_span(span) {
                        err.span_label(
                            sp,
                            "try adding a local generic parameter in this method instead"
                                .to_string(),
                        );
                    } else {
                        err.help("try using a local generic parameter instead");
                    }
                }
                err
            }
            ResolutionError::NameAlreadyUsedInParameterList(name, first_use_span) => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0403,
                    "the name `{}` is already used for a generic \
                    parameter in this item's generic parameters",
                    name,
                );
                err.span_label(span, "already used");
                err.span_label(first_use_span, format!("first use of `{}`", name));
                err
            }
            ResolutionError::MethodNotMemberOfTrait(method, trait_) => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0407,
                    "method `{}` is not a member of trait `{}`",
                    method,
                    trait_
                );
                err.span_label(span, format!("not a member of trait `{}`", trait_));
                err
            }
            ResolutionError::TypeNotMemberOfTrait(type_, trait_) => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0437,
                    "type `{}` is not a member of trait `{}`",
                    type_,
                    trait_
                );
                err.span_label(span, format!("not a member of trait `{}`", trait_));
                err
            }
            ResolutionError::ConstNotMemberOfTrait(const_, trait_) => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0438,
                    "const `{}` is not a member of trait `{}`",
                    const_,
                    trait_
                );
                err.span_label(span, format!("not a member of trait `{}`", trait_));
                err
            }
            ResolutionError::VariableNotBoundInPattern(binding_error) => {
                let BindingError { name, target, origin, could_be_path } = binding_error;
                // `target`: spans of or-pattern alternatives missing the binding;
                // `origin`: spans of alternatives that do bind it.
                let target_sp = target.iter().copied().collect::<Vec<_>>();
                let origin_sp = origin.iter().copied().collect::<Vec<_>>();
                let msp = MultiSpan::from_spans(target_sp.clone());
                let mut err = struct_span_err!(
                    self.session,
                    msp,
                    E0408,
                    "variable `{}` is not bound in all patterns",
                    name,
                );
                for sp in target_sp {
                    err.span_label(sp, format!("pattern doesn't bind `{}`", name));
                }
                for sp in origin_sp {
                    err.span_label(sp, "variable not in all patterns");
                }
                if *could_be_path {
                    let help_msg = format!(
                        "if you meant to match on a variant or a `const` item, consider \
                        making the path in the pattern qualified: `?::{}`",
                        name,
                    );
                    err.span_help(span, &help_msg);
                }
                err
            }
            ResolutionError::VariableBoundWithDifferentMode(variable_name, first_binding_span) => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0409,
                    "variable `{}` is bound inconsistently across alternatives separated by `|`",
                    variable_name
                );
                err.span_label(span, "bound in different ways");
                err.span_label(first_binding_span, "first binding");
                err
            }
            ResolutionError::IdentifierBoundMoreThanOnceInParameterList(identifier) => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0415,
                    "identifier `{}` is bound more than once in this parameter list",
                    identifier
                );
                err.span_label(span, "used as parameter more than once");
                err
            }
            ResolutionError::IdentifierBoundMoreThanOnceInSamePattern(identifier) => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0416,
                    "identifier `{}` is bound more than once in the same pattern",
                    identifier
                );
                err.span_label(span, "used in a pattern more than once");
                err
            }
            ResolutionError::UndeclaredLabel { name, suggestion } => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0426,
                    "use of undeclared label `{}`",
                    name
                );
                err.span_label(span, format!("undeclared label `{}`", name));
                match suggestion {
                    // A reachable label with a similar name exists.
                    Some((ident, true)) => {
                        err.span_label(ident.span, "a label with a similar name is reachable");
                        err.span_suggestion(
                            span,
                            "try using similarly named label",
                            ident.name.to_string(),
                            Applicability::MaybeIncorrect,
                        );
                    }
                    // An unreachable label with a similar name exists.
                    Some((ident, false)) => {
                        err.span_label(
                            ident.span,
                            "a label with a similar name exists but is unreachable",
                        );
                    }
                    // No similarly-named labels exist.
                    None => (),
                }
                err
            }
            ResolutionError::SelfImportsOnlyAllowedWithin { root, span_with_rename } => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0429,
                    "{}",
                    "`self` imports are only allowed within a { } list"
                );
                // None of the suggestions below would help with a case like `use self`.
                if !root {
                    // use foo::bar::self        -> foo::bar
                    // use foo::bar::self as abc -> foo::bar as abc
                    err.span_suggestion(
                        span,
                        "consider importing the module directly",
                        "".to_string(),
                        Applicability::MachineApplicable,
                    );
                    // use foo::bar::self        -> foo::bar::{self}
                    // use foo::bar::self as abc -> foo::bar::{self as abc}
                    let braces = vec![
                        (span_with_rename.shrink_to_lo(), "{".to_string()),
                        (span_with_rename.shrink_to_hi(), "}".to_string()),
                    ];
                    err.multipart_suggestion(
                        "alternatively, use the multi-path `use` syntax to import `self`",
                        braces,
                        Applicability::MachineApplicable,
                    );
                }
                err
            }
            ResolutionError::SelfImportCanOnlyAppearOnceInTheList => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0430,
                    "`self` import can only appear once in an import list"
                );
                err.span_label(span, "can only appear once in an import list");
                err
            }
            ResolutionError::SelfImportOnlyInImportListWithNonEmptyPrefix => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0431,
                    "`self` import can only appear in an import list with \
                    a non-empty prefix"
                );
                err.span_label(span, "can only appear in an import list with a non-empty prefix");
                err
            }
            ResolutionError::FailedToResolve { label, suggestion } => {
                let mut err =
                    struct_span_err!(self.session, span, E0433, "failed to resolve: {}", &label);
                err.span_label(span, label);
                if let Some((suggestions, msg, applicability)) = suggestion {
                    err.multipart_suggestion(&msg, suggestions, applicability);
                }
                err
            }
            ResolutionError::CannotCaptureDynamicEnvironmentInFnItem => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0434,
                    "{}",
                    "can't capture dynamic environment in a fn item"
                );
                err.help("use the `|| { ... }` closure form instead");
                err
            }
            ResolutionError::AttemptToUseNonConstantValueInConstant => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0435,
                    "attempt to use a non-constant value in a constant"
                );
                err.span_label(span, "non-constant value");
                err
            }
            ResolutionError::BindingShadowsSomethingUnacceptable(what_binding, name, binding) => {
                let res = binding.res();
                let shadows_what = res.descr();
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0530,
                    "{}s cannot shadow {}s",
                    what_binding,
                    shadows_what
                );
                err.span_label(
                    span,
                    format!("cannot be named the same as {} {}", res.article(), shadows_what),
                );
                let participle = if binding.is_import() { "imported" } else { "defined" };
                let msg = format!("the {} `{}` is {} here", shadows_what, name, participle);
                err.span_label(binding.span, msg);
                err
            }
            ResolutionError::ForwardDeclaredTyParam => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0128,
                    "type parameters with a default cannot use \
                    forward declared identifiers"
                );
                err.span_label(
                    span,
                    "defaulted type parameters cannot be forward declared".to_string(),
                );
                err
            }
            ResolutionError::ParamInTyOfConstParam(name) => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0770,
                    "the type of const parameters must not depend on other generic parameters"
                );
                err.span_label(
                    span,
                    format!("the type must not depend on the parameter `{}`", name),
                );
                err
            }
            ResolutionError::ParamInAnonConstInTyDefault(name) => {
                let mut err = self.session.struct_span_err(
                    span,
                    "constant values inside of type parameter defaults must not depend on generic parameters",
                );
                err.span_label(
                    span,
                    format!("the anonymous constant must not depend on the parameter `{}`", name),
                );
                err
            }
            ResolutionError::ParamInNonTrivialAnonConst(name) => {
                let mut err = self.session.struct_span_err(
                    span,
                    "generic parameters must not be used inside of non trivial constant values",
                );
                err.span_label(
                    span,
                    &format!(
                        "non-trivial anonymous constants must not depend on the parameter `{}`",
                        name
                    ),
                );
                err.help(
                    &format!("it is currently only allowed to use either `{0}` or `{{ {0} }}` as generic constants", name)
                );
                err
            }
            ResolutionError::SelfInTyParamDefault => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0735,
                    "type parameters cannot use `Self` in their defaults"
                );
                err.span_label(span, "`Self` in type parameter default".to_string());
                err
            }
            ResolutionError::UnreachableLabel { name, definition_span, suggestion } => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0767,
                    "use of unreachable label `{}`",
                    name,
                );
                err.span_label(definition_span, "unreachable label defined here");
                err.span_label(span, format!("unreachable label `{}`", name));
                err.note(
                    "labels are unreachable through functions, closures, async blocks and modules",
                );
                match suggestion {
                    // A reachable label with a similar name exists.
                    Some((ident, true)) => {
                        err.span_label(ident.span, "a label with a similar name is reachable");
                        err.span_suggestion(
                            span,
                            "try using similarly named label",
                            ident.name.to_string(),
                            Applicability::MaybeIncorrect,
                        );
                    }
                    // An unreachable label with a similar name exists.
                    Some((ident, false)) => {
                        err.span_label(
                            ident.span,
                            "a label with a similar name exists but is also unreachable",
                        );
                    }
                    // No similarly-named labels exist.
                    None => (),
                }
                err
            }
        }
    }
    /// Builds and immediately emits the diagnostic for a failed visibility
    /// (`pub(...)`) path resolution.
    crate fn report_vis_error(&self, vis_resolution_error: VisResolutionError<'_>) {
        match vis_resolution_error {
            VisResolutionError::Relative2018(span, path) => {
                let mut err = self.session.struct_span_err(
                    span,
                    "relative paths are not supported in visibilities on 2018 edition",
                );
                // On the 2018 edition the path must be crate-anchored.
                err.span_suggestion(
                    path.span,
                    "try",
                    format!("crate::{}", pprust::path_to_string(&path)),
                    Applicability::MaybeIncorrect,
                );
                err
            }
            VisResolutionError::AncestorOnly(span) => struct_span_err!(
                self.session,
                span,
                E0742,
                "visibilities can only be restricted to ancestor modules"
            ),
            VisResolutionError::FailedToResolve(span, label, suggestion) => {
                // Reuse the generic path-resolution failure diagnostic (E0433).
                self.into_struct_error(span, ResolutionError::FailedToResolve { label, suggestion })
            }
            VisResolutionError::ExpectedFound(span, path_str, res) => {
                let mut err = struct_span_err!(
                    self.session,
                    span,
                    E0577,
                    "expected module, found {} `{}`",
                    res.descr(),
                    path_str
                );
                err.span_label(span, "not a module");
                err
            }
            VisResolutionError::Indeterminate(span) => struct_span_err!(
                self.session,
                span,
                E0578,
                "cannot determine resolution for the visibility"
            ),
            VisResolutionError::ModuleOnly(span) => {
                self.session.struct_span_err(span, "visibility must resolve to a module")
            }
        }
        .emit()
    }
    /// Lookup typo candidate in scope for a macro or import.
    ///
    /// Walks every scope visible from `parent_scope` (derive helpers,
    /// `macro_rules!` bindings, modules, preludes, built-ins, ...), collects
    /// all names whose resolution passes `filter_fn`, and returns the best
    /// edit-distance match for `ident`, if any candidate differs from it.
    fn early_lookup_typo_candidate(
        &mut self,
        scope_set: ScopeSet,
        parent_scope: &ParentScope<'a>,
        ident: Ident,
        filter_fn: &impl Fn(Res) -> bool,
    ) -> Option<TypoSuggestion> {
        let mut suggestions = Vec::new();
        self.visit_scopes(scope_set, parent_scope, ident, |this, scope, use_prelude, _| {
            match scope {
                Scope::DeriveHelpers(expn_id) => {
                    let res = Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper);
                    if filter_fn(res) {
                        suggestions.extend(
                            this.helper_attrs
                                .get(&expn_id)
                                .into_iter()
                                .flatten()
                                .map(|ident| TypoSuggestion::from_res(ident.name, res)),
                        );
                    }
                }
                Scope::DeriveHelpersCompat => {
                    let res = Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper);
                    if filter_fn(res) {
                        // Resolve each derive in scope to collect its helper attrs.
                        for derive in parent_scope.derives {
                            let parent_scope = &ParentScope { derives: &[], ..*parent_scope };
                            if let Ok((Some(ext), _)) = this.resolve_macro_path(
                                derive,
                                Some(MacroKind::Derive),
                                parent_scope,
                                false,
                                false,
                            ) {
                                suggestions.extend(
                                    ext.helper_attrs
                                        .iter()
                                        .map(|name| TypoSuggestion::from_res(*name, res)),
                                );
                            }
                        }
                    }
                }
                Scope::MacroRules(macro_rules_scope) => {
                    if let MacroRulesScope::Binding(macro_rules_binding) = macro_rules_scope {
                        let res = macro_rules_binding.binding.res();
                        if filter_fn(res) {
                            suggestions
                                .push(TypoSuggestion::from_res(macro_rules_binding.ident.name, res))
                        }
                    }
                }
                Scope::CrateRoot => {
                    let root_ident = Ident::new(kw::PathRoot, ident.span);
                    let root_module = this.resolve_crate_root(root_ident);
                    this.add_module_candidates(root_module, &mut suggestions, filter_fn);
                }
                Scope::Module(module) => {
                    this.add_module_candidates(module, &mut suggestions, filter_fn);
                }
                Scope::RegisteredAttrs => {
                    let res = Res::NonMacroAttr(NonMacroAttrKind::Registered);
                    if filter_fn(res) {
                        suggestions.extend(
                            this.registered_attrs
                                .iter()
                                .map(|ident| TypoSuggestion::from_res(ident.name, res)),
                        );
                    }
                }
                Scope::MacroUsePrelude => {
                    suggestions.extend(this.macro_use_prelude.iter().filter_map(
                        |(name, binding)| {
                            let res = binding.res();
                            filter_fn(res).then_some(TypoSuggestion::from_res(*name, res))
                        },
                    ));
                }
                Scope::BuiltinAttrs => {
                    let res = Res::NonMacroAttr(NonMacroAttrKind::Builtin);
                    if filter_fn(res) {
                        suggestions.extend(
                            BUILTIN_ATTRIBUTES
                                .iter()
                                .map(|(name, ..)| TypoSuggestion::from_res(*name, res)),
                        );
                    }
                }
                Scope::ExternPrelude => {
                    suggestions.extend(this.extern_prelude.iter().filter_map(|(ident, _)| {
                        let res = Res::Def(DefKind::Mod, DefId::local(CRATE_DEF_INDEX));
                        filter_fn(res).then_some(TypoSuggestion::from_res(ident.name, res))
                    }));
                }
                Scope::ToolPrelude => {
                    let res = Res::NonMacroAttr(NonMacroAttrKind::Tool);
                    suggestions.extend(
                        this.registered_tools
                            .iter()
                            .map(|ident| TypoSuggestion::from_res(ident.name, res)),
                    );
                }
                Scope::StdLibPrelude => {
                    if let Some(prelude) = this.prelude {
                        let mut tmp_suggestions = Vec::new();
                        this.add_module_candidates(prelude, &mut tmp_suggestions, filter_fn);
                        // Only offer prelude names when the prelude is actually
                        // usable here, except built-in macros which always are.
                        suggestions.extend(
                            tmp_suggestions
                                .into_iter()
                                .filter(|s| use_prelude || this.is_builtin_macro(s.res)),
                        );
                    }
                }
                Scope::BuiltinTypes => {
                    let primitive_types = &this.primitive_type_table.primitive_types;
                    suggestions.extend(primitive_types.iter().flat_map(|(name, prim_ty)| {
                        let res = Res::PrimTy(*prim_ty);
                        filter_fn(res).then_some(TypoSuggestion::from_res(*name, res))
                    }))
                }
            }
            None::<()>
        });
        // Make sure error reporting is deterministic.
        suggestions.sort_by_cached_key(|suggestion| suggestion.candidate.as_str());
        match find_best_match_for_name(
            suggestions.iter().map(|suggestion| &suggestion.candidate),
            ident.name,
            None,
        ) {
            // Only suggest a name that actually differs from what was written.
            Some(found) if found != ident.name => {
                suggestions.into_iter().find(|suggestion| suggestion.candidate == found)
            }
            _ => None,
        }
    }
    /// Breadth-first search from `start_module` for importable items named
    /// `lookup_ident` in `namespace` that satisfy `filter_fn`, returning the
    /// `use` paths that would bring them into scope. Modules reached through
    /// imports are explored only after directly-nested modules, so suggested
    /// paths prefer an item's defining location over re-exports.
    fn lookup_import_candidates_from_module<FilterFn>(
        &mut self,
        lookup_ident: Ident,
        namespace: Namespace,
        parent_scope: &ParentScope<'a>,
        start_module: Module<'a>,
        crate_name: Ident,
        filter_fn: FilterFn,
    ) -> Vec<ImportSuggestion>
    where
        FilterFn: Fn(Res) -> bool,
    {
        let mut candidates = Vec::new();
        let mut seen_modules = FxHashSet::default();
        let not_local_module = crate_name.name != kw::Crate;
        // Worklist entries: (module, path so far, still accessible, in extern crate).
        let mut worklist =
            vec![(start_module, Vec::<ast::PathSegment>::new(), true, not_local_module)];
        let mut worklist_via_import = vec![];
        while let Some((in_module, path_segments, accessible, in_module_is_extern)) =
            match worklist.pop() {
                None => worklist_via_import.pop(),
                Some(x) => Some(x),
            }
        {
            // We have to visit module children in deterministic order to avoid
            // instabilities in reported imports (#43552).
            in_module.for_each_child(self, |this, ident, ns, name_binding| {
                // avoid non-importable candidates
                if !name_binding.is_importable() {
                    return;
                }
                let child_accessible =
                    accessible && this.is_accessible_from(name_binding.vis, parent_scope.module);
                // do not venture inside inaccessible items of other crates
                if in_module_is_extern && !child_accessible {
                    return;
                }
                let via_import = name_binding.is_import() && !name_binding.is_extern_crate();
                // There is an assumption elsewhere that paths of variants are in the enum's
                // declaration and not imported. With this assumption, the variant component is
                // chopped and the rest of the path is assumed to be the enum's own path. For
                // errors where a variant is used as the type instead of the enum, this causes
                // funny looking invalid suggestions, i.e `foo` instead of `foo::MyEnum`.
                if via_import && name_binding.is_possibly_imported_variant() {
                    return;
                }
                // collect results based on the filter function
                // avoid suggesting anything from the same module in which we are resolving
                if ident.name == lookup_ident.name
                    && ns == namespace
                    && !ptr::eq(in_module, parent_scope.module)
                {
                    let res = name_binding.res();
                    if filter_fn(res) {
                        // create the path
                        let mut segms = path_segments.clone();
                        if lookup_ident.span.rust_2018() {
                            // crate-local absolute paths start with `crate::` in edition 2018
                            // FIXME: may also be stabilized for Rust 2015 (Issues #45477, #44660)
                            segms.insert(0, ast::PathSegment::from_ident(crate_name));
                        }
                        segms.push(ast::PathSegment::from_ident(ident));
                        let path = Path { span: name_binding.span, segments: segms, tokens: None };
                        let did = match res {
                            // For constructors, suggest the enclosing type instead.
                            Res::Def(DefKind::Ctor(..), did) => this.parent(did),
                            _ => res.opt_def_id(),
                        };
                        if child_accessible {
                            // Remove invisible match if exists
                            if let Some(idx) = candidates
                                .iter()
                                .position(|v: &ImportSuggestion| v.did == did && !v.accessible)
                            {
                                candidates.remove(idx);
                            }
                        }
                        // De-duplicate by definition id.
                        if candidates.iter().all(|v: &ImportSuggestion| v.did != did) {
                            candidates.push(ImportSuggestion {
                                did,
                                descr: res.descr(),
                                path,
                                accessible: child_accessible,
                            });
                        }
                    }
                }
                // collect submodules to explore
                if let Some(module) = name_binding.module() {
                    // form the path
                    let mut path_segments = path_segments.clone();
                    path_segments.push(ast::PathSegment::from_ident(ident));
                    let is_extern_crate_that_also_appears_in_prelude =
                        name_binding.is_extern_crate() && lookup_ident.span.rust_2018();
                    if !is_extern_crate_that_also_appears_in_prelude {
                        let is_extern = in_module_is_extern || name_binding.is_extern_crate();
                        // add the module to the lookup
                        if seen_modules.insert(module.def_id().unwrap()) {
                            if via_import { &mut worklist_via_import } else { &mut worklist }
                                .push((module, path_segments, child_accessible, is_extern));
                        }
                    }
                }
            })
        }
        // If only some candidates are accessible, take just them
        if !candidates.iter().all(|v: &ImportSuggestion| !v.accessible) {
            candidates = candidates.into_iter().filter(|x| x.accessible).collect();
        }
        candidates
    }
    /// When name resolution fails, this method can be used to look up candidate
    /// entities with the expected name. It allows filtering them using the
    /// supplied predicate (which should be used to only accept the types of
    /// definitions expected, e.g., traits). The lookup spans across all crates.
    ///
    /// N.B., the method does not look into imports, but this is not a problem,
    /// since we report the definitions (thus, the de-aliased imports).
    crate fn lookup_import_candidates<FilterFn>(
        &mut self,
        lookup_ident: Ident,
        namespace: Namespace,
        parent_scope: &ParentScope<'a>,
        filter_fn: FilterFn,
    ) -> Vec<ImportSuggestion>
    where
        FilterFn: Fn(Res) -> bool,
    {
        // Start with candidates from the local crate root...
        let mut suggestions = self.lookup_import_candidates_from_module(
            lookup_ident,
            namespace,
            parent_scope,
            self.graph_root,
            Ident::with_dummy_span(kw::Crate),
            &filter_fn,
        );
        // ...then, on the 2018 edition, also search every extern-prelude crate.
        if lookup_ident.span.rust_2018() {
            let extern_prelude_names = self.extern_prelude.clone();
            for (ident, _) in extern_prelude_names.into_iter() {
                if ident.span.from_expansion() {
                    // Idents are adjusted to the root context before being
                    // resolved in the extern prelude, so reporting this to the
                    // user is no help. This skips the injected
                    // `extern crate std` in the 2018 edition, which would
                    // otherwise cause duplicate suggestions.
                    continue;
                }
                if let Some(crate_id) = self.crate_loader.maybe_process_path_extern(ident.name) {
                    let crate_root =
                        self.get_module(DefId { krate: crate_id, index: CRATE_DEF_INDEX });
                    suggestions.extend(self.lookup_import_candidates_from_module(
                        lookup_ident,
                        namespace,
                        parent_scope,
                        crate_root,
                        ident,
                        &filter_fn,
                    ));
                }
            }
        }
        suggestions
    }
    /// Augments an unresolved-macro diagnostic with a typo suggestion and
    /// variant-specific hints (unsafe auto traits, missing `#[macro_use]`).
    crate fn unresolved_macro_suggestions(
        &mut self,
        err: &mut DiagnosticBuilder<'a>,
        macro_kind: MacroKind,
        parent_scope: &ParentScope<'a>,
        ident: Ident,
    ) {
        // Only suggest names that resolve to the same kind of macro.
        let is_expected = &|res: Res| res.macro_kind() == Some(macro_kind);
        let suggestion = self.early_lookup_typo_candidate(
            ScopeSet::Macro(macro_kind),
            parent_scope,
            ident,
            is_expected,
        );
        self.add_typo_suggestion(err, suggestion, ident.span);
        // `#[derive(Send)]` / `#[derive(Sync)]`: these are traits to implement
        // manually, not derivable macros.
        if macro_kind == MacroKind::Derive && (ident.name == sym::Send || ident.name == sym::Sync) {
            let msg = format!("unsafe traits like `{}` should be implemented explicitly", ident);
            err.span_note(ident.span, &msg);
        }
        // The macro exists somewhere but is not in scope here.
        if self.macro_names.contains(&ident.normalize_to_macros_2_0()) {
            err.help("have you added the `#[macro_use]` on the module/import?");
        }
    }
    /// Attaches `suggestion` to `err` as a "similarly named ... exists"
    /// suggestion at `span`, labeling the suggested item's definition site
    /// when it is known. Returns `true` iff a suggestion was added.
    crate fn add_typo_suggestion(
        &self,
        err: &mut DiagnosticBuilder<'_>,
        suggestion: Option<TypoSuggestion>,
        span: Span,
    ) -> bool {
        let suggestion = match suggestion {
            None => return false,
            // We shouldn't suggest underscore.
            Some(suggestion) if suggestion.candidate == kw::Underscore => return false,
            Some(suggestion) => suggestion,
        };
        // Locate the candidate's definition span, locally or in another crate.
        let def_span = suggestion.res.opt_def_id().and_then(|def_id| match def_id.krate {
            LOCAL_CRATE => self.opt_span(def_id),
            _ => Some(
                self.session
                    .source_map()
                    .guess_head_span(self.cstore().get_span_untracked(def_id, self.session)),
            ),
        });
        if let Some(def_span) = def_span {
            if span.overlaps(def_span) {
                // Don't suggest typo suggestion for itself like in the following:
                // error[E0423]: expected function, tuple struct or tuple variant, found struct `X`
                //   --> $DIR/issue-64792-bad-unicode-ctor.rs:3:14
                //    |
                // LL | struct X {}
                //    | ----------- `X` defined here
                // LL |
                // LL | const Y: X = X("ö");
                //    | -------------^^^^^^- similarly named constant `Y` defined here
                //    |
                // help: use struct literal syntax instead
                //    |
                // LL | const Y: X = X {};
                //    |              ^^^^
                // help: a constant with a similar name exists
                //    |
                // LL | const Y: X = Y("ö");
                //    |              ^
                return false;
            }
            err.span_label(
                self.session.source_map().guess_head_span(def_span),
                &format!(
                    "similarly named {} `{}` defined here",
                    suggestion.res.descr(),
                    suggestion.candidate.as_str(),
                ),
            );
        }
        let msg = format!(
            "{} {} with a similar name exists",
            suggestion.res.article(),
            suggestion.res.descr()
        );
        err.span_suggestion(
            span,
            &msg,
            suggestion.candidate.to_string(),
            Applicability::MaybeIncorrect,
        );
        true
    }
    /// Describes `b` for use in ambiguity notes, e.g. "a built-in type" or
    /// "the struct imported here". Bindings with a dummy span (no source
    /// location) get a categorical description; others refer to their site.
    fn binding_description(&self, b: &NameBinding<'_>, ident: Ident, from_prelude: bool) -> String {
        let res = b.res();
        if b.span.is_dummy() {
            let add_built_in = match b.res() {
                // These already contain the "built-in" prefix or look bad with it.
                Res::NonMacroAttr(..) | Res::PrimTy(..) | Res::ToolMod => false,
                _ => true,
            };
            let (built_in, from) = if from_prelude {
                ("", " from prelude")
            } else if b.is_extern_crate()
                && !b.is_import()
                && self.session.opts.externs.get(&ident.as_str()).is_some()
            {
                ("", " passed with `--extern`")
            } else if add_built_in {
                (" built-in", "")
            } else {
                ("", "")
            };
            // "a built-in ..." reads better than matching the article to the descr.
            let article = if built_in.is_empty() { res.article() } else { "a" };
            format!(
                "{a}{built_in} {thing}{from}",
                a = article,
                thing = res.descr(),
                built_in = built_in,
                from = from
            )
        } else {
            let introduced = if b.is_import() { "imported" } else { "defined" };
            format!("the {thing} {introduced} here", thing = res.descr(), introduced = introduced)
        }
    }
    /// Emits E0659 for an ambiguous name, noting both candidate bindings and
    /// how the user could disambiguate between them.
    crate fn report_ambiguity_error(&self, ambiguity_error: &AmbiguityError<'_>) {
        let AmbiguityError { kind, ident, b1, b2, misc1, misc2 } = *ambiguity_error;
        let (b1, b2, misc1, misc2, swapped) = if b2.span.is_dummy() && !b1.span.is_dummy() {
            // We have to print the span-less alternative first, otherwise formatting looks bad.
            (b2, b1, misc2, misc1, true)
        } else {
            (b1, b2, misc1, misc2, false)
        };
        let mut err = struct_span_err!(
            self.session,
            ident.span,
            E0659,
            "`{ident}` is ambiguous ({why})",
            ident = ident,
            why = kind.descr()
        );
        err.span_label(ident.span, "ambiguous name");
        // Adds a note + disambiguation help for one of the two candidates.
        let mut could_refer_to = |b: &NameBinding<'_>, misc: AmbiguityErrorMisc, also: &str| {
            let what = self.binding_description(b, ident, misc == AmbiguityErrorMisc::FromPrelude);
            let note_msg = format!(
                "`{ident}` could{also} refer to {what}",
                ident = ident,
                also = also,
                what = what
            );
            let thing = b.res().descr();
            let mut help_msgs = Vec::new();
            if b.is_glob_import()
                && (kind == AmbiguityKind::GlobVsGlob
                    || kind == AmbiguityKind::GlobVsExpanded
                    || kind == AmbiguityKind::GlobVsOuter && swapped != also.is_empty())
            {
                help_msgs.push(format!(
                    "consider adding an explicit import of \
                     `{ident}` to disambiguate",
                    ident = ident
                ))
            }
            if b.is_extern_crate() && ident.span.rust_2018() {
                help_msgs.push(format!(
                    "use `::{ident}` to refer to this {thing} unambiguously",
                    ident = ident,
                    thing = thing,
                ))
            }
            if misc == AmbiguityErrorMisc::SuggestCrate {
                help_msgs.push(format!(
                    "use `crate::{ident}` to refer to this {thing} unambiguously",
                    ident = ident,
                    thing = thing,
                ))
            } else if misc == AmbiguityErrorMisc::SuggestSelf {
                help_msgs.push(format!(
                    "use `self::{ident}` to refer to this {thing} unambiguously",
                    ident = ident,
                    thing = thing,
                ))
            }
            err.span_note(b.span, &note_msg);
            for (i, help_msg) in help_msgs.iter().enumerate() {
                let or = if i == 0 { "" } else { "or " };
                err.help(&format!("{}{}", or, help_msg));
            }
        };
        could_refer_to(b1, misc1, "");
        could_refer_to(b2, misc2, " also");
        err.emit();
    }
/// If the binding refers to a tuple struct constructor with fields,
/// returns the span of its fields.
fn ctor_fields_span(&self, binding: &NameBinding<'_>) -> Option<Span> {
if let NameBindingKind::Res(
Res::Def(DefKind::Ctor(CtorOf::Struct, CtorKind::Fn), ctor_def_id),
_,
) = binding.kind
{
let def_id = (&*self).parent(ctor_def_id).expect("no parent for a constructor");
let fields = self.field_names.get(&def_id)?;
let first_field = fields.first()?; // Handle `struct Foo()`
return Some(fields.iter().fold(first_field.span, |acc, field| acc.to(field.span)));
}
None
}
    /// Emits an `E0603` "item is private" error, then walks the chain of
    /// re-exports/imports leading to the private item so the user can see
    /// where each step is defined.
    crate fn report_privacy_error(&self, privacy_error: &PrivacyError<'_>) {
        let PrivacyError { ident, binding, .. } = *privacy_error;
        let res = binding.res();
        let ctor_fields_span = self.ctor_fields_span(binding);
        let plain_descr = res.descr().to_string();
        // A tuple-struct constructor with private fields is reported as a
        // "<descr> constructor" rather than just "<descr>".
        let nonimport_descr =
            if ctor_fields_span.is_some() { plain_descr + " constructor" } else { plain_descr };
        let import_descr = nonimport_descr.clone() + " import";
        let get_descr =
            |b: &NameBinding<'_>| if b.is_import() { &import_descr } else { &nonimport_descr };
        // Print the primary message.
        let descr = get_descr(binding);
        let mut err =
            struct_span_err!(self.session, ident.span, E0603, "{} `{}` is private", descr, ident);
        err.span_label(ident.span, &format!("private {}", descr));
        if let Some(span) = ctor_fields_span {
            err.span_label(span, "a constructor is private if any of the fields is private");
        }
        // Print the whole import chain to make it easier to see what happens.
        let first_binding = binding;
        let mut next_binding = Some(binding);
        let mut next_ident = ident;
        while let Some(binding) = next_binding {
            let name = next_ident;
            // Decide whether to keep following the chain: single imports
            // rename the ident and continue, globs/macro-use continue with
            // the same ident, everything else terminates the walk.
            next_binding = match binding.kind {
                _ if res == Res::Err => None,
                NameBindingKind::Import { binding, import, .. } => match import.kind {
                    _ if binding.span.is_dummy() => None,
                    ImportKind::Single { source, .. } => {
                        next_ident = source;
                        Some(binding)
                    }
                    ImportKind::Glob { .. } | ImportKind::MacroUse => Some(binding),
                    ImportKind::ExternCrate { .. } => None,
                },
                _ => None,
            };
            let first = ptr::eq(binding, first_binding);
            let descr = get_descr(binding);
            let msg = format!(
                "{and_refers_to}the {item} `{name}`{which} is defined here{dots}",
                and_refers_to = if first { "" } else { "...and refers to " },
                item = descr,
                name = name,
                which = if first { "" } else { " which" },
                dots = if next_binding.is_some() { "..." } else { "" },
            );
            let def_span = self.session.source_map().guess_head_span(binding.span);
            let mut note_span = MultiSpan::from_span(def_span);
            // Publicly visible intermediate re-exports can be imported directly.
            if !first && binding.vis == ty::Visibility::Public {
                note_span.push_span_label(def_span, "consider importing it directly".into());
            }
            err.span_note(note_span, &msg);
        }
        err.emit();
    }
}
impl<'a, 'b> ImportResolver<'a, 'b> {
    /// Adds suggestions for a path that cannot be resolved.
    ///
    /// Tries, in order, prefixing the path with `self::`, `crate::`,
    /// `super::`, or an extern crate name, returning the first variant
    /// that resolves to a module.
    pub(crate) fn make_path_suggestion(
        &mut self,
        span: Span,
        mut path: Vec<Segment>,
        parent_scope: &ParentScope<'b>,
    ) -> Option<(Vec<Segment>, Vec<String>)> {
        debug!("make_path_suggestion: span={:?} path={:?}", span, path);
        match (path.get(0), path.get(1)) {
            // `{{root}}::ident::...` on both editions.
            // On 2015 `{{root}}` is usually added implicitly.
            (Some(fst), Some(snd))
                if fst.ident.name == kw::PathRoot && !snd.ident.is_path_segment_keyword() => {}
            // `ident::...` on 2018.
            (Some(fst), _)
                if fst.ident.span.rust_2018() && !fst.ident.is_path_segment_keyword() =>
            {
                // Insert a placeholder that's later replaced by `self`/`super`/etc.
                path.insert(0, Segment::from_ident(Ident::invalid()));
            }
            _ => return None,
        }
        // After the match above the path always has at least two segments,
        // with segment 0 acting as the replaceable prefix slot.
        self.make_missing_self_suggestion(span, path.clone(), parent_scope)
            .or_else(|| self.make_missing_crate_suggestion(span, path.clone(), parent_scope))
            .or_else(|| self.make_missing_super_suggestion(span, path.clone(), parent_scope))
            .or_else(|| self.make_external_crate_suggestion(span, path, parent_scope))
    }
/// Suggest a missing `self::` if that resolves to an correct module.
///
/// ```text
/// |
/// LL | use foo::Bar;
/// | ^^^ did you mean `self::foo`?
/// ```
fn make_missing_self_suggestion(
&mut self,
span: Span,
mut path: Vec<Segment>,
parent_scope: &ParentScope<'b>,
) -> Option<(Vec<Segment>, Vec<String>)> {
// Replace first ident with `self` and check if that is valid.
path[0].ident.name = kw::SelfLower;
let result = self.r.resolve_path(&path, None, parent_scope, false, span, CrateLint::No);
debug!("make_missing_self_suggestion: path={:?} result={:?}", path, result);
if let PathResult::Module(..) = result { Some((path, Vec::new())) } else { None }
}
    /// Suggests a missing `crate::` if that resolves to a correct module.
    ///
    /// ```text
    /// |
    /// LL | use foo::Bar;
    /// | ^^^ did you mean `crate::foo`?
    /// ```
    fn make_missing_crate_suggestion(
        &mut self,
        span: Span,
        mut path: Vec<Segment>,
        parent_scope: &ParentScope<'b>,
    ) -> Option<(Vec<Segment>, Vec<String>)> {
        // Replace first ident with `crate` and check if that is valid.
        path[0].ident.name = kw::Crate;
        let result = self.r.resolve_path(&path, None, parent_scope, false, span, CrateLint::No);
        debug!("make_missing_crate_suggestion: path={:?} result={:?}", path, result);
        if let PathResult::Module(..) = result {
            // Unlike the other prefix suggestions, this one also carries an
            // explanatory note about the 2018 path changes.
            Some((
                path,
                vec![
                    "`use` statements changed in Rust 2018; read more at \
                     <https://doc.rust-lang.org/edition-guide/rust-2018/module-system/path-\
                     clarity.html>"
                        .to_string(),
                ],
            ))
        } else {
            None
        }
    }
    /// Suggests a missing `super::` if that resolves to a correct module.
    ///
    /// ```text
    /// |
    /// LL | use foo::Bar;
    /// | ^^^ did you mean `super::foo`?
    /// ```
    fn make_missing_super_suggestion(
        &mut self,
        span: Span,
        mut path: Vec<Segment>,
        parent_scope: &ParentScope<'b>,
    ) -> Option<(Vec<Segment>, Vec<String>)> {
        // Replace first ident with `super` and check if that is valid.
        path[0].ident.name = kw::Super;
        let result = self.r.resolve_path(&path, None, parent_scope, false, span, CrateLint::No);
        debug!("make_missing_super_suggestion: path={:?} result={:?}", path, result);
        if let PathResult::Module(..) = result { Some((path, Vec::new())) } else { None }
    }
    /// Suggests a missing external crate name if that resolves to a correct module.
    ///
    /// ```text
    /// |
    /// LL | use foobar::Baz;
    /// | ^^^^^^ did you mean `baz::foobar`?
    /// ```
    ///
    /// Used when importing a submodule of an external crate but missing that crate's
    /// name as the first part of path.
    fn make_external_crate_suggestion(
        &mut self,
        span: Span,
        mut path: Vec<Segment>,
        parent_scope: &ParentScope<'b>,
    ) -> Option<(Vec<Segment>, Vec<String>)> {
        // `path[1]` is safe here: the caller `make_path_suggestion` only
        // forwards paths with at least two segments. Only applies on 2018.
        if path[1].ident.span.rust_2015() {
            return None;
        }
        // Sort extern crate names in reverse order to get
        // 1) some consistent ordering for emitted diagnostics, and
        // 2) `std` suggestions before `core` suggestions.
        let mut extern_crate_names =
            self.r.extern_prelude.iter().map(|(ident, _)| ident.name).collect::<Vec<_>>();
        extern_crate_names.sort_by_key(|name| Reverse(name.as_str()));
        for name in extern_crate_names.into_iter() {
            // Replace first ident with a crate name and check if that is valid.
            path[0].ident.name = name;
            let result = self.r.resolve_path(&path, None, parent_scope, false, span, CrateLint::No);
            debug!(
                "make_external_crate_suggestion: name={:?} path={:?} result={:?}",
                name, path, result
            );
            // The first crate name that makes the path resolve wins.
            if let PathResult::Module(..) = result {
                return Some((path, Vec::new()));
            }
        }
        None
    }
    /// Suggests importing a macro from the root of the crate rather than a module within
    /// the crate.
    ///
    /// ```text
    /// help: a macro with this name exists at the root of the crate
    /// |
    /// LL | use issue_59764::makro;
    /// | ^^^^^^^^^^^^^^^^^^
    /// |
    /// = note: this could be because a macro annotated with `#[macro_export]` will be exported
    /// at the root of the crate instead of the module where it is defined
    /// ```
    pub(crate) fn check_for_module_export_macro(
        &mut self,
        import: &'b Import<'b>,
        module: ModuleOrUniformRoot<'b>,
        ident: Ident,
    ) -> Option<(Option<Suggestion>, Vec<String>)> {
        let mut crate_module = if let ModuleOrUniformRoot::Module(module) = module {
            module
        } else {
            return None;
        };
        // Walk up the module tree to the crate root.
        while let Some(parent) = crate_module.parent {
            crate_module = parent;
        }
        if ModuleOrUniformRoot::same_def(ModuleOrUniformRoot::Module(crate_module), module) {
            // Don't make a suggestion if the import was already from the root of the
            // crate.
            return None;
        }
        // Look for a bang macro with the same name at the crate root.
        let resolutions = self.r.resolutions(crate_module).borrow();
        let resolution = resolutions.get(&self.r.new_key(ident, MacroNS))?;
        let binding = resolution.borrow().binding()?;
        if let Res::Def(DefKind::Macro(MacroKind::Bang), _) = binding.res() {
            let module_name = crate_module.kind.name().unwrap();
            // Preserve a rename (`use ...::foo as bar;`) in the suggestion.
            let import_snippet = match import.kind {
                ImportKind::Single { source, target, .. } if source != target => {
                    format!("{} as {}", source, target)
                }
                _ => format!("{}", ident),
            };
            let mut corrections: Vec<(Span, String)> = Vec::new();
            if !import.is_nested() {
                // Assume this is the easy case of `use issue_59764::foo::makro;` and just remove
                // intermediate segments.
                corrections.push((import.span, format!("{}::{}", module_name, import_snippet)));
            } else {
                // Find the binding span (and any trailing commas and spaces).
                // ie. `use a::b::{c, d, e};`
                // ^^^
                let (found_closing_brace, binding_span) = find_span_of_binding_until_next_binding(
                    self.r.session,
                    import.span,
                    import.use_span,
                );
                debug!(
                    "check_for_module_export_macro: found_closing_brace={:?} binding_span={:?}",
                    found_closing_brace, binding_span
                );
                let mut removal_span = binding_span;
                if found_closing_brace {
                    // If the binding span ended with a closing brace, as in the below example:
                    // ie. `use a::b::{c, d};`
                    // ^
                    // Then expand the span of characters to remove to include the previous
                    // binding's trailing comma.
                    // ie. `use a::b::{c, d};`
                    // ^^^
                    if let Some(previous_span) =
                        extend_span_to_previous_binding(self.r.session, binding_span)
                    {
                        debug!("check_for_module_export_macro: previous_span={:?}", previous_span);
                        removal_span = removal_span.with_lo(previous_span.lo());
                    }
                }
                debug!("check_for_module_export_macro: removal_span={:?}", removal_span);
                // Remove the `removal_span`.
                corrections.push((removal_span, "".to_string()));
                // Find the span after the crate name and if it has nested imports immediately
                // after the crate name already.
                // ie. `use a::b::{c, d};`
                // ^^^^^^^^^
                // or `use a::{b, c, d}};`
                // ^^^^^^^^^^^
                let (has_nested, after_crate_name) = find_span_immediately_after_crate_name(
                    self.r.session,
                    module_name,
                    import.use_span,
                );
                debug!(
                    "check_for_module_export_macro: has_nested={:?} after_crate_name={:?}",
                    has_nested, after_crate_name
                );
                let source_map = self.r.session.source_map();
                // Add the import to the start, with a `{` if required.
                let start_point = source_map.start_point(after_crate_name);
                if let Ok(start_snippet) = source_map.span_to_snippet(start_point) {
                    corrections.push((
                        start_point,
                        if has_nested {
                            // In this case, `start_snippet` must equal '{'.
                            format!("{}{}, ", start_snippet, import_snippet)
                        } else {
                            // In this case, add a `{`, then the moved import, then whatever
                            // was there before.
                            format!("{{{}, {}", import_snippet, start_snippet)
                        },
                    ));
                }
                // Add a `};` to the end if nested, matching the `{` added at the start.
                if !has_nested {
                    corrections.push((source_map.end_point(after_crate_name), "};".to_string()));
                }
            }
            let suggestion = Some((
                corrections,
                String::from("a macro with this name exists at the root of the crate"),
                Applicability::MaybeIncorrect,
            ));
            let note = vec![
                "this could be because a macro annotated with `#[macro_export]` will be exported \
                 at the root of the crate instead of the module where it is defined"
                    .to_string(),
            ];
            Some((suggestion, note))
        } else {
            None
        }
    }
}
/// Given a `binding_span` of a binding within a use statement:
///
/// ```
/// use foo::{a, b, c};
/// ^
/// ```
///
/// then return the span until the next binding or the end of the statement:
///
/// ```
/// use foo::{a, b, c};
/// ^^^
/// ```
///
/// The returned `bool` records whether a closing brace was encountered while
/// scanning, which callers use to decide whether a preceding comma must also
/// be removed.
pub(crate) fn find_span_of_binding_until_next_binding(
    sess: &Session,
    binding_span: Span,
    use_span: Span,
) -> (bool, Span) {
    let source_map = sess.source_map();
    // Find the span of everything after the binding.
    // ie. `a, e};` or `a};`
    let binding_until_end = binding_span.with_hi(use_span.hi());
    // Find everything after the binding but not including the binding.
    // ie. `, e};` or `};`
    let after_binding_until_end = binding_until_end.with_lo(binding_span.hi());
    // Keep characters in the span until we encounter something that isn't a comma or
    // whitespace.
    // ie. `, ` or ``.
    //
    // Also note whether a closing brace character was encountered. If there
    // was, then later go backwards to remove any trailing commas that are left.
    let mut found_closing_brace = false;
    // NB: the closure records seeing `}` as a side effect but still stops the
    // scan there, since `}` is neither a space nor a comma.
    let after_binding_until_next_binding =
        source_map.span_take_while(after_binding_until_end, |&ch| {
            if ch == '}' {
                found_closing_brace = true;
            }
            ch == ' ' || ch == ','
        });
    // Combine the two spans.
    // ie. `a, ` or `a`.
    //
    // Removing these would leave `issue_52891::{d, e};` or `issue_52891::{d, e, };`
    let span = binding_span.with_hi(after_binding_until_next_binding.hi());
    (found_closing_brace, span)
}
/// Given a `binding_span`, return the span through to the comma or opening brace of the previous
/// binding.
///
/// ```
/// use foo::a::{a, b, c};
/// ^^--- binding span
/// |
/// returned span
///
/// use foo::{a, b, c};
/// --- binding span
/// ```
pub(crate) fn extend_span_to_previous_binding(sess: &Session, binding_span: Span) -> Option<Span> {
    let source_map = sess.source_map();
    // `prev_source` will contain all of the source that came before the span.
    // Then split based on a comma and take the first (ie. closest to our span)
    // snippet. In the example, this is a space.
    let prev_source = source_map.span_to_prev_source(binding_span).ok()?;
    // The binding is only a non-first item of a nested import list if there is
    // both a comma and an opening brace somewhere before it.
    if !prev_source.contains(',') || !prev_source.contains('{') {
        return None;
    }
    // Text between the last comma (resp. last opening brace) and the binding.
    // Taking the first fragment of `rsplit` lazily avoids collecting the
    // whole split into a `Vec` as the previous implementation did.
    let prev_comma = prev_source.rsplit(',').next()?;
    let prev_starting_brace = prev_source.rsplit('{').next()?;
    // If the amount of source code before the comma is greater than
    // the amount of source code before the starting brace then we've only
    // got one item in the nested item (eg. `issue_52891::{self}`).
    if prev_comma.len() > prev_starting_brace.len() {
        return None;
    }
    Some(binding_span.with_lo(BytePos(
        // Take away the number of bytes for the characters we've found and an
        // extra for the comma.
        binding_span.lo().0 - (prev_comma.len() as u32) - 1,
    )))
}
/// Given a `use_span` of a binding within a use statement, returns the highlighted span and if
/// it is a nested use tree.
///
/// ```
/// use foo::a::{b, c};
/// ^^^^^^^^^^ // false
///
/// use foo::{a, b, c};
/// ^^^^^^^^^^ // true
///
/// use foo::{a, b::{c, d}};
/// ^^^^^^^^^^^^^^^ // true
/// ```
fn find_span_immediately_after_crate_name(
    sess: &Session,
    module_name: Symbol,
    use_span: Span,
) -> (bool, Span) {
    debug!(
        "find_span_immediately_after_crate_name: module_name={:?} use_span={:?}",
        module_name, use_span
    );
    let source_map = sess.source_map();
    // Using `use issue_59764::foo::{baz, makro};` as an example throughout..
    let mut num_colons = 0;
    // Find second colon.. `use issue_59764:`
    // (The `::` after the crate name contributes colons one and two.)
    let until_second_colon = source_map.span_take_while(use_span, |c| {
        if *c == ':' {
            num_colons += 1;
        }
        match c {
            ':' if num_colons == 2 => false,
            _ => true,
        }
    });
    // Find everything after the second colon.. `foo::{baz, makro};`
    let from_second_colon = use_span.with_lo(until_second_colon.hi() + BytePos(1));
    let mut found_a_non_whitespace_character = false;
    // Find the first non-whitespace character in `from_second_colon`.. `f`
    // (The scan stops on the character *after* the first non-whitespace one,
    // so the resulting span covers exactly that one character.)
    let after_second_colon = source_map.span_take_while(from_second_colon, |c| {
        if found_a_non_whitespace_character {
            return false;
        }
        if !c.is_whitespace() {
            found_a_non_whitespace_character = true;
        }
        true
    });
    // Find the first `{` in from_second_colon.. `foo::{`
    // If the `{` is the very first character, the import is a nested tree.
    let next_left_bracket = source_map.span_through_char(from_second_colon, '{');
    (next_left_bracket == after_second_colon, from_second_colon)
}
/// When an entity with a given name is not available in scope, we search all
/// crates for entities with that name. This function renders the results of
/// that search either as structured `use` suggestions (when a placement span
/// is available) or as a plain note listing the candidate paths.
crate fn show_candidates(
    err: &mut DiagnosticBuilder<'_>,
    // This is `None` if all placement locations are inside expansions
    use_placement_span: Option<Span>,
    candidates: &[ImportSuggestion],
    instead: bool,
    found_use: bool,
) {
    if candidates.is_empty() {
        return;
    }
    // Candidates are produced by iterating a hash map; sort and dedup them so
    // the emitted diagnostics are stable across executions.
    let mut paths: Vec<String> =
        candidates.iter().map(|c| path_names_to_string(&c.path)).collect();
    paths.sort();
    paths.dedup();
    let (determiner, kind) = match candidates {
        [single] => ("this", single.descr),
        _ => ("one of these", "items"),
    };
    let instead = if instead { " instead" } else { "" };
    let mut msg = format!("consider importing {} {}{}", determiner, kind, instead);
    match use_placement_span {
        Some(span) => {
            let suggestions = paths.into_iter().map(|path| {
                // An extra newline keeps the inserted `use` statement separated
                // from the item that directly follows it.
                let additional_newline = if found_use { "" } else { "\n" };
                format!("use {};\n{}", path, additional_newline)
            });
            err.span_suggestions(span, &msg, suggestions, Applicability::Unspecified);
        }
        None => {
            msg.push(':');
            for path in paths {
                msg.push('\n');
                msg.push_str(&path);
            }
            err.note(&msg);
        }
    }
}
| 41.002978 | 122 | 0.489759 |
e5fe1891cb87825279bdd34f79b7322891b4aad5 | 1,381 | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests correct kind-checking of the reason stack closures without the :Copy
// bound must be noncopyable. For details see
// http://smallcultfollowing.com/babysteps/blog/2013/04/30/the-case-of-the-recurring-closure/
// NOTE(review): `|&R, bool|: 'a` is pre-1.0 boxed-closure type syntax (a
// closure taking `(&R, bool)` with lifetime bound `'a`); this file must keep
// that historical syntax to exercise the compile-fail scenario below.
struct R<'a> {
    // This struct is needed to create the
    // otherwise infinite type of a fn that
    // accepts itself as argument:
    c: |&R, bool|: 'a
}
// Tries to overwrite `x` (captured by the closure) while a borrow of its
// contents (`msg`) is still live, by re-entering the same closure through
// the copy stored in `R` — the unsoundness this test must reject.
fn innocent_looking_victim() {
    let mut x = Some("hello".to_owned());
    conspirator(|f, writer| {
        if writer {
            x = None;
        } else {
            match x {
                Some(ref msg) => {
                    // Re-entrant call with `writer == true`: would set `x = None`
                    // while `msg` still borrows the old value.
                    (f.c)(f, true);
                    println!("{:?}", msg);
                },
                None => fail!("oops"),
            }
        }
    })
}
// Moves the closure into `R` and then tries to call it again directly;
// the second use must be rejected because stack closures are noncopyable.
fn conspirator(f: |&R, bool|) {
    let r = R {c: f};
    f(&r, false) //~ ERROR use of moved value
}
fn main() { innocent_looking_victim() }
| 30.688889 | 93 | 0.598841 |
4bb70ccb4b1a004da91c4bad5b7276da292202c5 | 781 | use aoc_runner_derive::aoc;
/// Performs one "look-and-say" expansion step: every run of identical
/// characters in `input` is replaced by `<run length><character>`
/// (e.g. `"1211"` -> `"111221"`).
///
/// Returns an empty string for empty input (the previous implementation
/// panicked on it).
fn expand(input: &str) -> String {
    let mut chars = input.chars();
    let mut current = match chars.next() {
        Some(c) => c,
        None => return String::new(),
    };
    let mut count: usize = 1;
    // Build directly into one String instead of collecting `Vec<String>`
    // pieces and joining; output grows each step, so reserve some headroom.
    let mut output = String::with_capacity(input.len() * 2);
    for c in chars {
        if c == current {
            count += 1;
        } else {
            // Run ended: emit "<count><char>" and start counting the new char.
            output.push_str(&count.to_string());
            output.push(current);
            current = c;
            count = 1;
        }
    }
    // Emit the final run.
    output.push_str(&count.to_string());
    output.push(current);
    output
}
#[aoc(day10, part1)]
pub fn part1(input: &str) -> usize {
    // Apply 40 look-and-say steps and report the length of the result.
    let mut sequence = String::from(input);
    for _ in 0..40 {
        sequence = expand(&sequence);
    }
    sequence.len()
}
#[aoc(day10, part2)]
pub fn part2(input: &str) -> usize {
    // Same as part 1 but with 50 expansion steps.
    let mut sequence = String::from(input);
    for _ in 0..50 {
        sequence = expand(&sequence);
    }
    sequence.len()
}
| 16.270833 | 38 | 0.540333 |
791113b2d4fd6e48ff5df3033037df1899302852 | 2,070 | // SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2020 Andre Richter <[email protected]>
//! Rust runtime initialization code.
use crate::memory;
use core::ops::Range;
/// Return the range spanning the .bss section.
///
/// # Safety
///
/// - The symbol-provided addresses must be valid.
/// - The symbol-provided addresses must be usize aligned.
unsafe fn bss_range() -> Range<*mut usize> {
extern "C" {
// Boundaries of the .bss section, provided by linker script symbols.
static mut __bss_start: usize;
static mut __bss_end: usize;
}
Range {
start: &mut __bss_start,
end: &mut __bss_end,
}
}
/// Zero out the .bss section.
///
/// # Safety
///
/// - Must only be called pre `kernel_init()`.
#[inline(always)]
unsafe fn zero_bss() {
memory::zero_volatile(bss_range());
}
/// We are outsmarting the compiler here by using a trait as a layer of indirection. Because we are
/// generating PIC code, a static dispatch to `init()` would generate a relative jump from the
/// callee to `init()`. However, when calling `init()`, code just finished copying the binary to the
/// actual link-time address, and hence is still running at whatever location the previous loader
/// has put it. So we do not want a relative jump, because it would not jump to the relocated code.
///
/// By indirecting through a trait object, we can make use of the property that vtables store
/// absolute addresses. So calling `init()` this way will kick execution to the relocated binary.
pub trait RunTimeInit {
/// Equivalent to `crt0` or `c0` code in C/C++ world. Clears the `bss` section, then jumps to
/// kernel init code.
///
/// # Safety
///
/// - Only a single core must be active and running this function.
unsafe fn runtime_init(&self) -> ! {
zero_bss();
crate::kernel_init()
}
}
struct Traitor;
impl RunTimeInit for Traitor {}
/// Give the callee a `RunTimeInit` trait object.
pub fn get() -> &'static dyn RunTimeInit {
&Traitor {}
}
| 30.441176 | 100 | 0.666667 |
3a1f2a0a67586af86b4853a4deaebc99f1aa7b8b | 4,704 | // Copyright 2020 Veil Rust Developers
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
use core::fmt;
#[cfg(any(feature = "std", test))]
use std::error;
/// The specific category of failure; the human-readable message for each
/// variant lives in the `Display` impl for `Error` below.
#[derive(Debug)]
pub enum ErrorKind {
    // Context and key handling.
    RandomizeContext,
    CombinePublicKey,
    SerializeCompressedPublicKey,
    SerializeUncompressedPublicKey,
    CreatePublicKey,
    VerifyPrivateKey,
    ParseKey,
    TweakKey,
    // Signatures.
    ParseSignature,
    SerializeSignature,
    NormalizeSignature,
    VerifySignature,
    SignMessage,
    // ECDH.
    Ecdh,
    // Generator.
    ParseGenerator,
    BadSeed,
    // MLSAG.
    PrepareMlsag,
    KeyImageMlsag,
}
/// Error type wrapping an [`ErrorKind`]; formatting is provided by the
/// `Display` impl below.
#[derive(Debug)]
pub struct Error(ErrorKind);
impl Error {
    /// Creates a new error of the given kind.
    pub fn new(kind: ErrorKind) -> Self {
        Self(kind)
    }
    /// Returns a reference to the underlying kind.
    pub fn kind(&self) -> &ErrorKind {
        &self.0
    }
}
// Maps each `ErrorKind` to its user-facing message.
impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use ErrorKind::*;
        match self.0 {
            RandomizeContext => write!(f, "failed to randomize, possibly fully initialized context object not for use with signing or verification"),
            CombinePublicKey => write!(f, "sum of the public keys is not valid"),
            SerializeCompressedPublicKey => write!(f, "output length is not 33 bytes"),
            SerializeUncompressedPublicKey => write!(f, "output length is not 65 bytes"),
            CreatePublicKey => write!(f, "can not create public key from supplied private key"),
            VerifyPrivateKey => write!(f, "private key is not valid"),
            ParseKey => write!(f, "key format undefined or can not be parsed"),
            TweakKey => write!(f, "tweak out of range (chance of around 1 in 2^128 for uniformly random 32-byte arrays, or equal to zero"),
            ParseSignature => write!(f, "signature undefined or can not be parse"),
            SerializeSignature => write!(f, "not enough space was available to serialize"),
            NormalizeSignature => write!(f, "signature is already normalized"),
            VerifySignature => write!(f, "the signature is incorrect or not parsable"),
            SignMessage => write!(f, "nonce generation function failed or the private key was invalid"),
            // Ecdh
            Ecdh => write!(f, "scalar was invalid (zero or overflow)"),
            // Generator
            ParseGenerator => write!(f, "input is an invalid signature"),
            BadSeed => write!(f, "seed supplied it not acceptable"),
            // Mlsag
            PrepareMlsag => write!(f, "could not prepare MLSAG, wrong size matrix or inputs"),
            KeyImageMlsag => write!(f, "key image error"),
        }
    }
}
#[cfg(any(feature = "std", test))]
impl error::Error for Error {
    /// Every `ErrorKind` variant is a leaf error with no underlying cause,
    /// so `source` is always `None`. (The previous implementation spelled
    /// this out as an 18-arm match in which every arm returned `None`.)
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        None
    }
}
| 39.529412 | 149 | 0.647534 |
1c82ee003a301771025d34c1c9ea9d039f0d71af | 5,980 | use crate::ser::Error;
use serde::ser;
use std::str;
/// Serializer for a single scalar value ("part") that renders the value to
/// text and forwards it to a [`Sink`]; compound types are rejected.
pub struct PartSerializer<S> {
    sink: S,
}
impl<S: Sink> PartSerializer<S> {
    /// Wraps `sink` in a new part serializer.
    pub fn new(sink: S) -> Self {
        PartSerializer { sink }
    }
}
/// Consumer of the textual form of a serialized value.
///
/// Each method consumes `self`, since a part holds exactly one value.
pub trait Sink: Sized {
    type Ok;
    /// Accepts a `&'static str` value.
    fn serialize_static_str(
        self,
        value: &'static str,
    ) -> Result<Self::Ok, Error>;
    /// Accepts a borrowed string value.
    fn serialize_str(self, value: &str) -> Result<Self::Ok, Error>;
    /// Accepts an owned string value.
    fn serialize_string(self, value: String) -> Result<Self::Ok, Error>;
    /// Handles a serialized `None`.
    fn serialize_none(self) -> Result<Self::Ok, Error>;
    /// Handles a serialized `Some(value)`.
    fn serialize_some<T: ?Sized + ser::Serialize>(
        self,
        value: &T,
    ) -> Result<Self::Ok, Error>;
    /// Produces the error reported for unsupported (compound) types.
    fn unsupported(self) -> Error;
}
// Scalars are rendered to their textual form and handed to the sink; every
// compound type (seq/tuple/map/struct/variants with data) is rejected via
// `Sink::unsupported`, since a single part can only hold one value.
impl<S: Sink> ser::Serializer for PartSerializer<S> {
    type Ok = S::Ok;
    type Error = Error;
    // `Impossible` makes all compound serialization unrepresentable.
    type SerializeSeq = ser::Impossible<S::Ok, Error>;
    type SerializeTuple = ser::Impossible<S::Ok, Error>;
    type SerializeTupleStruct = ser::Impossible<S::Ok, Error>;
    type SerializeTupleVariant = ser::Impossible<S::Ok, Error>;
    type SerializeMap = ser::Impossible<S::Ok, Error>;
    type SerializeStruct = ser::Impossible<S::Ok, Error>;
    type SerializeStructVariant = ser::Impossible<S::Ok, Error>;
    fn serialize_bool(self, v: bool) -> Result<S::Ok, Error> {
        self.sink
            .serialize_static_str(if v { "true" } else { "false" })
    }
    fn serialize_i8(self, v: i8) -> Result<S::Ok, Error> {
        self.serialize_integer(v)
    }
    fn serialize_i16(self, v: i16) -> Result<S::Ok, Error> {
        self.serialize_integer(v)
    }
    fn serialize_i32(self, v: i32) -> Result<S::Ok, Error> {
        self.serialize_integer(v)
    }
    fn serialize_i64(self, v: i64) -> Result<S::Ok, Error> {
        self.serialize_integer(v)
    }
    fn serialize_u8(self, v: u8) -> Result<S::Ok, Error> {
        self.serialize_integer(v)
    }
    fn serialize_u16(self, v: u16) -> Result<S::Ok, Error> {
        self.serialize_integer(v)
    }
    fn serialize_u32(self, v: u32) -> Result<S::Ok, Error> {
        self.serialize_integer(v)
    }
    fn serialize_u64(self, v: u64) -> Result<S::Ok, Error> {
        self.serialize_integer(v)
    }
    fn serialize_f32(self, v: f32) -> Result<S::Ok, Error> {
        self.serialize_floating(v)
    }
    fn serialize_f64(self, v: f64) -> Result<S::Ok, Error> {
        self.serialize_floating(v)
    }
    fn serialize_char(self, v: char) -> Result<S::Ok, Error> {
        self.sink.serialize_string(v.to_string())
    }
    fn serialize_str(self, value: &str) -> Result<S::Ok, Error> {
        self.sink.serialize_str(value)
    }
    // Bytes are only accepted when they are valid UTF-8.
    fn serialize_bytes(self, value: &[u8]) -> Result<S::Ok, Error> {
        match str::from_utf8(value) {
            Ok(value) => self.sink.serialize_str(value),
            Err(err) => Err(Error::Utf8(err)),
        }
    }
    fn serialize_unit(self) -> Result<S::Ok, Error> {
        Err(self.sink.unsupported())
    }
    // A unit struct serializes as its type name; a unit variant as its
    // variant name.
    fn serialize_unit_struct(self, name: &'static str) -> Result<S::Ok, Error> {
        self.sink.serialize_static_str(name)
    }
    fn serialize_unit_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        variant: &'static str,
    ) -> Result<S::Ok, Error> {
        self.sink.serialize_static_str(variant)
    }
    // Newtype structs are transparent: serialize the wrapped value.
    fn serialize_newtype_struct<T: ?Sized + ser::Serialize>(
        self,
        _name: &'static str,
        value: &T,
    ) -> Result<S::Ok, Error> {
        value.serialize(self)
    }
    fn serialize_newtype_variant<T: ?Sized + ser::Serialize>(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _value: &T,
    ) -> Result<S::Ok, Error> {
        Err(self.sink.unsupported())
    }
    // `Option` handling is delegated to the sink.
    fn serialize_none(self) -> Result<S::Ok, Error> {
        self.sink.serialize_none()
    }
    fn serialize_some<T: ?Sized + ser::Serialize>(
        self,
        value: &T,
    ) -> Result<S::Ok, Error> {
        self.sink.serialize_some(value)
    }
    fn serialize_seq(
        self,
        _len: Option<usize>,
    ) -> Result<Self::SerializeSeq, Error> {
        Err(self.sink.unsupported())
    }
    fn serialize_tuple(
        self,
        _len: usize,
    ) -> Result<Self::SerializeTuple, Error> {
        Err(self.sink.unsupported())
    }
    // NB: declared as returning `Self::SerializeTuple` rather than
    // `Self::SerializeTupleStruct`; both alias `ser::Impossible`, so the
    // signature still satisfies the trait.
    fn serialize_tuple_struct(
        self,
        _name: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeTuple, Error> {
        Err(self.sink.unsupported())
    }
    fn serialize_tuple_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeTupleVariant, Error> {
        Err(self.sink.unsupported())
    }
    fn serialize_map(
        self,
        _len: Option<usize>,
    ) -> Result<Self::SerializeMap, Error> {
        Err(self.sink.unsupported())
    }
    fn serialize_struct(
        self,
        _name: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStruct, Error> {
        Err(self.sink.unsupported())
    }
    fn serialize_struct_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStructVariant, Error> {
        Err(self.sink.unsupported())
    }
}
// Number-to-text helpers shared by all integer/float `serialize_*` methods.
impl<S: Sink> PartSerializer<S> {
    /// Formats an integer with `itoa` into a stack buffer and forwards the
    /// resulting string to the sink.
    fn serialize_integer<I>(self, value: I) -> Result<S::Ok, Error>
    where
        I: itoa::Integer,
    {
        // 20 bytes is enough for any 64-bit integer incl. sign
        // ("-9223372036854775808" is 20 characters).
        let mut buf = [b'\0'; 20];
        let len = itoa::write(&mut buf[..], value).unwrap();
        // SAFETY: itoa only writes ASCII digits and '-', which is valid UTF-8.
        let part = unsafe { str::from_utf8_unchecked(&buf[0..len]) };
        ser::Serializer::serialize_str(self, part)
    }
    /// Formats a float with `ryu` and forwards the result to the sink.
    fn serialize_floating<F>(self, value: F) -> Result<S::Ok, Error>
    where
        F: ryu::Float,
    {
        let mut buf = ryu::Buffer::new();
        let part = buf.format(value);
        ser::Serializer::serialize_str(self, part)
    }
}
| 26 | 80 | 0.572742 |
26b51766fd456df6846390fb2b05a294355cb564 | 12,269 | use std::collections::HashMap;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use cached::proc_macro::cached;
use cached::SizedCache;
use encoding_rs::{Encoding, UTF_8};
use md5::{Digest, Md5};
use mime::Mime;
use once_cell::sync::Lazy;
use regex::Regex;
use reqwest::header::{HeaderMap, HeaderName, HeaderValue, LOCATION};
use reqwest::redirect::Policy;
use reqwest::{header, Body, Method, Proxy, Response};
use url::Url;
use crate::fingerprint::WebFingerPrintRequest;
use crate::ward::RawData;
use crate::RequestOption;
/// Issues one HTTP request described by `fingerprint` against `url`,
/// without following redirects. Returns the raw `Response`.
async fn send_requests(
    url: &Url,
    fingerprint: &WebFingerPrintRequest,
    config: &RequestOption,
) -> anyhow::Result<Response> {
    // Build the request target, overriding the path when the fingerprint
    // asks for something other than the site root.
    let mut target = url.clone();
    if fingerprint.path != "/" {
        target.set_path(fingerprint.path.as_str());
    }
    // Default headers: a fixed browser User-Agent plus any
    // fingerprint-specific headers.
    let ua = "Mozilla/5.0 (X11; Linux x86_64; rv:94.0) Gecko/20100101 Firefox/94.0";
    let mut headers = header::HeaderMap::new();
    headers.insert(header::USER_AGENT, header::HeaderValue::from_static(ua));
    for (k, v) in fingerprint.request_headers.clone() {
        headers.insert(HeaderName::from_str(&k)?, HeaderValue::from_str(&v)?);
    }
    // Unknown/invalid verbs fall back to GET; the body is the fingerprint's
    // base64-encoded request data (empty when decoding fails).
    let method =
        Method::from_str(&fingerprint.request_method.to_uppercase()).unwrap_or(Method::GET);
    let body_data =
        Body::from(base64::decode(fingerprint.request_data.clone()).unwrap_or_default());
    // Redirects are handled manually by the caller, hence `Policy::none()`.
    let config_proxy = config.proxy.clone();
    let proxy_obj = Proxy::custom(move |_| config_proxy.clone());
    let client = reqwest::Client::builder()
        .pool_max_idle_per_host(0)
        .danger_accept_invalid_certs(true)
        .danger_accept_invalid_hostnames(true)
        .default_headers(headers)
        .redirect(Policy::none())
        .timeout(Duration::new(config.timeout, 0))
        .proxy(proxy_obj)
        .build()?;
    Ok(client
        .request(method, target.as_ref())
        .body(body_data)
        .send()
        .await?)
}
// Finds a charset declaration inside an HTML body, matching either the
// quoted form `charset="..."` or the bare form `charset=..."` up to the
// next double quote.
static RE_COMPILE_BY_CHARSET: Lazy<Regex> = Lazy::new(|| -> Regex {
    Regex::new(r#"(?im)charset="(.*?)"|charset=(.*?)""#).expect("RE_COMPILE_BY_CHARSET")
});
/// Decodes a response body to lowercase text. The charset is taken from the
/// `Content-Type` header when present, otherwise from a `charset=`
/// declaration found in the body, otherwise UTF-8.
fn get_default_encoding(byte: &[u8], headers: HeaderMap) -> String {
    // First decode lossily as UTF-8 just to scan for a charset declaration.
    let (html, _, _) = UTF_8.decode(byte);
    let mut default_encoding = "utf-8";
    if let Some(caps) = RE_COMPILE_BY_CHARSET.captures(&html) {
        // Keep the last non-empty capture group — the charset value itself.
        if let Some(m) = caps.iter().flatten().last() {
            default_encoding = m.as_str();
        }
    }
    // A charset parameter in the Content-Type header wins over the body.
    let content_type = headers
        .get(header::CONTENT_TYPE)
        .and_then(|value| value.to_str().ok())
        .and_then(|value| value.parse::<Mime>().ok());
    let label = match content_type.as_ref().and_then(|mime| mime.get_param("charset")) {
        Some(charset) => charset.as_str(),
        None => default_encoding,
    };
    // Unrecognized encoding labels fall back to UTF-8.
    let encoding = Encoding::for_label(label.as_bytes()).unwrap_or(UTF_8);
    let (text, _, _) = encoding.decode(byte);
    text.to_lowercase()
}
/// Consumes a `Response` and packages everything later matching stages need
/// (decoded body, headers, status, favicon hashes, next-hop URL) into a
/// shared `RawData`.
async fn fetch_raw_data(
    res: Response,
    is_index: bool,
    config: RequestOption,
) -> anyhow::Result<Arc<RawData>> {
    let path: String = res.url().path().to_string();
    let url = res.url().join("/")?;
    let status_code = res.status();
    let headers = res.headers().clone();
    let base_url = res.url().clone();
    // Body read failures degrade to an empty string rather than aborting.
    // Note: `get_default_encoding` lowercases the decoded text.
    let text = match res.bytes().await {
        Ok(byte) => get_default_encoding(&byte, headers.clone()),
        Err(_) => String::from(""),
    };
    let mut favicon: HashMap<String, String> = HashMap::new();
    if is_index && !status_code.is_server_error() {
        // Only extract favicon links when fetching the index page.
        favicon = find_favicon_tag(base_url, &text, config).await;
    }
    // Look for the next-hop URL in the response headers and body.
    let get_next_url = |headers: &HeaderMap, url: &Url, text: &String| {
        // Prefer the Location header; resolve relative redirects against
        // the current URL.
        let mut next_url = headers
            .get(LOCATION)
            .and_then(|location| location.to_str().ok())
            .and_then(|location| {
                if location.starts_with("http://") || location.starts_with("https://") {
                    Some(Url::parse(location).unwrap_or_else(|_| url.clone()))
                } else {
                    url.join(location).ok()
                }
            });
        // Fall back to JS/meta redirects, but only scan small bodies
        // (<= 1 KiB) to avoid false positives on full pages.
        if next_url.is_none() && text.len() <= 1024 {
            for reg in RE_COMPILE_BY_JUMP.iter() {
                if let Some(x) = reg.captures(text) {
                    let u = x.name("name").map_or("", |m| m.as_str());
                    if u.starts_with("http://") || u.starts_with("https://") {
                        next_url = Some(Url::parse(u).unwrap_or_else(|_| url.clone()));
                        break;
                    }
                    next_url = Some(url.join(u).unwrap_or_else(|_| url.clone()));
                    break;
                }
            }
        }
        next_url
    };
    let next_url = get_next_url(&headers, &url, &text);
    let raw_data = Arc::new(RawData {
        url,
        path,
        headers,
        status_code,
        text,
        favicon,
        next_url,
    });
    Ok(raw_data)
}
// Maps a favicon URL to the MD5 hash of its content (cached by URL).
#[cached(
    type = "SizedCache<String, String>",
    create = "{ SizedCache::with_size(100) }",
    result = true,
    convert = r#"{ format!("{}", url.as_str().to_owned()) }"#
)]
async fn get_favicon_hash(url: &Url, config: &RequestOption) -> anyhow::Result<String> {
    // Plain GET against the favicon URL itself.
    let default_request = WebFingerPrintRequest {
        path: String::from("/"),
        request_method: String::from("get"),
        request_headers: Default::default(),
        request_data: String::new(),
    };
    let res = send_requests(url, &default_request, config).await?;
    if !res.status().is_success() {
        // Bug fix: this previously returned `io::Error::last_os_error()`,
        // which reports whatever errno happens to hold — unrelated to the
        // HTTP failure. Report the actual status instead.
        return Err(anyhow::anyhow!(
            "favicon request to {} failed with status {}",
            url,
            res.status()
        ));
    }
    let content = res.bytes().await?;
    let mut hasher = Md5::new();
    hasher.update(content);
    let result = hasher.finalize();
    // Lowercase hex digest of the favicon body.
    let favicon_md5: String = format!("{:x}", &result);
    Ok(favicon_md5)
}
// Extracts favicon links from HTML <link> tags and maps each resolved URL
// to the MD5 hash of its content.
async fn find_favicon_tag(
    base_url: reqwest::Url,
    text: &str,
    config: RequestOption,
) -> HashMap<String, String> {
    let mut link_tags = HashMap::new();
    for reg in RE_COMPILE_BY_ICON.iter() {
        if let Some(x) = reg.captures(text) {
            let u = x.name("name").map_or("", |m| m.as_str());
            // Resolve the href: absolute URLs are parsed directly, anything
            // else is resolved against the page's base URL. On failure fall
            // back to the base URL itself (preserves previous behavior).
            // This replaces two copy-pasted hash/insert branches that
            // differed only in how `favicon_url` was computed.
            let favicon_url = if u.starts_with("http://") || u.starts_with("https://") {
                Url::parse(u).unwrap_or_else(|_| base_url.clone())
            } else {
                base_url.join(u).unwrap_or_else(|_| base_url.clone())
            };
            if let Ok(favicon_md5) = get_favicon_hash(&favicon_url, &config).await {
                link_tags.insert(String::from(favicon_url), favicon_md5);
            }
        }
    }
    // Always try the conventional default location as well, unless a tag
    // already produced an entry for it.
    let favicon_url = base_url.join("/favicon.ico").expect("favicon.icon");
    if let std::collections::hash_map::Entry::Vacant(e) =
        link_tags.entry(String::from(favicon_url.clone()))
    {
        if let Ok(favicon_md5) = get_favicon_hash(&favicon_url, &config).await {
            e.insert(favicon_md5);
        }
    }
    link_tags
}
// Supports a subset of in-body redirects: JS `location.href`/`window.open`/
// `window.location` assignments and `<meta http-equiv=refresh>` tags. Each
// pattern captures the target URL in the `name` group.
static RE_COMPILE_BY_JUMP: Lazy<Vec<Regex>> = Lazy::new(|| -> Vec<Regex> {
    let js_reg = vec![
        r#"(?im)[ |.|:]location\.href.*?=.*?['|"](?P<name>.*?)['|"]"#,
        r#"(?im)window.*?\.open\(['|"](?P<name>.*?)['|"]"#,
        r#"(?im)window.*?\.location=['|"](?P<name>.*?)['|"]"#,
        r#"(?im)<meta.*?http-equiv=.*?refresh.*?url=(?P<name>.*?)['|"]/?>"#,
    ];
    let re_list: Vec<Regex> = js_reg
        .iter()
        .map(|reg| Regex::new(reg).expect("RE_COMPILE_BY_JUMP"))
        .collect();
    re_list
});
// Matches `<link rel=...icon... href=...>` tags; the `name` group captures
// the favicon href.
static RE_COMPILE_BY_ICON: Lazy<Vec<Regex>> = Lazy::new(|| -> Vec<Regex> {
    let js_reg = vec![r#"(?im)<link rel=.*?icon.*?href=.*?(?P<name>.*?)['"/]{0,1}>"#];
    let re_list: Vec<Regex> = js_reg
        .iter()
        .map(|reg| Regex::new(reg).expect("compiled regular expression"))
        .collect();
    re_list
});
// Captures the contents of a `<title>` tag in the `name` group.
static RE_COMPILE_BY_TITLE: Lazy<Regex> = Lazy::new(|| -> Regex {
    Regex::new(r#"(?im)<title>(?P<name>.*?)</title>"#).expect("compiled regular expression")
});
/// Returns the contents of the first `<title>` tag in the response body,
/// or an empty string when no title is present.
pub fn get_title(raw_data: &Arc<RawData>) -> String {
    RE_COMPILE_BY_TITLE
        .captures(&raw_data.text)
        .and_then(|caps| caps.name("name"))
        .map_or_else(String::new, |m| m.as_str().to_string())
}
// Index-page request: fetches `url_str` (trying https:// then http:// when
// no scheme was given), following up to 5 manual redirects and collecting a
// `RawData` snapshot for each hop. Results are cached by (url, fingerprint).
#[cached(
    type = "SizedCache<String, Vec<Arc<RawData>>>",
    create = "{ SizedCache::with_size(100) }",
    result = true,
    convert = r#"{ format!("{}{:?}", url_str.to_owned(), special_wfp) }"#
)]
pub async fn index_fetch(
    url_str: &str,
    special_wfp: &WebFingerPrintRequest,
    is_index: bool,
    is_special: bool,
    config: RequestOption,
) -> anyhow::Result<Vec<Arc<RawData>>> {
    let mut is_index: bool = is_index;
    let mut is_start_with_http: bool = true;
    let mut raw_data_list: Vec<Arc<RawData>> = vec![];
    let schemes: [String; 2] = [String::from("https://"), String::from("http://")];
    for mut scheme in schemes {
        // Maximum number of redirect hops.
        let mut max_redirect = 5;
        let mut scheme_url = url_str;
        // Prepend the scheme only when the caller gave a bare host.
        if !url_str.to_lowercase().starts_with("http://")
            && !url_str.to_lowercase().starts_with("https://")
        {
            scheme.push_str(url_str);
            scheme_url = scheme.as_str();
            is_start_with_http = false;
        }
        let mut url = Url::parse(scheme_url)?;
        loop {
            let mut next_url: Option<Url> = Option::None;
            // Request failures are swallowed: the loop simply records no
            // data for this hop and stops (no next_url).
            if let Ok(res) = send_requests(&url, special_wfp, &config).await {
                if let Ok(raw_data) = fetch_raw_data(res, is_index, config.clone()).await {
                    next_url = raw_data.next_url.clone();
                    raw_data_list.push(raw_data);
                };
                // Only the very first response counts as the index page.
                is_index = false;
            };
            // Special fingerprints are single-shot: never follow redirects.
            if is_special {
                break;
            }
            match next_url.clone() {
                Some(next_jump_url) => {
                    url = next_jump_url;
                }
                None => {
                    break;
                }
            }
            max_redirect -= 1;
            if max_redirect <= 0 {
                break;
            }
        }
        // If a scheme was given explicitly, there's no need to try both.
        if is_start_with_http {
            break;
        }
    }
    Ok(raw_data_list)
}
#[cfg(test)]
mod tests {
    use crate::request::send_requests;
    use crate::{RequestOption, WebFingerPrintRequest};
    use url::Url;
    // https://docs.rs/tokio/latest/tokio/attr.test.html
    // NOTE(review): both tests hit live external services (httpbin.org,
    // badssl.com) and require network access; they will fail offline or if
    // those pages change.
    #[tokio::test]
    async fn test_send_requests() {
        let test_url = Url::parse("https://httpbin.org/").unwrap();
        let fingerprint = WebFingerPrintRequest {
            path: String::from("/"),
            request_method: String::from("GET"),
            request_headers: Default::default(),
            request_data: String::from(""),
        };
        let timeout = 10_u64;
        let request_config = RequestOption::new(&timeout, "");
        let res = send_requests(&test_url, &fingerprint, &request_config)
            .await
            .unwrap();
        // httpbin's index page presumably embeds a Swagger UI — this relies
        // on the live page content.
        assert!(res.text().await.unwrap().contains("swagger-ui"));
    }
    // Verifies that expired TLS certificates are accepted
    // (danger_accept_invalid_certs is set in send_requests).
    #[tokio::test]
    async fn test_bad_ssl_send_requests() {
        let test_url = Url::parse("https://expired.badssl.com/").unwrap();
        let fingerprint = WebFingerPrintRequest {
            path: String::from("/"),
            request_method: String::from("GET"),
            request_headers: Default::default(),
            request_data: String::from(""),
        };
        let timeout = 10_u64;
        let request_config = RequestOption::new(&timeout, "");
        let res = send_requests(&test_url, &fingerprint, &request_config)
            .await
            .unwrap();
        assert!(res
            .text()
            .await
            .unwrap()
            .contains("<title>expired.badssl.com</title>"));
    }
}
| 35.154728 | 92 | 0.565979 |
29a8d5ae418df18ee55be0e6a09a9865c2c6544a | 44 | pub use types::TestResult;
pub mod types;
| 8.8 | 26 | 0.727273 |
1d24044674c9ba32786865852660fb237a3087f2 | 9,285 | // Possibly unused, but useful during development.
pub use crate::proxy::http;
use crate::{cache, Error};
pub use linkerd2_buffer as buffer;
pub use linkerd2_concurrency_limit::ConcurrencyLimit;
pub use linkerd2_stack::{
self as stack, layer, BoxNewService, Fail, NewRouter, NewService, NewUnwrapOr,
};
pub use linkerd2_stack_tracing::{InstrumentMake, InstrumentMakeLayer};
pub use linkerd2_timeout::{self as timeout, FailFast};
use std::{
task::{Context, Poll},
time::Duration,
};
use tower::{
layer::util::{Identity, Stack as Pair},
make::MakeService,
};
pub use tower::{
layer::Layer, service_fn as mk, spawn_ready::SpawnReady, util::Either, Service, ServiceExt,
};
/// A composable chain of `tower` layers, extended via the `push*` methods.
#[derive(Clone, Debug)]
pub struct Layers<L>(L);
/// A service/`NewService` stack built by layering onto an inner value.
#[derive(Clone, Debug)]
pub struct Stack<S>(S);
/// Starts an empty layer chain (the identity layer).
pub fn layers() -> Layers<Identity> {
    Layers(Identity::new())
}
/// Starts a stack from an existing inner service or `NewService`.
pub fn stack<S>(inner: S) -> Stack<S> {
    Stack(inner)
}
/// Starts a stack from the no-op `IdentityProxy` base.
pub fn proxies() -> Stack<IdentityProxy> {
    Stack(IdentityProxy(()))
}
/// A `NewService` base that produces `()` services; useful as the root of
/// proxy stacks where layers supply all behavior.
#[derive(Copy, Clone, Debug)]
pub struct IdentityProxy(());
impl<T> NewService<T> for IdentityProxy {
    type Service = ();
    // Ignores the target entirely and yields the unit service.
    fn new_service(&mut self, _: T) -> Self::Service {}
}
#[allow(dead_code)]
impl<L> Layers<L> {
    /// Appends `outer` to the chain; it wraps everything pushed so far.
    pub fn push<O>(self, outer: O) -> Layers<Pair<L, O>> {
        Layers(Pair::new(self.0, outer))
    }
    /// Appends a layer that rewrites the target via `map_target`.
    pub fn push_map_target<M>(self, map_target: M) -> Layers<Pair<L, stack::MapTargetLayer<M>>> {
        self.push(stack::MapTargetLayer::new(map_target))
    }
    /// Wraps an inner `MakeService` to be a `NewService`.
    pub fn push_into_new_service(
        self,
    ) -> Layers<Pair<L, stack::new_service::FromMakeServiceLayer>> {
        self.push(stack::new_service::FromMakeServiceLayer::default())
    }
    /// Buffers requests in an mpsc, spawning the inner service onto a dedicated task.
    pub fn push_spawn_buffer<Req, Rsp>(
        self,
        capacity: usize,
    ) -> Layers<Pair<L, buffer::SpawnBufferLayer<Req, Rsp>>>
    where
        Req: Send + 'static,
        Rsp: Send + 'static,
    {
        self.push(buffer::SpawnBufferLayer::new(capacity))
    }
    /// Appends a layer applied to each *produced service* rather than to
    /// the make/new-service itself.
    pub fn push_on_response<U>(self, layer: U) -> Layers<Pair<L, stack::OnResponseLayer<U>>> {
        self.push(stack::OnResponseLayer::new(layer))
    }
    /// Appends a layer that transforms responses with `map_response`.
    pub fn push_map_response<R: Clone>(
        self,
        map_response: R,
    ) -> Layers<Pair<L, stack::MapResponseLayer<R>>> {
        self.push(stack::MapResponseLayer::new(map_response))
    }
    /// Appends a tracing layer whose span is derived by `get_span`.
    pub fn push_instrument<G: Clone>(self, get_span: G) -> Layers<Pair<L, InstrumentMakeLayer<G>>> {
        self.push(InstrumentMakeLayer::new(get_span))
    }
}
// A `Layers` chain is itself a `tower::Layer`: applying it wraps `inner`
// with the whole accumulated chain.
impl<M, L: Layer<M>> Layer<M> for Layers<L> {
    type Service = L::Service;
    fn layer(&self, inner: M) -> Self::Service {
        self.0.layer(inner)
    }
}
#[allow(dead_code)]
impl<S> Stack<S> {
    /// Wraps the current stack with `layer`.
    pub fn push<L: Layer<S>>(self, layer: L) -> Stack<L::Service> {
        Stack(layer.layer(self.0))
    }
    /// Wraps the stack so that targets are rewritten by `map_target` before
    /// reaching the inner stack.
    pub fn push_map_target<M: Clone>(
        self,
        map_target: M,
    ) -> Stack<stack::map_target::MapTargetService<S, M>> {
        self.push(stack::map_target::MapTargetLayer::new(map_target))
    }
    /// Wraps the stack with a filter that may reject requests before they
    /// reach the inner service.
    pub fn push_request_filter<F: Clone>(self, filter: F) -> Stack<stack::RequestFilter<F, S>> {
        self.push(layer::mk(|inner| {
            stack::RequestFilter::new(filter.clone(), inner)
        }))
    }
    /// Wraps a `Service<T>` as a `Service<()>`.
    ///
    /// Each time the service is called, the `T`-typed request is cloned and
    /// issued into the inner service.
    pub fn push_make_thunk(self) -> Stack<stack::make_thunk::MakeThunk<S>> {
        self.push(layer::mk(stack::make_thunk::MakeThunk::new))
    }
    /// Instruments the stack with a tracing span derived by `get_span`.
    pub fn instrument<G: Clone>(self, get_span: G) -> Stack<InstrumentMake<G, S>> {
        self.push(InstrumentMakeLayer::new(get_span))
    }
    /// Instruments the stack with a span derived from the target itself.
    pub fn instrument_from_target(self) -> Stack<InstrumentMake<(), S>> {
        self.push(InstrumentMakeLayer::from_target())
    }
    /// Wraps an inner `MakeService` to be a `NewService`.
    pub fn into_new_service(self) -> Stack<stack::new_service::FromMakeService<S>> {
        self.push(stack::new_service::FromMakeServiceLayer::default())
    }
    /// Adapts an inner `NewService` to the `MakeService` interface.
    pub fn into_make_service<T>(self) -> Stack<stack::new_service::IntoMakeService<S>>
    where
        S: NewService<T>,
    {
        Stack(stack::new_service::IntoMakeService::new(self.0))
    }
    /// Buffer requests when the next layer is out of capacity.
    pub fn spawn_buffer<Req, Rsp>(self, capacity: usize) -> Stack<buffer::Buffer<Req, Rsp>>
    where
        Req: Send + 'static,
        Rsp: Send + 'static,
        S: Service<Req, Response = Rsp> + Send + 'static,
        S::Error: Into<Error> + Send + Sync,
        S::Future: Send,
    {
        self.push(buffer::SpawnBufferLayer::new(capacity))
    }
    /// Assuming `S` implements `NewService` or `MakeService`, applies the given
    /// `L`-typed layer on each service produced by `S`.
    pub fn push_on_response<L: Clone>(self, layer: L) -> Stack<stack::OnResponse<L, S>> {
        self.push(stack::OnResponseLayer::new(layer))
    }
    /// Fails requests that take longer than `timeout`.
    pub fn push_timeout(self, timeout: Duration) -> Stack<tower::timeout::Timeout<S>> {
        self.push(tower::timeout::TimeoutLayer::new(timeout))
    }
    /// Transforms responses with `map_response`.
    pub fn push_map_response<R: Clone>(self, map_response: R) -> Stack<stack::MapResponse<S, R>> {
        self.push(stack::MapResponseLayer::new(map_response))
    }
    /// Inserts the target into each HTTP request's extensions.
    pub fn push_http_insert_target(self) -> Stack<http::insert::target::NewService<S>> {
        self.push(http::insert::target::layer())
    }
    /// Caches produced services per target, evicting after `idle` time.
    pub fn push_cache<T>(self, idle: Duration) -> Stack<cache::Cache<T, S>>
    where
        T: Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static,
        S: NewService<T> + 'static,
        S::Service: Send + Sync + 'static,
    {
        self.push(cache::Cache::layer(idle))
    }
    /// Push a service that either calls the inner service if it is ready, or
    /// calls a `secondary` service if the inner service fails to become ready
    /// for the `skip_after` duration.
    pub fn push_when_unready<B: Clone>(
        self,
        secondary: B,
        skip_after: Duration,
    ) -> Stack<stack::NewSwitchReady<S, B>> {
        self.push(layer::mk(|inner: S| {
            stack::NewSwitchReady::new(inner, secondary.clone(), skip_after)
        }))
    }
    /// Routes each target either to the inner stack or to `other`, as
    /// decided by the `switch` predicate.
    pub fn push_switch<T: Clone, U: Clone>(
        self,
        switch: T,
        other: U,
    ) -> Stack<stack::MakeSwitch<T, S, U>> {
        self.push(layer::mk(|inner: S| {
            stack::MakeSwitch::new(switch.clone(), inner, other.clone())
        }))
    }
    // The `check_*` methods below are compile-time assertions: each is an
    // identity function whose bounds force the compiler to verify the
    // stack's shape at that point in a builder chain.
    /// Validates that this stack serves T-typed targets.
    pub fn check_new<T>(self) -> Self
    where
        S: NewService<T>,
    {
        self
    }
    /// Validates that this stack serves T-typed targets with cloneable services.
    pub fn check_new_clone<T>(self) -> Self
    where
        S: NewService<T>,
        S::Service: Clone,
    {
        self
    }
    /// Validates that this stack serves T-typed targets.
    pub fn check_new_service<T, Req>(self) -> Self
    where
        S: NewService<T>,
        S::Service: Service<Req>,
    {
        self
    }
    /// Validates that this stack serves T-typed targets.
    pub fn check_clone_new_service<T, Req>(self) -> Self
    where
        S: NewService<T> + Clone,
        S::Service: Service<Req>,
    {
        self
    }
    /// Validates that this stack can be cloned
    pub fn check_clone(self) -> Self
    where
        S: Clone,
    {
        self
    }
    /// Validates that this stack serves T-typed targets.
    pub fn check_service<T>(self) -> Self
    where
        S: Service<T>,
    {
        self
    }
    /// Validates that this stack serves T-typed targets with `Unpin` futures.
    pub fn check_service_unpin<T>(self) -> Self
    where
        S: Service<T>,
        S::Future: Unpin,
    {
        self
    }
    /// Validates that this stack's responses are U-typed.
    pub fn check_service_response<T, U>(self) -> Self
    where
        S: Service<T, Response = U>,
    {
        self
    }
    /// Validates that this stack serves T-typed targets.
    pub fn check_make_service<T, U>(self) -> Self
    where
        S: MakeService<T, U>,
    {
        self
    }
    /// Validates that this stack serves T-typed targets.
    pub fn check_make_service_clone<T, U>(self) -> Self
    where
        S: MakeService<T, U> + Clone,
        S::Service: Clone,
    {
        self
    }
    /// Validates that produced services are `Send + 'static` with sendable
    /// responses/futures — i.e. safe to drive from a spawned task.
    pub fn check_new_send_and_static<M, T, Req>(self) -> Self
    where
        S: NewService<T, Service = M>,
        M: Service<Req> + Send + 'static,
        M::Response: Send + 'static,
        M::Error: Into<Error> + Send + Sync,
        M::Future: Send,
    {
        self
    }
    /// Unwraps the stack, returning the composed inner value.
    pub fn into_inner(self) -> S {
        self.0
    }
}
// A stack of a `NewService` is itself a `NewService`, delegating to the
// composed inner value.
impl<T, N> NewService<T> for Stack<N>
where
    N: NewService<T>,
{
    type Service = N::Service;
    fn new_service(&mut self, t: T) -> Self::Service {
        self.0.new_service(t)
    }
}
// A stack of a `Service` is itself a `Service`, delegating to the composed
// inner value.
impl<T, S> Service<T> for Stack<S>
where
    S: Service<T>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.0.poll_ready(cx)
    }
    fn call(&mut self, t: T) -> Self::Future {
        self.0.call(t)
    }
}
| 27.470414 | 100 | 0.598923 |
336264d99f90030e40188d0714f234d6f40bb016 | 40,225 | //! C definitions used by libnative that don't belong in liblibc
#![allow(nonstandard_style)]
#![cfg_attr(test, allow(dead_code))]
#![unstable(issue = "none", feature = "windows_c")]
use crate::mem;
use crate::os::raw::{c_char, c_int, c_long, c_longlong, c_uint, c_ulong, c_ushort};
use crate::os::windows::io::{BorrowedHandle, HandleOrInvalid, HandleOrNull};
use crate::ptr;
use core::ffi::NonZero_c_ulong;
use libc::{c_void, size_t, wchar_t};
#[path = "c/errors.rs"] // c.rs is included from two places so we need to specify this
mod errors;
pub use errors::*;
pub use self::EXCEPTION_DISPOSITION::*;
pub use self::FILE_INFO_BY_HANDLE_CLASS::*;
pub type DWORD_PTR = ULONG_PTR;
pub type DWORD = c_ulong;
pub type NonZeroDWORD = NonZero_c_ulong;
pub type HANDLE = LPVOID;
pub type HINSTANCE = HANDLE;
pub type HMODULE = HINSTANCE;
pub type HRESULT = LONG;
pub type BOOL = c_int;
pub type BYTE = u8;
pub type BOOLEAN = BYTE;
pub type GROUP = c_uint;
pub type LARGE_INTEGER = c_longlong;
pub type LONG = c_long;
pub type UINT = c_uint;
pub type WCHAR = u16;
pub type USHORT = c_ushort;
pub type SIZE_T = usize;
pub type WORD = u16;
pub type CHAR = c_char;
pub type CCHAR = c_char;
pub type ULONG_PTR = usize;
pub type ULONG = c_ulong;
pub type NTSTATUS = LONG;
pub type ACCESS_MASK = DWORD;
pub type LPBOOL = *mut BOOL;
pub type LPBYTE = *mut BYTE;
pub type LPCSTR = *const CHAR;
pub type LPCWSTR = *const WCHAR;
pub type LPDWORD = *mut DWORD;
pub type LPHANDLE = *mut HANDLE;
pub type LPOVERLAPPED = *mut OVERLAPPED;
pub type LPPROCESS_INFORMATION = *mut PROCESS_INFORMATION;
pub type LPSECURITY_ATTRIBUTES = *mut SECURITY_ATTRIBUTES;
pub type LPSTARTUPINFO = *mut STARTUPINFO;
pub type LPVOID = *mut c_void;
pub type LPWCH = *mut WCHAR;
pub type LPWIN32_FIND_DATAW = *mut WIN32_FIND_DATAW;
pub type LPWSADATA = *mut WSADATA;
pub type LPWSAPROTOCOL_INFO = *mut WSAPROTOCOL_INFO;
pub type LPWSTR = *mut WCHAR;
pub type LPFILETIME = *mut FILETIME;
pub type LPSYSTEM_INFO = *mut SYSTEM_INFO;
pub type LPWSABUF = *mut WSABUF;
pub type LPWSAOVERLAPPED = *mut c_void;
pub type LPWSAOVERLAPPED_COMPLETION_ROUTINE = *mut c_void;
pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
pub type PLARGE_INTEGER = *mut c_longlong;
pub type PSRWLOCK = *mut SRWLOCK;
pub type SOCKET = crate::os::windows::raw::SOCKET;
pub type socklen_t = c_int;
pub type ADDRESS_FAMILY = USHORT;
pub const TRUE: BOOL = 1;
pub const FALSE: BOOL = 0;
pub const CSTR_LESS_THAN: c_int = 1;
pub const CSTR_EQUAL: c_int = 2;
pub const CSTR_GREATER_THAN: c_int = 3;
pub const FILE_ATTRIBUTE_READONLY: DWORD = 0x1;
pub const FILE_ATTRIBUTE_DIRECTORY: DWORD = 0x10;
pub const FILE_ATTRIBUTE_REPARSE_POINT: DWORD = 0x400;
pub const INVALID_FILE_ATTRIBUTES: DWORD = DWORD::MAX;
pub const FILE_SHARE_DELETE: DWORD = 0x4;
pub const FILE_SHARE_READ: DWORD = 0x1;
pub const FILE_SHARE_WRITE: DWORD = 0x2;
pub const FILE_OPEN: ULONG = 0x00000001;
pub const FILE_OPEN_REPARSE_POINT: ULONG = 0x200000;
pub const OBJ_DONT_REPARSE: ULONG = 0x1000;
pub const CREATE_ALWAYS: DWORD = 2;
pub const CREATE_NEW: DWORD = 1;
pub const OPEN_ALWAYS: DWORD = 4;
pub const OPEN_EXISTING: DWORD = 3;
pub const TRUNCATE_EXISTING: DWORD = 5;
pub const FILE_LIST_DIRECTORY: DWORD = 0x1;
pub const FILE_WRITE_DATA: DWORD = 0x00000002;
pub const FILE_APPEND_DATA: DWORD = 0x00000004;
pub const FILE_WRITE_EA: DWORD = 0x00000010;
pub const FILE_WRITE_ATTRIBUTES: DWORD = 0x00000100;
pub const DELETE: DWORD = 0x10000;
pub const READ_CONTROL: DWORD = 0x00020000;
pub const SYNCHRONIZE: DWORD = 0x00100000;
pub const GENERIC_READ: DWORD = 0x80000000;
pub const GENERIC_WRITE: DWORD = 0x40000000;
pub const STANDARD_RIGHTS_WRITE: DWORD = READ_CONTROL;
pub const FILE_GENERIC_WRITE: DWORD = STANDARD_RIGHTS_WRITE
| FILE_WRITE_DATA
| FILE_WRITE_ATTRIBUTES
| FILE_WRITE_EA
| FILE_APPEND_DATA
| SYNCHRONIZE;
pub const FILE_FLAG_OPEN_REPARSE_POINT: DWORD = 0x00200000;
pub const FILE_FLAG_BACKUP_SEMANTICS: DWORD = 0x02000000;
pub const SECURITY_SQOS_PRESENT: DWORD = 0x00100000;
pub const FIONBIO: c_ulong = 0x8004667e;
// Mirrors the Win32 `WIN32_FIND_DATAW` struct returned by FindFirstFileW /
// FindNextFileW. `#[repr(C)]` keeps the layout ABI-compatible.
#[repr(C)]
#[derive(Copy)]
pub struct WIN32_FIND_DATAW {
    pub dwFileAttributes: DWORD,
    pub ftCreationTime: FILETIME,
    pub ftLastAccessTime: FILETIME,
    pub ftLastWriteTime: FILETIME,
    pub nFileSizeHigh: DWORD,
    pub nFileSizeLow: DWORD,
    pub dwReserved0: DWORD,
    pub dwReserved1: DWORD,
    pub cFileName: [wchar_t; 260], // #define MAX_PATH 260
    pub cAlternateFileName: [wchar_t; 14],
}
// Manual Clone via the Copy impl; the type is plain-old-data.
impl Clone for WIN32_FIND_DATAW {
    fn clone(&self) -> Self {
        *self
    }
}
pub const WSA_FLAG_OVERLAPPED: DWORD = 0x01;
pub const WSA_FLAG_NO_HANDLE_INHERIT: DWORD = 0x80;
pub const WSADESCRIPTION_LEN: usize = 256;
pub const WSASYS_STATUS_LEN: usize = 128;
pub const WSAPROTOCOL_LEN: DWORD = 255;
pub const INVALID_SOCKET: SOCKET = !0;
pub const MAX_PROTOCOL_CHAIN: DWORD = 7;
pub const MAXIMUM_REPARSE_DATA_BUFFER_SIZE: usize = 16 * 1024;
pub const FSCTL_GET_REPARSE_POINT: DWORD = 0x900a8;
pub const IO_REPARSE_TAG_SYMLINK: DWORD = 0xa000000c;
pub const IO_REPARSE_TAG_MOUNT_POINT: DWORD = 0xa0000003;
pub const SYMLINK_FLAG_RELATIVE: DWORD = 0x00000001;
pub const FSCTL_SET_REPARSE_POINT: DWORD = 0x900a4;
pub const SYMBOLIC_LINK_FLAG_DIRECTORY: DWORD = 0x1;
pub const SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE: DWORD = 0x2;
// Note that these are not actually HANDLEs, just values to pass to GetStdHandle
pub const STD_INPUT_HANDLE: DWORD = -10i32 as DWORD;
pub const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
pub const STD_ERROR_HANDLE: DWORD = -12i32 as DWORD;
pub const PROGRESS_CONTINUE: DWORD = 0;
pub const E_NOTIMPL: HRESULT = 0x80004001u32 as HRESULT;
pub const INVALID_HANDLE_VALUE: HANDLE = ptr::invalid_mut(!0);
pub const FACILITY_NT_BIT: DWORD = 0x1000_0000;
pub const FORMAT_MESSAGE_FROM_SYSTEM: DWORD = 0x00001000;
pub const FORMAT_MESSAGE_FROM_HMODULE: DWORD = 0x00000800;
pub const FORMAT_MESSAGE_IGNORE_INSERTS: DWORD = 0x00000200;
pub const TLS_OUT_OF_INDEXES: DWORD = 0xFFFFFFFF;
pub const DLL_THREAD_DETACH: DWORD = 3;
pub const DLL_PROCESS_DETACH: DWORD = 0;
pub const INFINITE: DWORD = !0;
pub const DUPLICATE_SAME_ACCESS: DWORD = 0x00000002;
pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE { ptr: ptr::null_mut() };
pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: ptr::null_mut() };
pub const DETACHED_PROCESS: DWORD = 0x00000008;
pub const CREATE_NEW_PROCESS_GROUP: DWORD = 0x00000200;
pub const CREATE_UNICODE_ENVIRONMENT: DWORD = 0x00000400;
pub const STARTF_USESTDHANDLES: DWORD = 0x00000100;
pub const AF_INET: c_int = 2;
pub const AF_INET6: c_int = 23;
pub const SD_BOTH: c_int = 2;
pub const SD_RECEIVE: c_int = 0;
pub const SD_SEND: c_int = 1;
pub const SOCK_DGRAM: c_int = 2;
pub const SOCK_STREAM: c_int = 1;
pub const SOCKET_ERROR: c_int = -1;
pub const SOL_SOCKET: c_int = 0xffff;
pub const SO_LINGER: c_int = 0x0080;
pub const SO_RCVTIMEO: c_int = 0x1006;
pub const SO_SNDTIMEO: c_int = 0x1005;
pub const IPPROTO_IP: c_int = 0;
pub const IPPROTO_TCP: c_int = 6;
pub const IPPROTO_IPV6: c_int = 41;
pub const TCP_NODELAY: c_int = 0x0001;
pub const IP_TTL: c_int = 4;
pub const IPV6_V6ONLY: c_int = 27;
pub const SO_ERROR: c_int = 0x1007;
pub const SO_BROADCAST: c_int = 0x0020;
pub const IP_MULTICAST_LOOP: c_int = 11;
pub const IPV6_MULTICAST_LOOP: c_int = 11;
pub const IP_MULTICAST_TTL: c_int = 10;
pub const IP_ADD_MEMBERSHIP: c_int = 12;
pub const IP_DROP_MEMBERSHIP: c_int = 13;
pub const IPV6_ADD_MEMBERSHIP: c_int = 12;
pub const IPV6_DROP_MEMBERSHIP: c_int = 13;
pub const MSG_PEEK: c_int = 0x2;
// Winsock `linger` option payload (used with SO_LINGER).
#[repr(C)]
#[derive(Copy, Clone)]
pub struct linger {
    pub l_onoff: c_ushort,
    pub l_linger: c_ushort,
}
// IPv4 multicast group membership request (IP_ADD/DROP_MEMBERSHIP payload).
#[repr(C)]
pub struct ip_mreq {
    pub imr_multiaddr: in_addr,
    pub imr_interface: in_addr,
}
// IPv6 multicast group membership request (IPV6_ADD/DROP_MEMBERSHIP payload).
#[repr(C)]
pub struct ipv6_mreq {
    pub ipv6mr_multiaddr: in6_addr,
    pub ipv6mr_interface: c_uint,
}
pub const VOLUME_NAME_DOS: DWORD = 0x0;
pub const MOVEFILE_REPLACE_EXISTING: DWORD = 1;
pub const FILE_BEGIN: DWORD = 0;
pub const FILE_CURRENT: DWORD = 1;
pub const FILE_END: DWORD = 2;
pub const WAIT_OBJECT_0: DWORD = 0x00000000;
pub const WAIT_TIMEOUT: DWORD = 258;
pub const WAIT_FAILED: DWORD = 0xFFFFFFFF;
pub const PIPE_ACCESS_INBOUND: DWORD = 0x00000001;
pub const PIPE_ACCESS_OUTBOUND: DWORD = 0x00000002;
pub const FILE_FLAG_FIRST_PIPE_INSTANCE: DWORD = 0x00080000;
pub const FILE_FLAG_OVERLAPPED: DWORD = 0x40000000;
pub const PIPE_WAIT: DWORD = 0x00000000;
pub const PIPE_TYPE_BYTE: DWORD = 0x00000000;
pub const PIPE_REJECT_REMOTE_CLIENTS: DWORD = 0x00000008;
pub const PIPE_READMODE_BYTE: DWORD = 0x00000000;
pub const FD_SETSIZE: usize = 64;
pub const STACK_SIZE_PARAM_IS_A_RESERVATION: DWORD = 0x00010000;
pub const STATUS_SUCCESS: NTSTATUS = 0x00000000;
pub const STATUS_DELETE_PENDING: NTSTATUS = 0xc0000056_u32 as _;
pub const STATUS_INVALID_PARAMETER: NTSTATUS = 0xc000000d_u32 as _;
pub const STATUS_PENDING: NTSTATUS = 0x103 as _;
pub const STATUS_END_OF_FILE: NTSTATUS = 0xC0000011_u32 as _;
// Equivalent to the `NT_SUCCESS` C preprocessor macro.
// See: https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/using-ntstatus-values
pub fn nt_success(status: NTSTATUS) -> bool {
    // Warning/error NTSTATUS values have the sign bit set; success and
    // informational values do not.
    !status.is_negative()
}
pub const BCRYPT_USE_SYSTEM_PREFERRED_RNG: DWORD = 0x00000002;
// Mirrors the native `UNICODE_STRING`: a counted (not NUL-terminated)
// UTF-16 string. `Length`/`MaximumLength` are byte counts.
#[repr(C)]
pub struct UNICODE_STRING {
    pub Length: u16,
    pub MaximumLength: u16,
    pub Buffer: *mut u16,
}
impl UNICODE_STRING {
    /// Builds a `UNICODE_STRING` pointing at `slice` without copying; the
    /// slice must outlive every use of the returned value.
    pub fn from_ref(slice: &[u16]) -> Self {
        // Lengths are in bytes, not UTF-16 units.
        // NOTE(review): the `as _` casts truncate for slices larger than
        // u16::MAX bytes — presumably callers never pass such strings; confirm.
        let len = slice.len() * mem::size_of::<u16>();
        Self { Length: len as _, MaximumLength: len as _, Buffer: slice.as_ptr() as _ }
    }
}
// Mirrors the native `OBJECT_ATTRIBUTES` passed to NT object-manager APIs
// (e.g. NtCreateFile); normally built via `Default` below.
#[repr(C)]
pub struct OBJECT_ATTRIBUTES {
    pub Length: ULONG,
    pub RootDirectory: HANDLE,
    pub ObjectName: *const UNICODE_STRING,
    pub Attributes: ULONG,
    pub SecurityDescriptor: *mut c_void,
    pub SecurityQualityOfService: *mut c_void,
}
impl Default for OBJECT_ATTRIBUTES {
    // Zeroed attributes with `Length` pre-set to the struct size, as the
    // NT APIs require.
    fn default() -> Self {
        Self {
            Length: mem::size_of::<Self>() as _,
            RootDirectory: ptr::null_mut(),
            ObjectName: ptr::null_mut(),
            Attributes: 0,
            SecurityDescriptor: ptr::null_mut(),
            SecurityQualityOfService: ptr::null_mut(),
        }
    }
}
// The anonymous union inside the native IO_STATUS_BLOCK: either a status
// code or a pointer, depending on the operation.
#[repr(C)]
union IO_STATUS_BLOCK_union {
    Status: NTSTATUS,
    Pointer: *mut c_void,
}
impl Default for IO_STATUS_BLOCK_union {
    // Defaults to a null pointer (zeroes the whole union).
    fn default() -> Self {
        Self { Pointer: ptr::null_mut() }
    }
}
// Mirrors the native `IO_STATUS_BLOCK` filled in by NT I/O calls.
#[repr(C)]
#[derive(Default)]
pub struct IO_STATUS_BLOCK {
    u: IO_STATUS_BLOCK_union,
    pub Information: usize,
}
// Completion-callback signature taken by `ReadFileEx`/`WriteFileEx` below.
pub type LPOVERLAPPED_COMPLETION_ROUTINE = unsafe extern "system" fn(
    dwErrorCode: DWORD,
    dwNumberOfBytesTransfered: DWORD,
    lpOverlapped: *mut OVERLAPPED,
);
// APC-callback signature taken by `NtReadFile`/`NtWriteFile` below.
type IO_APC_ROUTINE = unsafe extern "system" fn(
    ApcContext: *mut c_void,
    IoStatusBlock: *mut IO_STATUS_BLOCK,
    Reserved: ULONG,
);
// `WSAStartup` output. The field order differs between 32- and 64-bit targets
// (matching the Windows SDK headers), hence the two `cfg`-gated definitions.
#[repr(C)]
#[cfg(not(target_pointer_width = "64"))]
pub struct WSADATA {
    pub wVersion: WORD,
    pub wHighVersion: WORD,
    pub szDescription: [u8; WSADESCRIPTION_LEN + 1],
    pub szSystemStatus: [u8; WSASYS_STATUS_LEN + 1],
    pub iMaxSockets: u16,
    pub iMaxUdpDg: u16,
    pub lpVendorInfo: *mut u8,
}
#[repr(C)]
#[cfg(target_pointer_width = "64")]
pub struct WSADATA {
    pub wVersion: WORD,
    pub wHighVersion: WORD,
    pub iMaxSockets: u16,
    pub iMaxUdpDg: u16,
    pub lpVendorInfo: *mut u8,
    pub szDescription: [u8; WSADESCRIPTION_LEN + 1],
    pub szSystemStatus: [u8; WSASYS_STATUS_LEN + 1],
}
/// Scatter/gather buffer descriptor used by `WSASend`/`WSARecv` below.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct WSABUF {
    pub len: ULONG,
    pub buf: *mut CHAR,
}
/// Winsock protocol descriptor: produced by `WSADuplicateSocketW` and
/// consumed by `WSASocketW` when reconstructing a duplicated socket.
#[repr(C)]
pub struct WSAPROTOCOL_INFO {
    pub dwServiceFlags1: DWORD,
    pub dwServiceFlags2: DWORD,
    pub dwServiceFlags3: DWORD,
    pub dwServiceFlags4: DWORD,
    pub dwProviderFlags: DWORD,
    pub ProviderId: GUID,
    pub dwCatalogEntryId: DWORD,
    pub ProtocolChain: WSAPROTOCOLCHAIN,
    pub iVersion: c_int,
    pub iAddressFamily: c_int,
    pub iMaxSockAddr: c_int,
    pub iMinSockAddr: c_int,
    pub iSocketType: c_int,
    pub iProtocol: c_int,
    pub iProtocolMaxOffset: c_int,
    pub iNetworkByteOrder: c_int,
    pub iSecurityScheme: c_int,
    pub dwMessageSize: DWORD,
    pub dwProviderReserved: DWORD,
    pub szProtocol: [u16; (WSAPROTOCOL_LEN as usize) + 1],
}
/// File attribute snapshot: attributes, the three timestamps, and a 64-bit
/// size split across two DWORDs.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct WIN32_FILE_ATTRIBUTE_DATA {
    pub dwFileAttributes: DWORD,
    pub ftCreationTime: FILETIME,
    pub ftLastAccessTime: FILETIME,
    pub ftLastWriteTime: FILETIME,
    pub nFileSizeHigh: DWORD,
    pub nFileSizeLow: DWORD,
}
/// Information-class selector for `GetFileInformationByHandleEx` /
/// `SetFileInformationByHandle` (declared below). Discriminant values must
/// match the SDK exactly.
#[repr(C)]
#[allow(dead_code)] // we only use some variants
pub enum FILE_INFO_BY_HANDLE_CLASS {
    FileBasicInfo = 0,
    FileStandardInfo = 1,
    FileNameInfo = 2,
    FileRenameInfo = 3,
    FileDispositionInfo = 4,
    FileAllocationInfo = 5,
    FileEndOfFileInfo = 6,
    FileStreamInfo = 7,
    FileCompressionInfo = 8,
    FileAttributeTagInfo = 9,
    FileIdBothDirectoryInfo = 10, // 0xA
    FileIdBothDirectoryRestartInfo = 11, // 0xB
    FileIoPriorityHintInfo = 12, // 0xC
    FileRemoteProtocolInfo = 13, // 0xD
    FileFullDirectoryInfo = 14, // 0xE
    FileFullDirectoryRestartInfo = 15, // 0xF
    FileStorageInfo = 16, // 0x10
    FileAlignmentInfo = 17, // 0x11
    FileIdInfo = 18, // 0x12
    FileIdExtdDirectoryInfo = 19, // 0x13
    FileIdExtdDirectoryRestartInfo = 20, // 0x14
    FileDispositionInfoEx = 21, // 0x15, Windows 10 version 1607
    MaximumFileInfoByHandlesClass,
}
/// Payload for the `FileDispositionInfo` class: a nonzero `DeleteFile`
/// requests deletion of the file when its handles close.
#[repr(C)]
pub struct FILE_DISPOSITION_INFO {
    pub DeleteFile: BOOLEAN,
}
// Flags for `FILE_DISPOSITION_INFO_EX::Flags` (the `FileDispositionInfoEx`
// class; see the enum above).
pub const FILE_DISPOSITION_DELETE: DWORD = 0x1;
pub const FILE_DISPOSITION_POSIX_SEMANTICS: DWORD = 0x2;
pub const FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE: DWORD = 0x10;
/// Payload for the `FileDispositionInfoEx` class (Windows 10 1607+).
#[repr(C)]
pub struct FILE_DISPOSITION_INFO_EX {
    pub Flags: DWORD,
}
/// Directory entry returned for the `FileIdBothDirectoryInfo` class.
/// `FileName` is the C flexible-array idiom: the struct is the header of a
/// larger buffer and the real name is `FileNameLength` bytes long.
#[repr(C)]
#[derive(Default)]
pub struct FILE_ID_BOTH_DIR_INFO {
    pub NextEntryOffset: DWORD,
    pub FileIndex: DWORD,
    pub CreationTime: LARGE_INTEGER,
    pub LastAccessTime: LARGE_INTEGER,
    pub LastWriteTime: LARGE_INTEGER,
    pub ChangeTime: LARGE_INTEGER,
    pub EndOfFile: LARGE_INTEGER,
    pub AllocationSize: LARGE_INTEGER,
    pub FileAttributes: DWORD,
    pub FileNameLength: DWORD,
    pub EaSize: DWORD,
    pub ShortNameLength: CCHAR,
    pub ShortName: [WCHAR; 12],
    pub FileId: LARGE_INTEGER,
    pub FileName: [WCHAR; 1],
}
/// Payload for the `FileBasicInfo` class: timestamps plus attributes.
#[repr(C)]
pub struct FILE_BASIC_INFO {
    pub CreationTime: LARGE_INTEGER,
    pub LastAccessTime: LARGE_INTEGER,
    pub LastWriteTime: LARGE_INTEGER,
    pub ChangeTime: LARGE_INTEGER,
    pub FileAttributes: DWORD,
}
/// Payload for the `FileEndOfFileInfo` class (truncate/extend a file).
#[repr(C)]
pub struct FILE_END_OF_FILE_INFO {
    pub EndOfFile: LARGE_INTEGER,
}
/// Header of a reparse-point buffer: the tag-specific payload (e.g. the two
/// structs below) follows in memory at `rest`, which exists only to mark the
/// offset where that payload begins.
#[repr(C)]
pub struct REPARSE_DATA_BUFFER {
    pub ReparseTag: c_uint,
    pub ReparseDataLength: c_ushort,
    pub Reserved: c_ushort,
    pub rest: (),
}
/// Symlink payload of a reparse buffer. `PathBuffer` marks the start of the
/// variable-length path data; the offsets/lengths above are byte counts
/// relative to it.
#[repr(C)]
pub struct SYMBOLIC_LINK_REPARSE_BUFFER {
    pub SubstituteNameOffset: c_ushort,
    pub SubstituteNameLength: c_ushort,
    pub PrintNameOffset: c_ushort,
    pub PrintNameLength: c_ushort,
    pub Flags: c_ulong,
    pub PathBuffer: WCHAR,
}
/// Mount-point (junction) payload of a reparse buffer; layout mirrors the
/// symlink variant minus the `Flags` field.
#[repr(C)]
pub struct MOUNT_POINT_REPARSE_BUFFER {
    pub SubstituteNameOffset: c_ushort,
    pub SubstituteNameLength: c_ushort,
    pub PrintNameOffset: c_ushort,
    pub PrintNameLength: c_ushort,
    pub PathBuffer: WCHAR,
}
// Optional progress callback accepted by `CopyFileExW` (declared below).
pub type LPPROGRESS_ROUTINE = crate::option::Option<
    unsafe extern "system" fn(
        TotalFileSize: LARGE_INTEGER,
        TotalBytesTransferred: LARGE_INTEGER,
        StreamSize: LARGE_INTEGER,
        StreamBytesTransferred: LARGE_INTEGER,
        dwStreamNumber: DWORD,
        dwCallbackReason: DWORD,
        hSourceFile: HANDLE,
        hDestinationFile: HANDLE,
        lpData: LPVOID,
    ) -> DWORD,
>;
// Slim, pointer-sized synchronization primitives. These are opaque to Rust
// and only ever manipulated through the `*ConditionVariable` / `*SRWLock*`
// functions declared below.
#[repr(C)]
pub struct CONDITION_VARIABLE {
    pub ptr: LPVOID,
}
#[repr(C)]
pub struct SRWLOCK {
    pub ptr: LPVOID,
}
// Initialized in place by `InitializeCriticalSection` below, so the field
// layout must match the SDK definition exactly.
#[repr(C)]
pub struct CRITICAL_SECTION {
    CriticalSectionDebug: LPVOID,
    LockCount: LONG,
    RecursionCount: LONG,
    OwningThread: HANDLE,
    LockSemaphore: HANDLE,
    SpinCount: ULONG_PTR,
}
/// Mount-point reparse buffer in the legacy (pre-Vista) layout.
/// `ReparseTarget` marks the start of the variable-length target path.
#[repr(C)]
pub struct REPARSE_MOUNTPOINT_DATA_BUFFER {
    pub ReparseTag: DWORD,
    pub ReparseDataLength: DWORD,
    pub Reserved: WORD,
    pub ReparseTargetLength: WORD,
    pub ReparseTargetMaximumLength: WORD,
    pub Reserved1: WORD,
    pub ReparseTarget: WCHAR,
}
/// Standard 128-bit Windows GUID.
#[repr(C)]
pub struct GUID {
    pub Data1: DWORD,
    pub Data2: WORD,
    pub Data3: WORD,
    pub Data4: [BYTE; 8],
}
/// Protocol chain embedded in `WSAPROTOCOL_INFO` above.
#[repr(C)]
pub struct WSAPROTOCOLCHAIN {
    pub ChainLen: c_int,
    pub ChainEntries: [DWORD; MAX_PROTOCOL_CHAIN as usize],
}
/// Security/inheritance descriptor accepted by most handle-creating calls
/// below (`CreateFileW`, `CreateThread`, `CreateProcessW`, ...).
#[repr(C)]
pub struct SECURITY_ATTRIBUTES {
    pub nLength: DWORD,
    pub lpSecurityDescriptor: LPVOID,
    pub bInheritHandle: BOOL,
}
/// Output of `CreateProcessW`: the new process/thread handles and ids.
#[repr(C)]
pub struct PROCESS_INFORMATION {
    pub hProcess: HANDLE,
    pub hThread: HANDLE,
    pub dwProcessId: DWORD,
    pub dwThreadId: DWORD,
}
/// Startup parameters passed to `CreateProcessW`; `cb` must be the struct size.
#[repr(C)]
pub struct STARTUPINFO {
    pub cb: DWORD,
    pub lpReserved: LPWSTR,
    pub lpDesktop: LPWSTR,
    pub lpTitle: LPWSTR,
    pub dwX: DWORD,
    pub dwY: DWORD,
    pub dwXSize: DWORD,
    pub dwYSize: DWORD,
    pub dwXCountChars: DWORD,
    // NOTE(review): the SDK spells this `dwYCountChars`. The misspelled name
    // is kept because renaming a public field would break in-crate users; the
    // name has no ABI significance.
    pub dwYCountCharts: DWORD,
    pub dwFillAttribute: DWORD,
    pub dwFlags: DWORD,
    pub wShowWindow: WORD,
    pub cbReserved2: WORD,
    pub lpReserved2: LPBYTE,
    pub hStdInput: HANDLE,
    pub hStdOutput: HANDLE,
    pub hStdError: HANDLE,
}
/// Generic socket address header used by the socket calls below.
#[repr(C)]
pub struct SOCKADDR {
    pub sa_family: ADDRESS_FAMILY,
    pub sa_data: [CHAR; 14],
}
/// 64-bit timestamp split across two DWORDs (low word first).
#[repr(C)]
#[derive(Copy, Clone)]
pub struct FILETIME {
    pub dwLowDateTime: DWORD,
    pub dwHighDateTime: DWORD,
}
/// Output of `GetSystemInfo` (declared below).
#[repr(C)]
pub struct SYSTEM_INFO {
    pub wProcessorArchitecture: WORD,
    pub wReserved: WORD,
    pub dwPageSize: DWORD,
    pub lpMinimumApplicationAddress: LPVOID,
    pub lpMaximumApplicationAddress: LPVOID,
    pub dwActiveProcessorMask: DWORD_PTR,
    pub dwNumberOfProcessors: DWORD,
    pub dwProcessorType: DWORD,
    pub dwAllocationGranularity: DWORD,
    pub wProcessorLevel: WORD,
    pub wProcessorRevision: WORD,
}
/// Per-operation state for overlapped (asynchronous) I/O; passed to
/// `ReadFileEx`, `WriteFileEx`, `GetOverlappedResult`, etc. below.
#[repr(C)]
pub struct OVERLAPPED {
    pub Internal: *mut c_ulong,
    pub InternalHigh: *mut c_ulong,
    pub Offset: DWORD,
    pub OffsetHigh: DWORD,
    pub hEvent: HANDLE,
}
/// Address modes for the dbghelp-style address translation APIs.
#[repr(C)]
#[allow(dead_code)] // we only use some variants
pub enum ADDRESS_MODE {
    AddrMode1616,
    AddrMode1632,
    AddrModeReal,
    AddrModeFlat,
}
/// Socket-address storage large enough (and suitably aligned, via
/// `__ss_align`) for any concrete address family.
#[repr(C)]
pub struct SOCKADDR_STORAGE_LH {
    pub ss_family: ADDRESS_FAMILY,
    pub __ss_pad1: [CHAR; 6],
    pub __ss_align: i64,
    pub __ss_pad2: [CHAR; 112],
}
/// Node of the linked list returned by `getaddrinfo` (declared below);
/// released with `freeaddrinfo`.
#[repr(C)]
pub struct ADDRINFOA {
    pub ai_flags: c_int,
    pub ai_family: c_int,
    pub ai_socktype: c_int,
    pub ai_protocol: c_int,
    pub ai_addrlen: size_t,
    pub ai_canonname: *mut c_char,
    pub ai_addr: *mut SOCKADDR,
    pub ai_next: *mut ADDRINFOA,
}
/// IPv4 socket address.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct sockaddr_in {
    pub sin_family: ADDRESS_FAMILY,
    pub sin_port: USHORT,
    pub sin_addr: in_addr,
    pub sin_zero: [CHAR; 8],
}
/// IPv6 socket address.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct sockaddr_in6 {
    pub sin6_family: ADDRESS_FAMILY,
    pub sin6_port: USHORT,
    pub sin6_flowinfo: c_ulong,
    pub sin6_addr: in6_addr,
    pub sin6_scope_id: c_ulong,
}
/// Raw IPv4 address (4 bytes packed into a `u32`).
#[repr(C)]
#[derive(Copy, Clone)]
pub struct in_addr {
    pub s_addr: u32,
}
/// Raw IPv6 address (16 bytes).
#[repr(C)]
#[derive(Copy, Clone)]
pub struct in6_addr {
    pub s6_addr: [u8; 16],
}
/// Return values for structured-exception-handler callbacks.
#[repr(C)]
#[derive(Copy, Clone)]
#[allow(dead_code)] // we only use some variants
pub enum EXCEPTION_DISPOSITION {
    ExceptionContinueExecution,
    ExceptionContinueSearch,
    ExceptionNestedException,
    ExceptionCollidedUnwind,
}
/// Socket set for `select` (declared below): the first `fd_count` entries of
/// `fd_array` are live, with capacity fixed by `FD_SETSIZE` above.
#[repr(C)]
#[derive(Copy)]
pub struct fd_set {
    pub fd_count: c_uint,
    pub fd_array: [SOCKET; FD_SETSIZE],
}
impl Clone for fd_set {
fn clone(&self) -> fd_set {
*self
}
}
/// Seconds/microseconds timeout pair accepted by `select` (declared below).
#[repr(C)]
#[derive(Copy, Clone)]
pub struct timeval {
    pub tv_sec: c_long,
    pub tv_usec: c_long,
}
// Desktop specific functions & types
cfg_if::cfg_if! {
    if #[cfg(not(target_vendor = "uwp"))] {
        pub const EXCEPTION_CONTINUE_SEARCH: LONG = 0;
        pub const EXCEPTION_STACK_OVERFLOW: DWORD = 0xc00000fd;
        pub const EXCEPTION_MAXIMUM_PARAMETERS: usize = 15;
        // SEH exception record; the length of `ExceptionInformation` is fixed
        // by the constant above.
        #[repr(C)]
        pub struct EXCEPTION_RECORD {
            pub ExceptionCode: DWORD,
            pub ExceptionFlags: DWORD,
            pub ExceptionRecord: *mut EXCEPTION_RECORD,
            pub ExceptionAddress: LPVOID,
            pub NumberParameters: DWORD,
            pub ExceptionInformation: [LPVOID; EXCEPTION_MAXIMUM_PARAMETERS],
        }
        // Deliberately opaque: the CPU context is only ever handled by pointer.
        pub enum CONTEXT {}
        #[repr(C)]
        pub struct EXCEPTION_POINTERS {
            pub ExceptionRecord: *mut EXCEPTION_RECORD,
            pub ContextRecord: *mut CONTEXT,
        }
        pub type PVECTORED_EXCEPTION_HANDLER =
            extern "system" fn(ExceptionInfo: *mut EXCEPTION_POINTERS) -> LONG;
        // Input-control block for `ReadConsoleW` below.
        #[repr(C)]
        #[derive(Copy, Clone)]
        pub struct CONSOLE_READCONSOLE_CONTROL {
            pub nLength: ULONG,
            pub nInitialChars: ULONG,
            pub dwCtrlWakeupMask: ULONG,
            pub dwControlKeyState: ULONG,
        }
        pub type PCONSOLE_READCONSOLE_CONTROL = *mut CONSOLE_READCONSOLE_CONTROL;
        // Output of `GetFileInformationByHandle` below.
        #[repr(C)]
        pub struct BY_HANDLE_FILE_INFORMATION {
            pub dwFileAttributes: DWORD,
            pub ftCreationTime: FILETIME,
            pub ftLastAccessTime: FILETIME,
            pub ftLastWriteTime: FILETIME,
            pub dwVolumeSerialNumber: DWORD,
            pub nFileSizeHigh: DWORD,
            pub nFileSizeLow: DWORD,
            pub nNumberOfLinks: DWORD,
            pub nFileIndexHigh: DWORD,
            pub nFileIndexLow: DWORD,
        }
        pub type LPBY_HANDLE_FILE_INFORMATION = *mut BY_HANDLE_FILE_INFORMATION;
        pub type LPCVOID = *const c_void;
        pub const HANDLE_FLAG_INHERIT: DWORD = 0x00000001;
        pub const TOKEN_READ: DWORD = 0x20008;
        #[link(name = "advapi32")]
        extern "system" {
            // Allowed but unused by UWP
            pub fn OpenProcessToken(
                ProcessHandle: HANDLE,
                DesiredAccess: DWORD,
                TokenHandle: *mut HANDLE,
            ) -> BOOL;
        }
        #[link(name = "userenv")]
        extern "system" {
            // Allowed but unused by UWP
            pub fn GetUserProfileDirectoryW(
                hToken: HANDLE,
                lpProfileDir: LPWSTR,
                lpcchSize: *mut DWORD,
            ) -> BOOL;
        }
        #[link(name = "kernel32")]
        extern "system" {
            // Functions forbidden when targeting UWP
            pub fn ReadConsoleW(
                hConsoleInput: HANDLE,
                lpBuffer: LPVOID,
                nNumberOfCharsToRead: DWORD,
                lpNumberOfCharsRead: LPDWORD,
                pInputControl: PCONSOLE_READCONSOLE_CONTROL,
            ) -> BOOL;
            pub fn WriteConsoleW(
                hConsoleOutput: HANDLE,
                lpBuffer: LPCVOID,
                nNumberOfCharsToWrite: DWORD,
                lpNumberOfCharsWritten: LPDWORD,
                lpReserved: LPVOID,
            ) -> BOOL;
            pub fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
            // Allowed but unused by UWP
            pub fn GetFileInformationByHandle(
                hFile: HANDLE,
                lpFileInformation: LPBY_HANDLE_FILE_INFORMATION,
            ) -> BOOL;
            pub fn SetHandleInformation(hObject: HANDLE, dwMask: DWORD, dwFlags: DWORD) -> BOOL;
            pub fn AddVectoredExceptionHandler(
                FirstHandler: ULONG,
                VectoredHandler: PVECTORED_EXCEPTION_HANDLER,
            ) -> LPVOID;
            // NOTE(review): the SDK names these parameters lpFileName /
            // lpExistingFileName; the names below look copied from
            // CreateSymbolicLinkW. Parameter names in an extern declaration
            // are cosmetic, so the ABI is unaffected.
            pub fn CreateHardLinkW(
                lpSymlinkFileName: LPCWSTR,
                lpTargetFileName: LPCWSTR,
                lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
            ) -> BOOL;
            pub fn SetThreadStackGuarantee(_size: *mut c_ulong) -> BOOL;
            pub fn GetWindowsDirectoryW(lpBuffer: LPWSTR, uSize: UINT) -> UINT;
        }
    }
}
// UWP specific functions & types
cfg_if::cfg_if! {
    if #[cfg(target_vendor = "uwp")] {
        // Payload of the `FileStandardInfo` information class (see
        // FILE_INFO_BY_HANDLE_CLASS above).
        #[repr(C)]
        pub struct FILE_STANDARD_INFO {
            pub AllocationSize: LARGE_INTEGER,
            pub EndOfFile: LARGE_INTEGER,
            pub NumberOfLinks: DWORD,
            pub DeletePending: BOOLEAN,
            pub Directory: BOOLEAN,
        }
    }
}
// Shared between Desktop & UWP
#[link(name = "kernel32")]
extern "system" {
    pub fn GetCurrentProcessId() -> DWORD;
    // Critical sections are initialized/used in place via raw pointers.
    pub fn InitializeCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
    pub fn EnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
    pub fn TryEnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION) -> BOOL;
    pub fn LeaveCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
    pub fn DeleteCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
    pub fn GetSystemDirectoryW(lpBuffer: LPWSTR, uSize: UINT) -> UINT;
    pub fn RemoveDirectoryW(lpPathName: LPCWSTR) -> BOOL;
    pub fn SetFileAttributesW(lpFileName: LPCWSTR, dwFileAttributes: DWORD) -> BOOL;
    pub fn SetLastError(dwErrCode: DWORD);
    pub fn GetCommandLineW() -> LPWSTR;
    // NOTE(review): `lpBuffer` is an output buffer (LPWSTR in the SDK); the
    // LPCWSTR here works because callers cast, but confirm before tightening.
    pub fn GetTempPathW(nBufferLength: DWORD, lpBuffer: LPCWSTR) -> DWORD;
    pub fn GetCurrentProcess() -> HANDLE;
    pub fn GetCurrentThread() -> HANDLE;
    pub fn GetStdHandle(which: DWORD) -> HANDLE;
    pub fn ExitProcess(uExitCode: c_uint) -> !;
    pub fn DeviceIoControl(
        hDevice: HANDLE,
        dwIoControlCode: DWORD,
        lpInBuffer: LPVOID,
        nInBufferSize: DWORD,
        lpOutBuffer: LPVOID,
        nOutBufferSize: DWORD,
        lpBytesReturned: LPDWORD,
        lpOverlapped: LPOVERLAPPED,
    ) -> BOOL;
    pub fn CreateThread(
        lpThreadAttributes: LPSECURITY_ATTRIBUTES,
        dwStackSize: SIZE_T,
        lpStartAddress: extern "system" fn(*mut c_void) -> DWORD,
        lpParameter: LPVOID,
        dwCreationFlags: DWORD,
        lpThreadId: LPDWORD,
    ) -> HandleOrNull;
    pub fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD;
    pub fn SwitchToThread() -> BOOL;
    pub fn Sleep(dwMilliseconds: DWORD);
    pub fn SleepEx(dwMilliseconds: DWORD, bAlertable: BOOL) -> DWORD;
    pub fn GetProcessId(handle: HANDLE) -> DWORD;
    pub fn CopyFileExW(
        lpExistingFileName: LPCWSTR,
        lpNewFileName: LPCWSTR,
        lpProgressRoutine: LPPROGRESS_ROUTINE,
        lpData: LPVOID,
        pbCancel: LPBOOL,
        dwCopyFlags: DWORD,
    ) -> BOOL;
    pub fn FormatMessageW(
        flags: DWORD,
        lpSrc: LPVOID,
        msgId: DWORD,
        langId: DWORD,
        buf: LPWSTR,
        nsize: DWORD,
        args: *const c_void,
    ) -> DWORD;
    // Thread-local storage slots.
    pub fn TlsAlloc() -> DWORD;
    pub fn TlsGetValue(dwTlsIndex: DWORD) -> LPVOID;
    pub fn TlsSetValue(dwTlsIndex: DWORD, lpTlsvalue: LPVOID) -> BOOL;
    pub fn GetLastError() -> DWORD;
    pub fn QueryPerformanceFrequency(lpFrequency: *mut LARGE_INTEGER) -> BOOL;
    pub fn QueryPerformanceCounter(lpPerformanceCount: *mut LARGE_INTEGER) -> BOOL;
    pub fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: LPDWORD) -> BOOL;
    pub fn TerminateProcess(hProcess: HANDLE, uExitCode: UINT) -> BOOL;
    pub fn CreateProcessW(
        lpApplicationName: LPCWSTR,
        lpCommandLine: LPWSTR,
        lpProcessAttributes: LPSECURITY_ATTRIBUTES,
        lpThreadAttributes: LPSECURITY_ATTRIBUTES,
        bInheritHandles: BOOL,
        dwCreationFlags: DWORD,
        lpEnvironment: LPVOID,
        lpCurrentDirectory: LPCWSTR,
        lpStartupInfo: LPSTARTUPINFO,
        lpProcessInformation: LPPROCESS_INFORMATION,
    ) -> BOOL;
    pub fn GetEnvironmentVariableW(n: LPCWSTR, v: LPWSTR, nsize: DWORD) -> DWORD;
    pub fn SetEnvironmentVariableW(n: LPCWSTR, v: LPCWSTR) -> BOOL;
    pub fn GetEnvironmentStringsW() -> LPWCH;
    pub fn FreeEnvironmentStringsW(env_ptr: LPWCH) -> BOOL;
    pub fn GetModuleFileNameW(hModule: HMODULE, lpFilename: LPWSTR, nSize: DWORD) -> DWORD;
    pub fn CreateDirectoryW(
        lpPathName: LPCWSTR,
        lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
    ) -> BOOL;
    pub fn DeleteFileW(lpPathName: LPCWSTR) -> BOOL;
    pub fn GetCurrentDirectoryW(nBufferLength: DWORD, lpBuffer: LPWSTR) -> DWORD;
    pub fn SetCurrentDirectoryW(lpPathName: LPCWSTR) -> BOOL;
    pub fn DuplicateHandle(
        hSourceProcessHandle: HANDLE,
        hSourceHandle: HANDLE,
        hTargetProcessHandle: HANDLE,
        lpTargetHandle: LPHANDLE,
        dwDesiredAccess: DWORD,
        bInheritHandle: BOOL,
        dwOptions: DWORD,
    ) -> BOOL;
    // File handles are passed as `BorrowedHandle` to encode "valid, non-null
    // handle" in the signature itself.
    pub fn ReadFile(
        hFile: BorrowedHandle<'_>,
        lpBuffer: LPVOID,
        nNumberOfBytesToRead: DWORD,
        lpNumberOfBytesRead: LPDWORD,
        lpOverlapped: LPOVERLAPPED,
    ) -> BOOL;
    pub fn ReadFileEx(
        hFile: BorrowedHandle<'_>,
        lpBuffer: LPVOID,
        nNumberOfBytesToRead: DWORD,
        lpOverlapped: LPOVERLAPPED,
        lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE,
    ) -> BOOL;
    pub fn WriteFileEx(
        hFile: BorrowedHandle<'_>,
        lpBuffer: LPVOID,
        nNumberOfBytesToWrite: DWORD,
        lpOverlapped: LPOVERLAPPED,
        lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE,
    ) -> BOOL;
    pub fn CloseHandle(hObject: HANDLE) -> BOOL;
    pub fn MoveFileExW(lpExistingFileName: LPCWSTR, lpNewFileName: LPCWSTR, dwFlags: DWORD)
        -> BOOL;
    pub fn SetFilePointerEx(
        hFile: HANDLE,
        liDistanceToMove: LARGE_INTEGER,
        lpNewFilePointer: PLARGE_INTEGER,
        dwMoveMethod: DWORD,
    ) -> BOOL;
    pub fn FlushFileBuffers(hFile: HANDLE) -> BOOL;
    // Returns `HandleOrInvalid` because CreateFileW signals failure with
    // INVALID_HANDLE_VALUE rather than null.
    pub fn CreateFileW(
        lpFileName: LPCWSTR,
        dwDesiredAccess: DWORD,
        dwShareMode: DWORD,
        lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
        dwCreationDisposition: DWORD,
        dwFlagsAndAttributes: DWORD,
        hTemplateFile: HANDLE,
    ) -> HandleOrInvalid;
    pub fn FindFirstFileW(fileName: LPCWSTR, findFileData: LPWIN32_FIND_DATAW) -> HANDLE;
    pub fn FindNextFileW(findFile: HANDLE, findFileData: LPWIN32_FIND_DATAW) -> BOOL;
    pub fn FindClose(findFile: HANDLE) -> BOOL;
    pub fn GetProcAddress(handle: HMODULE, name: LPCSTR) -> *mut c_void;
    pub fn GetModuleHandleA(lpModuleName: LPCSTR) -> HMODULE;
    pub fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE;
    pub fn GetSystemTimeAsFileTime(lpSystemTimeAsFileTime: LPFILETIME);
    pub fn GetSystemInfo(lpSystemInfo: LPSYSTEM_INFO);
    pub fn CreateEventW(
        lpEventAttributes: LPSECURITY_ATTRIBUTES,
        bManualReset: BOOL,
        bInitialState: BOOL,
        lpName: LPCWSTR,
    ) -> HANDLE;
    pub fn WaitForMultipleObjects(
        nCount: DWORD,
        lpHandles: *const HANDLE,
        bWaitAll: BOOL,
        dwMilliseconds: DWORD,
    ) -> DWORD;
    pub fn CreateNamedPipeW(
        lpName: LPCWSTR,
        dwOpenMode: DWORD,
        dwPipeMode: DWORD,
        nMaxInstances: DWORD,
        nOutBufferSize: DWORD,
        nInBufferSize: DWORD,
        nDefaultTimeOut: DWORD,
        lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
    ) -> HANDLE;
    pub fn CancelIo(handle: HANDLE) -> BOOL;
    pub fn GetOverlappedResult(
        hFile: HANDLE,
        lpOverlapped: LPOVERLAPPED,
        lpNumberOfBytesTransferred: LPDWORD,
        bWait: BOOL,
    ) -> BOOL;
    pub fn CreateSymbolicLinkW(
        lpSymlinkFileName: LPCWSTR,
        lpTargetFileName: LPCWSTR,
        dwFlags: DWORD,
    ) -> BOOLEAN;
    // NOTE(review): `lpszFilePath` is an output buffer (LPWSTR in the SDK);
    // kept as LPCWSTR for source compatibility with existing callers.
    pub fn GetFinalPathNameByHandleW(
        hFile: HANDLE,
        lpszFilePath: LPCWSTR,
        cchFilePath: DWORD,
        dwFlags: DWORD,
    ) -> DWORD;
    pub fn GetFileInformationByHandleEx(
        hFile: HANDLE,
        fileInfoClass: FILE_INFO_BY_HANDLE_CLASS,
        lpFileInformation: LPVOID,
        dwBufferSize: DWORD,
    ) -> BOOL;
    pub fn SetFileInformationByHandle(
        hFile: HANDLE,
        FileInformationClass: FILE_INFO_BY_HANDLE_CLASS,
        lpFileInformation: LPVOID,
        dwBufferSize: DWORD,
    ) -> BOOL;
    // Condition-variable / slim reader-writer lock primitives.
    pub fn SleepConditionVariableSRW(
        ConditionVariable: PCONDITION_VARIABLE,
        SRWLock: PSRWLOCK,
        dwMilliseconds: DWORD,
        Flags: ULONG,
    ) -> BOOL;
    pub fn WakeConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
    pub fn WakeAllConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
    pub fn AcquireSRWLockExclusive(SRWLock: PSRWLOCK);
    pub fn AcquireSRWLockShared(SRWLock: PSRWLOCK);
    pub fn ReleaseSRWLockExclusive(SRWLock: PSRWLOCK);
    pub fn ReleaseSRWLockShared(SRWLock: PSRWLOCK);
    pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN;
    pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN;
    pub fn CompareStringOrdinal(
        lpString1: LPCWSTR,
        cchCount1: c_int,
        lpString2: LPCWSTR,
        cchCount2: c_int,
        bIgnoreCase: BOOL,
    ) -> c_int;
    pub fn GetFullPathNameW(
        lpFileName: LPCWSTR,
        nBufferLength: DWORD,
        lpBuffer: LPWSTR,
        lpFilePart: *mut LPWSTR,
    ) -> DWORD;
    pub fn GetFileAttributesW(lpFileName: LPCWSTR) -> DWORD;
}
// Winsock 2. `WSAStartup` must be called before any of the others.
#[link(name = "ws2_32")]
extern "system" {
    pub fn WSAStartup(wVersionRequested: WORD, lpWSAData: LPWSADATA) -> c_int;
    pub fn WSACleanup() -> c_int;
    pub fn WSAGetLastError() -> c_int;
    pub fn WSADuplicateSocketW(
        s: SOCKET,
        dwProcessId: DWORD,
        lpProtocolInfo: LPWSAPROTOCOL_INFO,
    ) -> c_int;
    pub fn WSASend(
        s: SOCKET,
        lpBuffers: LPWSABUF,
        dwBufferCount: DWORD,
        lpNumberOfBytesSent: LPDWORD,
        dwFlags: DWORD,
        lpOverlapped: LPWSAOVERLAPPED,
        lpCompletionRoutine: LPWSAOVERLAPPED_COMPLETION_ROUTINE,
    ) -> c_int;
    pub fn WSARecv(
        s: SOCKET,
        lpBuffers: LPWSABUF,
        dwBufferCount: DWORD,
        lpNumberOfBytesRecvd: LPDWORD,
        lpFlags: LPDWORD,
        lpOverlapped: LPWSAOVERLAPPED,
        lpCompletionRoutine: LPWSAOVERLAPPED_COMPLETION_ROUTINE,
    ) -> c_int;
    pub fn WSASocketW(
        af: c_int,
        kind: c_int,
        protocol: c_int,
        lpProtocolInfo: LPWSAPROTOCOL_INFO,
        g: GROUP,
        dwFlags: DWORD,
    ) -> SOCKET;
    pub fn ioctlsocket(s: SOCKET, cmd: c_long, argp: *mut c_ulong) -> c_int;
    pub fn closesocket(socket: SOCKET) -> c_int;
    pub fn recv(socket: SOCKET, buf: *mut c_void, len: c_int, flags: c_int) -> c_int;
    pub fn send(socket: SOCKET, buf: *const c_void, len: c_int, flags: c_int) -> c_int;
    pub fn recvfrom(
        socket: SOCKET,
        buf: *mut c_void,
        len: c_int,
        flags: c_int,
        addr: *mut SOCKADDR,
        addrlen: *mut c_int,
    ) -> c_int;
    pub fn sendto(
        socket: SOCKET,
        buf: *const c_void,
        len: c_int,
        flags: c_int,
        addr: *const SOCKADDR,
        addrlen: c_int,
    ) -> c_int;
    pub fn shutdown(socket: SOCKET, how: c_int) -> c_int;
    pub fn accept(socket: SOCKET, address: *mut SOCKADDR, address_len: *mut c_int) -> SOCKET;
    pub fn getsockopt(
        s: SOCKET,
        level: c_int,
        optname: c_int,
        optval: *mut c_char,
        optlen: *mut c_int,
    ) -> c_int;
    pub fn setsockopt(
        s: SOCKET,
        level: c_int,
        optname: c_int,
        optval: *const c_void,
        optlen: c_int,
    ) -> c_int;
    pub fn getsockname(socket: SOCKET, address: *mut SOCKADDR, address_len: *mut c_int) -> c_int;
    pub fn getpeername(socket: SOCKET, address: *mut SOCKADDR, address_len: *mut c_int) -> c_int;
    pub fn bind(socket: SOCKET, address: *const SOCKADDR, address_len: socklen_t) -> c_int;
    pub fn listen(socket: SOCKET, backlog: c_int) -> c_int;
    pub fn connect(socket: SOCKET, address: *const SOCKADDR, len: c_int) -> c_int;
    pub fn getaddrinfo(
        node: *const c_char,
        service: *const c_char,
        hints: *const ADDRINFOA,
        res: *mut *mut ADDRINFOA,
    ) -> c_int;
    // Frees the list produced by `getaddrinfo`.
    pub fn freeaddrinfo(res: *mut ADDRINFOA);
    pub fn select(
        nfds: c_int,
        readfds: *mut fd_set,
        writefds: *mut fd_set,
        exceptfds: *mut fd_set,
        timeout: *const timeval,
    ) -> c_int;
}
#[link(name = "bcrypt")]
extern "system" {
    // >= Vista / Server 2008
    // https://docs.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom
    // `hAlgorithm` is typed LPVOID — presumably so callers can pass null
    // together with BCRYPT_USE_SYSTEM_PREFERRED_RNG (declared above) instead
    // of opening an algorithm handle; confirm at the call sites.
    pub fn BCryptGenRandom(
        hAlgorithm: LPVOID,
        pBuffer: *mut u8,
        cbBuffer: ULONG,
        dwFlags: ULONG,
    ) -> NTSTATUS;
}
// Functions that aren't available on every version of Windows that we support,
// but we still use them and just provide some form of a fallback implementation.
// The braced bodies below are the fallbacks `compat_fn!` runs when the symbol
// cannot be loaded at runtime.
compat_fn! {
    "kernel32":

    // >= Win10 1607
    // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-setthreaddescription
    // Fallback: report "not implemented" the way the real API family would.
    pub fn SetThreadDescription(hThread: HANDLE,
                                lpThreadDescription: LPCWSTR) -> HRESULT {
        SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); E_NOTIMPL
    }

    // >= Win8 / Server 2012
    // https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getsystemtimepreciseasfiletime
    // Fallback: the coarser non-"precise" clock.
    pub fn GetSystemTimePreciseAsFileTime(lpSystemTimeAsFileTime: LPFILETIME)
                                          -> () {
        GetSystemTimeAsFileTime(lpSystemTimeAsFileTime)
    }

    // >= Win11 / Server 2022
    // https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-gettemppath2a
    // Fallback: the older temp-path lookup.
    pub fn GetTempPath2W(nBufferLength: DWORD, lpBuffer: LPCWSTR) -> DWORD {
        GetTempPathW(nBufferLength, lpBuffer)
    }
}
compat_fn! {
    "api-ms-win-core-synch-l1-2-0":

    // >= Windows 8 / Server 2012
    // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-waitonaddress
    // Fallback: there is no pre-Win8 equivalent, so callers must check
    // availability before relying on this.
    pub fn WaitOnAddress(
        Address: LPVOID,
        CompareAddress: LPVOID,
        AddressSize: SIZE_T,
        dwMilliseconds: DWORD
    ) -> BOOL {
        panic!("WaitOnAddress not available")
    }
    pub fn WakeByAddressSingle(Address: LPVOID) -> () {
        // If this api is unavailable, there cannot be anything waiting, because
        // WaitOnAddress would've panicked. So it's fine to do nothing here.
    }
}
// ntdll exports used by this module. The panicking bodies are fallbacks that
// only run if the export cannot be found at runtime, so callers are expected
// to probe availability first.
compat_fn! {
    "ntdll":
    pub fn NtCreateFile(
        FileHandle: *mut HANDLE,
        DesiredAccess: ACCESS_MASK,
        ObjectAttributes: *const OBJECT_ATTRIBUTES,
        IoStatusBlock: *mut IO_STATUS_BLOCK,
        AllocationSize: *mut i64,
        FileAttributes: ULONG,
        ShareAccess: ULONG,
        CreateDisposition: ULONG,
        CreateOptions: ULONG,
        EaBuffer: *mut c_void,
        EaLength: ULONG
    ) -> NTSTATUS {
        panic!("`NtCreateFile` not available");
    }
    pub fn NtReadFile(
        FileHandle: BorrowedHandle<'_>,
        Event: HANDLE,
        ApcRoutine: Option<IO_APC_ROUTINE>,
        ApcContext: *mut c_void,
        IoStatusBlock: &mut IO_STATUS_BLOCK,
        Buffer: *mut crate::mem::MaybeUninit<u8>,
        Length: ULONG,
        ByteOffset: Option<&LARGE_INTEGER>,
        Key: Option<&ULONG>
    ) -> NTSTATUS {
        panic!("`NtReadFile` not available");
    }
    pub fn NtWriteFile(
        FileHandle: BorrowedHandle<'_>,
        Event: HANDLE,
        ApcRoutine: Option<IO_APC_ROUTINE>,
        ApcContext: *mut c_void,
        IoStatusBlock: &mut IO_STATUS_BLOCK,
        Buffer: *const u8,
        Length: ULONG,
        ByteOffset: Option<&LARGE_INTEGER>,
        Key: Option<&ULONG>
    ) -> NTSTATUS {
        panic!("`NtWriteFile` not available");
    }
    // Maps an NTSTATUS to the Win32 error-code space.
    pub fn RtlNtStatusToDosError(
        Status: NTSTATUS
    ) -> ULONG {
        panic!("`RtlNtStatusToDosError` not available");
    }
    // Keyed events back the fallback thread-parking paths.
    pub fn NtCreateKeyedEvent(
        KeyedEventHandle: LPHANDLE,
        DesiredAccess: ACCESS_MASK,
        ObjectAttributes: LPVOID,
        Flags: ULONG
    ) -> NTSTATUS {
        panic!("keyed events not available")
    }
    pub fn NtReleaseKeyedEvent(
        EventHandle: HANDLE,
        Key: LPVOID,
        Alertable: BOOLEAN,
        Timeout: PLARGE_INTEGER
    ) -> NTSTATUS {
        panic!("keyed events not available")
    }
    pub fn NtWaitForKeyedEvent(
        EventHandle: HANDLE,
        Key: LPVOID,
        Alertable: BOOLEAN,
        Timeout: PLARGE_INTEGER
    ) -> NTSTATUS {
        panic!("keyed events not available")
    }
}
| 30.131086 | 117 | 0.678384 |
3ac7287284543f90b5c5660027b1cd41cbbb3039 | 2,402 | mod connect_nested;
mod connect_or_create_nested;
mod create_nested;
mod delete_nested;
mod disconnect_nested;
mod set_nested;
mod update_nested;
mod upsert_nested;
use super::*;
use crate::{
query_graph::{NodeRef, QueryGraph},
ParsedInputMap,
};
use connect_nested::*;
use connect_or_create_nested::*;
use create_nested::*;
use delete_nested::*;
use disconnect_nested::*;
use prisma_models::RelationFieldRef;
use schema::ConnectorContext;
use schema_builder::constants::operations;
use set_nested::*;
use update_nested::*;
use upsert_nested::*;
// Entry point for building the query graph of a nested relation write.
//
// `data_map` holds the parsed nested operations (create/update/connect/...)
// for `parent_relation_field`; each entry is forwarded to its dedicated
// builder module (imported above), which appends the required nodes and edges
// to `graph` beneath `parent`.
//
// Panics on an operation name none of the arms recognize — the schema builder
// should make that unreachable.
#[rustfmt::skip]
pub fn connect_nested_query(
    graph: &mut QueryGraph,
    connector_ctx: &ConnectorContext,
    parent: NodeRef,
    parent_relation_field: RelationFieldRef,
    data_map: ParsedInputMap,
) -> QueryGraphBuilderResult<()> {
    // All nested operations target the model on the far side of the relation.
    let child_model = parent_relation_field.related_model();

    for (field_name, value) in data_map {
        match field_name.as_str() {
            operations::CREATE => nested_create(graph, connector_ctx,parent, &parent_relation_field, value, &child_model)?,
            operations::CREATE_MANY => nested_create_many(graph, parent, &parent_relation_field, value, &child_model)?,
            operations::UPDATE => nested_update(graph, connector_ctx, &parent, &parent_relation_field, value, &child_model)?,
            operations::UPSERT => nested_upsert(graph, connector_ctx, parent, &parent_relation_field, value)?,
            operations::DELETE => nested_delete(graph, connector_ctx, &parent, &parent_relation_field, value, &child_model)?,
            operations::CONNECT => nested_connect(graph, parent, &parent_relation_field, value, &child_model)?,
            operations::DISCONNECT => nested_disconnect(graph, parent, &parent_relation_field, value, &child_model)?,
            operations::SET => nested_set(graph, &parent, &parent_relation_field, value, &child_model)?,
            operations::UPDATE_MANY => nested_update_many(graph, connector_ctx, &parent, &parent_relation_field, value, &child_model)?,
            operations::DELETE_MANY => nested_delete_many(graph, connector_ctx, &parent, &parent_relation_field, value, &child_model)?,
            operations::CONNECT_OR_CREATE => nested_connect_or_create(graph, connector_ctx, parent, &parent_relation_field, value, &child_model)?,
            _ => panic!("Unhandled nested operation: {}", field_name),
        };
    }

    Ok(())
}
| 42.892857 | 146 | 0.721482 |
7ad8d27b48330edd475ca1b6db51256efa659cdd | 10,159 | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Unix-specific extensions to primitives in the `std::fs` module.
#![stable(feature = "rust1", since = "1.0.0")]
use fs::{self, Permissions, OpenOptions};
use io;
use path::Path;
use sys;
use sys_common::{FromInner, AsInner, AsInnerMut};
/// Unix-specific extensions to `Permissions`.
///
/// Exposes the raw Unix mode bits behind the platform-independent
/// `fs::Permissions` type.
#[stable(feature = "fs_ext", since = "1.1.0")]
pub trait PermissionsExt {
    /// Returns the underlying raw `mode_t` bits that are the standard Unix
    /// permissions for this file.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// use std::fs::File;
    /// use std::os::unix::fs::PermissionsExt;
    ///
    /// let f = File::create("foo.txt")?;
    /// let metadata = f.metadata()?;
    /// let permissions = metadata.permissions();
    ///
    /// println!("permissions: {}", permissions.mode());
    /// ```
    #[stable(feature = "fs_ext", since = "1.1.0")]
    fn mode(&self) -> u32;

    /// Sets the underlying raw bits for this set of permissions.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// use std::fs::File;
    /// use std::os::unix::fs::PermissionsExt;
    ///
    /// let f = File::create("foo.txt")?;
    /// let metadata = f.metadata()?;
    /// let mut permissions = metadata.permissions();
    ///
    /// permissions.set_mode(0o644); // Read/write for owner and read for others.
    /// assert_eq!(permissions.mode(), 0o644);
    /// ```
    #[stable(feature = "fs_ext", since = "1.1.0")]
    fn set_mode(&mut self, mode: u32);

    /// Creates a new instance of `Permissions` from the given set of Unix
    /// permission bits.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// use std::fs::Permissions;
    /// use std::os::unix::fs::PermissionsExt;
    ///
    /// // Read/write for owner and read for others.
    /// let permissions = Permissions::from_mode(0o644);
    /// assert_eq!(permissions.mode(), 0o644);
    /// ```
    #[stable(feature = "fs_ext", since = "1.1.0")]
    fn from_mode(mode: u32) -> Self;
}
#[stable(feature = "fs_ext", since = "1.1.0")]
impl PermissionsExt for Permissions {
    fn mode(&self) -> u32 {
        // Delegate straight to the platform representation.
        self.as_inner().mode()
    }

    fn set_mode(&mut self, mode: u32) {
        // Rebuild from the raw bits and replace the value in place.
        *self = Self::from_mode(mode);
    }

    fn from_mode(mode: u32) -> Permissions {
        Permissions::from_inner(FromInner::from_inner(mode))
    }
}
/// Unix-specific extensions to `OpenOptions`
#[stable(feature = "fs_ext", since = "1.1.0")]
pub trait OpenOptionsExt {
    /// Sets the mode bits that a new file will be created with.
    ///
    /// If a new file is created as part of a `File::open_opts` call then this
    /// specified `mode` will be used as the permission bits for the new file.
    /// If no `mode` is set, the default of `0o666` will be used.
    /// The operating system masks out bits with the system's `umask`, to produce
    /// the final permissions.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// extern crate libc;
    /// use std::fs::OpenOptions;
    /// use std::os::unix::fs::OpenOptionsExt;
    ///
    /// let mut options = OpenOptions::new();
    /// options.mode(0o644); // Give read/write for owner and read for others.
    /// let file = options.open("foo.txt");
    /// ```
    #[stable(feature = "fs_ext", since = "1.1.0")]
    fn mode(&mut self, mode: u32) -> &mut Self;

    /// Pass custom flags to the `flags` argument of `open`.
    ///
    /// The bits that define the access mode are masked out with `O_ACCMODE`, to
    /// ensure they do not interfere with the access mode set by Rust's options.
    ///
    /// Custom flags can only set flags, not remove flags set by Rust's options.
    /// This option overwrites any previously set custom flags.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// extern crate libc;
    /// use std::fs::OpenOptions;
    /// use std::os::unix::fs::OpenOptionsExt;
    ///
    /// let mut options = OpenOptions::new();
    /// options.write(true);
    /// if cfg!(unix) {
    ///     options.custom_flags(libc::O_NOFOLLOW);
    /// }
    /// let file = options.open("foo.txt");
    /// ```
    #[stable(feature = "open_options_ext", since = "1.10.0")]
    fn custom_flags(&mut self, flags: i32) -> &mut Self;
}
#[stable(feature = "fs_ext", since = "1.1.0")]
impl OpenOptionsExt for OpenOptions {
    fn mode(&mut self, mode: u32) -> &mut OpenOptions {
        // Record the creation mode on the inner options, then hand the
        // builder back for chaining.
        self.as_inner_mut().mode(mode);
        self
    }

    fn custom_flags(&mut self, flags: i32) -> &mut OpenOptions {
        // Record the extra `open(2)` flags, then hand the builder back.
        self.as_inner_mut().custom_flags(flags);
        self
    }
}
// Hm, why are there casts here to the returned type, shouldn't the types always
// be the same? Right you are! Turns out, however, on android at least the types
// in the raw `stat` structure are not the same as the types being returned. Who
// knew!
//
// As a result to make sure this compiles for all platforms we do the manual
// casts and rely on manual lowering to `stat` if the raw type is desired.
/// Unix-specific extensions to `fs::Metadata`, exposing fields of the raw
/// `stat` structure (see the comment above about platform-dependent types).
#[stable(feature = "metadata_ext", since = "1.1.0")]
pub trait MetadataExt {
    /// File type and permission bits (`st_mode`).
    #[stable(feature = "metadata_ext", since = "1.1.0")]
    fn mode(&self) -> u32;
    /// Owner user id (`st_uid`).
    #[stable(feature = "metadata_ext", since = "1.1.0")]
    fn uid(&self) -> u32;
    /// Owner group id (`st_gid`).
    #[stable(feature = "metadata_ext", since = "1.1.0")]
    fn gid(&self) -> u32;
    /// File size in bytes (`st_size`).
    #[stable(feature = "metadata_ext", since = "1.1.0")]
    fn size(&self) -> u64;
    /// Last access time, whole seconds (`st_atime`).
    #[stable(feature = "metadata_ext", since = "1.1.0")]
    fn atime(&self) -> i64;
    /// Nanosecond part of the last access time (`st_atime_nsec`).
    #[stable(feature = "metadata_ext", since = "1.1.0")]
    fn atime_nsec(&self) -> i64;
    /// Last modification time, whole seconds (`st_mtime`).
    #[stable(feature = "metadata_ext", since = "1.1.0")]
    fn mtime(&self) -> i64;
    /// Nanosecond part of the last modification time (`st_mtime_nsec`).
    #[stable(feature = "metadata_ext", since = "1.1.0")]
    fn mtime_nsec(&self) -> i64;
    /// Last status-change time, whole seconds (`st_ctime`).
    #[stable(feature = "metadata_ext", since = "1.1.0")]
    fn ctime(&self) -> i64;
    /// Nanosecond part of the last status-change time (`st_ctime_nsec`).
    #[stable(feature = "metadata_ext", since = "1.1.0")]
    fn ctime_nsec(&self) -> i64;
}
#[stable(feature = "metadata_ext", since = "1.1.0")]
impl MetadataExt for fs::Metadata {
    // Each accessor reads the matching field of the raw `stat` structure.
    // The explicit casts normalize platform-dependent field types to the
    // documented return types (see the comment above the trait).
    fn mode(&self) -> u32 { self.as_inner().as_inner().st_mode as u32 }
    fn uid(&self) -> u32 { self.as_inner().as_inner().st_uid as u32 }
    fn gid(&self) -> u32 { self.as_inner().as_inner().st_gid as u32 }
    fn size(&self) -> u64 { self.as_inner().as_inner().st_size as u64 }
    fn atime(&self) -> i64 { self.as_inner().as_inner().st_atime as i64 }
    fn atime_nsec(&self) -> i64 { self.as_inner().as_inner().st_atime_nsec as i64 }
    fn mtime(&self) -> i64 { self.as_inner().as_inner().st_mtime as i64 }
    fn mtime_nsec(&self) -> i64 { self.as_inner().as_inner().st_mtime_nsec as i64 }
    fn ctime(&self) -> i64 { self.as_inner().as_inner().st_ctime as i64 }
    fn ctime_nsec(&self) -> i64 { self.as_inner().as_inner().st_ctime_nsec as i64 }
}
/// Add special unix types (block/char device, fifo and socket)
/// to `fs::FileType` queries.
#[stable(feature = "file_type_ext", since = "1.5.0")]
pub trait FileTypeExt {
    /// Returns whether this file type is a block device.
    #[stable(feature = "file_type_ext", since = "1.5.0")]
    fn is_block_device(&self) -> bool;
    /// Returns whether this file type is a char device.
    #[stable(feature = "file_type_ext", since = "1.5.0")]
    fn is_char_device(&self) -> bool;
    /// Returns whether this file type is a fifo.
    #[stable(feature = "file_type_ext", since = "1.5.0")]
    fn is_fifo(&self) -> bool;
    /// Returns whether this file type is a socket.
    #[stable(feature = "file_type_ext", since = "1.5.0")]
    fn is_socket(&self) -> bool;
}
#[stable(feature = "file_type_ext", since = "1.5.0")]
impl FileTypeExt for fs::FileType {
    // FIXME: none of the special unix file types are detected on this
    // target yet; every query conservatively reports `false`.
    fn is_block_device(&self) -> bool {
        false
    }
    fn is_char_device(&self) -> bool {
        false
    }
    fn is_fifo(&self) -> bool {
        false
    }
    fn is_socket(&self) -> bool {
        false
    }
}
/// Creates a new symbolic link on the filesystem.
///
/// The `dst` path will be a symbolic link pointing to the `src` path.
///
/// # Note
///
/// On Windows a symbolic link must state whether it points to a file or to
/// a directory: use `os::windows::fs::symlink_file` or
/// `os::windows::fs::symlink_dir` there. Additionally, on Windows the
/// process must hold `SeCreateSymbolicLinkPrivilege` to create a
/// symbolic link.
///
/// # Examples
///
/// ```
/// use std::os::unix::fs;
///
/// # fn foo() -> std::io::Result<()> {
/// fs::symlink("a.txt", "b.txt")?;
/// # Ok(())
/// # }
/// ```
#[stable(feature = "symlink", since = "1.1.0")]
pub fn symlink<P: AsRef<Path>, Q: AsRef<Path>>(src: P, dst: Q) -> io::Result<()>
{
    let (src, dst) = (src.as_ref(), dst.as_ref());
    sys::fs::symlink(src, dst)
}
#[stable(feature = "dir_builder", since = "1.6.0")]
/// An extension trait for `fs::DirBuilder` for unix-specific options.
pub trait DirBuilderExt {
    /// Sets the mode to create new directories with. This option defaults to
    /// 0o777.
    ///
    /// Returns `&mut Self` so calls can be chained builder-style.
    ///
    /// # Examples
    ///
    /// ```ignore
    /// use std::fs::DirBuilder;
    /// use std::os::unix::fs::DirBuilderExt;
    ///
    /// let mut builder = DirBuilder::new();
    /// builder.mode(0o755);
    /// ```
    #[stable(feature = "dir_builder", since = "1.6.0")]
    fn mode(&mut self, mode: u32) -> &mut Self;
}
#[stable(feature = "dir_builder", since = "1.6.0")]
impl DirBuilderExt for fs::DirBuilder {
    fn mode(&mut self, mode: u32) -> &mut fs::DirBuilder {
        // Record the mode on the underlying builder, return `self` for chaining.
        self.as_inner_mut().set_mode(mode); self
    }
}
| 33.976589 | 86 | 0.603307 |
8ad9da8f9c95997effe4dc464e794c6bdb3f3136 | 3,454 | #![allow(clippy::len_without_is_empty)]
use std::hash::{Hash, Hasher};
use std::ops;
/// A two-dimensional vector of `f32` components.
///
/// `repr(C)` fixes the `x`/`y` field order (two consecutive `f32`s, no
/// padding).
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub struct Vec2 {
    pub x: f32,
    pub y: f32,
}
impl PartialEq for Vec2 {
    /// Exact component-wise comparison.
    ///
    /// `validate` rejects NaN components in debug builds; with NaN excluded,
    /// `==` on floats behaves as a proper equivalence relation.
    fn eq(&self, other: &Self) -> bool {
        self.validate();
        self.x == other.x && self.y == other.y
    }
}
impl Eq for Vec2 {}
impl Hash for Vec2 {
    /// Hashes the IEEE-754 bit patterns of both components.
    ///
    /// `-0.0` is normalized to `+0.0` first so that values which compare
    /// equal via `PartialEq` (`0.0 == -0.0`) also hash equally, as the
    /// `Hash`/`Eq` contract requires. The previous implementation hashed the
    /// raw bytes of the struct and violated that contract for signed zeros.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.validate();
        // `v + 0.0` maps -0.0 to +0.0 and leaves every other non-NaN value
        // unchanged.
        state.write_u32((self.x + 0.0).to_bits());
        state.write_u32((self.y + 0.0).to_bits());
    }
}
impl Default for Vec2 {
    fn default() -> Self {
        Self::ZERO
    }
}
impl Vec2 {
    pub const ZERO: Self = Self { x: 0.0, y: 0.0 };
    pub const UNIT: Self = Self { x: 1.0, y: 1.0 };
    pub const X: Self = Self {
        x: 1.0,
        y: 0.0,
    };
    pub const Y: Self = Self {
        x: 0.0,
        y: 1.0,
    };
    /// Debug-only check that neither component is NaN.
    fn validate(self) {
        debug_assert!(!self.x.is_nan());
        debug_assert!(!self.y.is_nan());
    }
    /// Creates a vector from its components.
    #[inline]
    pub const fn new(x: f32, y: f32) -> Self {
        Self { x, y }
    }
    /// Dot product of `self` and `other`.
    #[inline]
    pub fn dot(self, other: Self) -> f32 {
        self.x * other.x + self.y * other.y
    }
    /// Euclidean length of the vector.
    #[inline]
    pub fn len(self) -> f32 {
        self.dot(self).sqrt()
    }
    /// Angle between `self` and `other`, in radians.
    #[inline]
    pub fn angle(self, other: Self) -> f32 {
        (self.dot(other) / (self.len() * other.len())).acos()
    }
    /// A vector perpendicular to `self` (`(y, -x)`).
    #[inline]
    pub fn perpendicular(self) -> Self {
        Self {
            x: self.y,
            y: -self.x,
        }
    }
    /// Multiplies both components by `scalar`.
    #[inline]
    pub fn scale(self, scalar: f32) -> Self {
        Self {
            x: self.x * scalar,
            y: self.y * scalar,
        }
    }
    /// Per component min
    pub fn min(&self, other: Self) -> Self {
        Self {
            x: self.x.min(other.x),
            y: self.y.min(other.y),
        }
    }
    /// Per component max
    pub fn max(&self, other: Self) -> Self {
        Self {
            x: self.x.max(other.x),
            y: self.y.max(other.y),
        }
    }
    /// Unit-length copy of `self`, or `None` when the length is below
    /// `f32::EPSILON` (normalizing a near-zero vector is meaningless).
    #[inline]
    pub fn normalized(self) -> Option<Self> {
        let len = self.len();
        if len >= std::f32::EPSILON {
            let inv_len = 1.0 / len;
            return Some(Self {
                x: self.x * inv_len,
                y: self.y * inv_len,
            });
        }
        None
    }
}
impl From<(f32, f32)> for Vec2 {
    /// Builds a vector from an `(x, y)` tuple.
    fn from(v: (f32, f32)) -> Self {
        let (x, y) = v;
        Self { x, y }
    }
}
impl From<Vec2> for (f32, f32) {
    /// Unpacks a vector into an `(x, y)` tuple.
    fn from(v: Vec2) -> Self {
        (v.x, v.y)
    }
}
impl ops::Add<Self> for Vec2 {
    type Output = Self;
    /// Component-wise addition.
    fn add(self, b: Self) -> Self {
        Self { x: self.x + b.x, y: self.y + b.y }
    }
}
impl ops::AddAssign<Self> for Vec2 {
    /// In-place component-wise addition.
    fn add_assign(&mut self, b: Self) {
        *self = *self + b;
    }
}
impl ops::Sub<Self> for Vec2 {
    type Output = Self;
    /// Component-wise subtraction.
    fn sub(self, b: Self) -> Self {
        Self { x: self.x - b.x, y: self.y - b.y }
    }
}
impl ops::SubAssign<Self> for Vec2 {
    /// In-place component-wise subtraction.
    fn sub_assign(&mut self, b: Self) {
        *self = *self - b;
    }
}
impl ops::Neg for Vec2 {
    type Output = Self;
    /// Component-wise negation.
    fn neg(self) -> Self::Output {
        Self { x: -self.x, y: -self.y }
    }
}
| 19.188889 | 61 | 0.444702 |
f4f52d4ab1c70429d4b85e9faf3ab940dcfbc39a | 510 | use crate::filter::with_table;
use model::{Tables, TablesFormat};
use warp::filters::BoxedFilter;
use warp::path;
use warp::{Filter, Rejection, Reply};
/// Builds the `GET /tables` route, which replies with the JSON-serialized
/// table set.
pub fn tables_route(tables: &Tables) -> BoxedFilter<(impl Reply,)> {
    let route = warp::get()
        .and(path("tables"))
        .and(with_table(tables))
        .and_then(table_handler);
    route.boxed()
}
/// Serializes the shared table state to JSON for the `/tables` endpoint.
async fn table_handler(tables: Tables) -> std::result::Result<impl Reply, Rejection> {
    let body = serde_json::to_string(&TablesFormat::from(tables)).unwrap();
    Ok(body)
}
| 28.333333 | 86 | 0.668627 |
9b01a2ed6193515eae98ea54c1f96e52a9da3090 | 10,161 | use crate::{
core::{color::Color, math::Rect},
renderer::framework::{
framebuffer::{CullFace, DrawParameters},
gl::{
self,
types::{GLboolean, GLenum, GLint, GLuint},
},
},
};
/// Shadow copy of the OpenGL pipeline state.
///
/// The setters in the `impl` below compare the requested value against this
/// cache and issue the corresponding GL call only when the value actually
/// changes, avoiding redundant state switches.
pub struct State {
    blend: bool,
    depth_test: bool,
    depth_write: bool,
    color_write: ColorMask,
    stencil_test: bool,
    cull_face: CullFace,
    culling: bool,
    stencil_mask: u32,
    clear_color: Color,
    clear_stencil: i32,
    clear_depth: f32,
    framebuffer: GLuint,
    viewport: Rect<i32>,
    blend_src_factor: GLuint,
    blend_dst_factor: GLuint,
    program: GLuint,
    // One cached binding per texture unit (GL_TEXTURE0 .. GL_TEXTURE31).
    texture_units: [TextureUnit; 32],
    stencil_func: StencilFunc,
    stencil_op: StencilOp,
    vao: GLuint,
    vbo: GLuint,
}
/// Cached binding of a single texture unit: bind target plus texture object id.
#[derive(Copy, Clone)]
struct TextureUnit {
    target: GLenum,
    texture: GLuint,
}
impl Default for TextureUnit {
    /// An unbound 2D texture unit (texture object 0).
    fn default() -> Self {
        Self { target: gl::TEXTURE_2D, texture: 0 }
    }
}
/// Converts a Rust `bool` into the corresponding `GLboolean` constant.
fn bool_to_gl_bool(v: bool) -> GLboolean {
    match v {
        true => gl::TRUE,
        false => gl::FALSE,
    }
}
/// Per-channel color write mask (the `glColorMask` state).
#[derive(Copy, Clone, PartialOrd, PartialEq, Hash, Debug)]
pub struct ColorMask {
    pub red: bool,
    pub green: bool,
    pub blue: bool,
    pub alpha: bool,
}
impl Default for ColorMask {
    /// All channels writable.
    fn default() -> Self {
        Self::all(true)
    }
}
impl ColorMask {
    /// Mask with every channel set to `value`.
    pub fn all(value: bool) -> Self {
        Self { red: value, green: value, blue: value, alpha: value }
    }
}
/// Cached `glStencilFunc` parameters.
#[derive(Copy, Clone, PartialOrd, PartialEq, Hash, Debug)]
pub struct StencilFunc {
    pub func: GLenum,
    pub ref_value: GLint,
    pub mask: GLuint,
}
impl Default for StencilFunc {
    /// GL defaults: always pass, reference value 0, full mask.
    fn default() -> Self {
        Self { func: gl::ALWAYS, ref_value: 0, mask: 0xFFFF_FFFF }
    }
}
/// Cached `glStencilOp` parameters.
#[derive(Copy, Clone, PartialOrd, PartialEq, Hash, Debug)]
pub struct StencilOp {
    pub fail: GLenum,
    pub zfail: GLenum,
    pub zpass: GLenum,
}
impl Default for StencilOp {
    /// GL defaults: keep the stencil value on every outcome.
    fn default() -> Self {
        Self { fail: gl::KEEP, zfail: gl::KEEP, zpass: gl::KEEP }
    }
}
impl State {
    /// Creates a cache initialized to the default state of a fresh GL
    /// context; no GL calls are made here.
    pub fn new() -> Self {
        Self {
            blend: false,
            depth_test: false,
            depth_write: true,
            color_write: Default::default(),
            stencil_test: false,
            cull_face: CullFace::Back,
            culling: false,
            stencil_mask: 0xFFFF_FFFF,
            clear_color: Color::from_rgba(0, 0, 0, 0),
            clear_stencil: 0,
            clear_depth: 1.0,
            framebuffer: 0,
            viewport: Rect {
                x: 0,
                y: 0,
                w: 1,
                h: 1,
            },
            blend_src_factor: gl::ONE,
            blend_dst_factor: gl::ZERO,
            program: 0,
            texture_units: [Default::default(); 32],
            stencil_func: Default::default(),
            stencil_op: Default::default(),
            vao: 0,
            vbo: 0,
        }
    }
    // Every setter below follows the same pattern: update the cached value
    // and issue the GL call only when the requested value differs from it.
    pub fn set_framebuffer(&mut self, framebuffer: GLuint) {
        if self.framebuffer != framebuffer {
            self.framebuffer = framebuffer;
            unsafe { gl::BindFramebuffer(gl::FRAMEBUFFER, self.framebuffer) }
        }
    }
    pub fn set_viewport(&mut self, viewport: Rect<i32>) {
        if self.viewport != viewport {
            self.viewport = viewport;
            unsafe {
                gl::Viewport(
                    self.viewport.x,
                    self.viewport.y,
                    self.viewport.w,
                    self.viewport.h,
                );
            }
        }
    }
    pub fn set_blend(&mut self, blend: bool) {
        if self.blend != blend {
            self.blend = blend;
            unsafe {
                if self.blend {
                    gl::Enable(gl::BLEND);
                } else {
                    gl::Disable(gl::BLEND);
                }
            }
        }
    }
    pub fn set_depth_test(&mut self, depth_test: bool) {
        if self.depth_test != depth_test {
            self.depth_test = depth_test;
            unsafe {
                if self.depth_test {
                    gl::Enable(gl::DEPTH_TEST);
                } else {
                    gl::Disable(gl::DEPTH_TEST);
                }
            }
        }
    }
    pub fn set_depth_write(&mut self, depth_write: bool) {
        if self.depth_write != depth_write {
            self.depth_write = depth_write;
            unsafe {
                gl::DepthMask(bool_to_gl_bool(self.depth_write));
            }
        }
    }
    pub fn set_color_write(&mut self, color_write: ColorMask) {
        if self.color_write != color_write {
            self.color_write = color_write;
            unsafe {
                gl::ColorMask(
                    bool_to_gl_bool(self.color_write.red),
                    bool_to_gl_bool(self.color_write.green),
                    bool_to_gl_bool(self.color_write.blue),
                    bool_to_gl_bool(self.color_write.alpha),
                );
            }
        }
    }
    pub fn set_stencil_test(&mut self, stencil_test: bool) {
        if self.stencil_test != stencil_test {
            self.stencil_test = stencil_test;
            unsafe {
                if self.stencil_test {
                    gl::Enable(gl::STENCIL_TEST);
                } else {
                    gl::Disable(gl::STENCIL_TEST);
                }
            }
        }
    }
    pub fn set_cull_face(&mut self, cull_face: CullFace) {
        if self.cull_face != cull_face {
            self.cull_face = cull_face;
            unsafe { gl::CullFace(self.cull_face.into_gl_value()) }
        }
    }
    pub fn set_culling(&mut self, culling: bool) {
        if self.culling != culling {
            self.culling = culling;
            unsafe {
                if self.culling {
                    gl::Enable(gl::CULL_FACE);
                } else {
                    gl::Disable(gl::CULL_FACE);
                }
            }
        }
    }
    pub fn set_stencil_mask(&mut self, stencil_mask: u32) {
        if self.stencil_mask != stencil_mask {
            self.stencil_mask = stencil_mask;
            unsafe {
                gl::StencilMask(stencil_mask);
            }
        }
    }
    pub fn set_clear_color(&mut self, color: Color) {
        if self.clear_color != color {
            self.clear_color = color;
            let rgba = color.as_frgba();
            unsafe {
                gl::ClearColor(rgba.x, rgba.y, rgba.z, rgba.w);
            }
        }
    }
    /// Epsilon comparison: exact `f32` equality would be brittle here.
    pub fn set_clear_depth(&mut self, depth: f32) {
        if (self.clear_depth - depth).abs() > std::f32::EPSILON {
            self.clear_depth = depth;
            unsafe {
                gl::ClearDepth(depth as f64);
            }
        }
    }
    pub fn set_clear_stencil(&mut self, stencil: i32) {
        if self.clear_stencil != stencil {
            self.clear_stencil = stencil;
            unsafe {
                gl::ClearStencil(stencil);
            }
        }
    }
    pub fn set_blend_func(&mut self, sfactor: GLenum, dfactor: GLenum) {
        if self.blend_src_factor != sfactor || self.blend_dst_factor != dfactor {
            self.blend_src_factor = sfactor;
            self.blend_dst_factor = dfactor;
            unsafe {
                gl::BlendFunc(self.blend_src_factor, self.blend_dst_factor);
            }
        }
    }
    pub fn set_program(&mut self, program: GLuint) {
        if self.program != program {
            self.program = program;
            unsafe {
                gl::UseProgram(self.program);
            }
        }
    }
    /// Binds `texture` to the given sampler unit.
    ///
    /// Panics if `sampler_index` is outside the 32 cached units.
    pub fn set_texture(&mut self, sampler_index: usize, target: GLenum, texture: GLuint) {
        let unit = self.texture_units.get_mut(sampler_index).unwrap();
        if unit.target != target || unit.texture != texture {
            unit.texture = texture;
            unit.target = target;
            unsafe {
                gl::ActiveTexture(gl::TEXTURE0 + sampler_index as u32);
                gl::BindTexture(target, texture);
            }
        }
    }
    pub fn set_stencil_func(&mut self, func: StencilFunc) {
        if self.stencil_func != func {
            self.stencil_func = func;
            unsafe {
                gl::StencilFunc(
                    self.stencil_func.func,
                    self.stencil_func.ref_value,
                    self.stencil_func.mask,
                );
            }
        }
    }
    pub fn set_stencil_op(&mut self, op: StencilOp) {
        if self.stencil_op != op {
            self.stencil_op = op;
            unsafe {
                gl::StencilOp(
                    self.stencil_op.fail,
                    self.stencil_op.zfail,
                    self.stencil_op.zpass,
                );
            }
        }
    }
    pub fn set_vertex_array_object(&mut self, vao: GLuint) {
        if self.vao != vao {
            self.vao = vao;
            unsafe {
                gl::BindVertexArray(vao);
            }
        }
    }
    pub fn set_vertex_buffer_object(&mut self, vbo: GLuint) {
        if self.vbo != vbo {
            self.vbo = vbo;
            unsafe {
                gl::BindBuffer(gl::ARRAY_BUFFER, vbo);
            }
        }
    }
    /// Forgets the cached texture/program bindings so the next setters
    /// re-issue the GL calls (use after external code touched GL state).
    pub fn invalidate_resource_bindings_cache(&mut self) {
        self.texture_units = Default::default();
        self.program = 0;
    }
    /// Applies every field of `draw_params` through the cached setters.
    pub fn apply_draw_parameters(&mut self, draw_params: &DrawParameters) {
        self.set_blend(draw_params.blend);
        self.set_depth_test(draw_params.depth_test);
        self.set_depth_write(draw_params.depth_write);
        self.set_color_write(draw_params.color_write);
        self.set_stencil_test(draw_params.stencil_test);
        self.set_cull_face(draw_params.cull_face);
        self.set_culling(draw_params.culling);
    }
}
| 24.722628 | 90 | 0.497687 |
cc9949b3894196bf69c89c475190468e7daa0aea | 1,781 | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! Errors that originate from the PeerManager module
use crate::protocols::wire::messaging::v1 as wire;
use diem_network_address::NetworkAddress;
use diem_types::PeerId;
use futures::channel::{mpsc, oneshot};
use thiserror::Error;
/// Errors that originate from the PeerManager module.
#[derive(Debug, Error)]
pub enum PeerManagerError {
    /// Catch-all wrapper for otherwise uncategorized errors.
    #[error("Error: {0:?}")]
    Error(#[from] anyhow::Error),
    #[error("IO error: {0}")]
    IoError(#[from] ::std::io::Error),
    /// Failure reported by the underlying transport layer.
    #[error("Transport error: {0}")]
    TransportError(::anyhow::Error),
    #[error("Shutting down Peer")]
    ShuttingDownPeer,
    #[error("Not connected with Peer {0}")]
    NotConnected(PeerId),
    #[error("Already connected at {0}")]
    AlreadyConnected(NetworkAddress),
    /// The receiving side dropped a oneshot before a reply was sent.
    #[error("Sending end of oneshot dropped")]
    OneshotSenderDropped,
    #[error("Failed to send on mpsc: {0}")]
    MpscSendError(mpsc::SendError),
    #[error("Serialization error {0}")]
    LcsError(lcs::Error),
    #[error("Error reading off wire: {0}")]
    WireReadError(#[from] wire::ReadError),
    #[error("Error writing to wire: {0}")]
    WireWriteError(#[from] wire::WriteError),
}
impl PeerManagerError {
    /// Wraps any transport-layer error into the `TransportError` variant.
    pub fn from_transport_error<E: Into<::anyhow::Error>>(error: E) -> Self {
        Self::TransportError(error.into())
    }
}
impl From<oneshot::Canceled> for PeerManagerError {
    /// A canceled oneshot means its sending end was dropped.
    fn from(_: oneshot::Canceled) -> Self {
        Self::OneshotSenderDropped
    }
}
impl From<lcs::Error> for PeerManagerError {
    fn from(e: lcs::Error) -> Self {
        Self::LcsError(e)
    }
}
impl From<mpsc::SendError> for PeerManagerError {
    fn from(e: mpsc::SendError) -> Self {
        Self::MpscSendError(e)
    }
}
| 25.084507 | 77 | 0.65525 |
18e64d657efe2cf7216cd0aaea8249d49f86eec3 | 1,901 | use bytes::Bytes;
use crate::Error;
use crate::protocol as pb;
use crate::plainbuffer::PbufSerde;
use std::convert::{TryFrom};
use super::*;
/// Request payload of the `PutRow` API: writes `row` into `table_name`.
#[derive(Debug, Clone)]
pub struct PutRowRequest {
    pub table_name: Name,
    pub row: Row,
    // Precondition on row existence (`RowExistenceExpectation::Ignore` when
    // constructed via `new`).
    pub condition: Condition,
    // What the service should echo back in the response
    // (`InReturn::Nothing` when constructed via `new`).
    pub in_return: InReturn,
}
impl PutRowRequest {
    /// Creates a request writing `row` into `table_name`, ignoring row
    /// existence and asking for nothing to be returned.
    pub fn new<T: ToString>(table_name: T, row: Row) -> Result<Self, Error> {
        let condition = Condition{
            row_exist: RowExistenceExpectation::Ignore,
        };
        Ok(Self{
            table_name: Name::new(table_name),
            row,
            condition,
            in_return: InReturn::Nothing,
        })
    }
}
/// Response of the `PutRow` API; carries only the common base metadata.
#[derive(Debug, Clone)]
pub struct PutRowResponse {
    pub base: super::BaseResponse,
}
impl From<PutRowRequest> for pb::PutRowRequest {
    /// Converts the typed request into its protobuf representation.
    fn from(x: PutRowRequest) -> pb::PutRowRequest {
        let row = x.row.to_pbuf();
        pb::PutRowRequest{
            table_name: x.table_name.into(),
            row,
            condition: x.condition.into(),
            return_content: Some(x.in_return.into()),
        }
    }
}
impl From<pb::PutRowResponse> for PutRowResponse {
    /// The protobuf response carries no payload of its own; only the
    /// default base metadata is kept.
    fn from(_: pb::PutRowResponse) -> PutRowResponse {
        PutRowResponse{
            base: super::BaseResponse::default(),
        }
    }
}
impl From<PutRowRequest> for Bytes {
    /// Serializes the request into the wire format.
    fn from(x: PutRowRequest) -> Bytes {
        serialize_request::<PutRowRequest, pb::PutRowRequest>(x)
    }
}
impl TryFrom<Vec<u8>> for PutRowResponse {
    type Error = Error;
    /// Parses a raw response body.
    fn try_from(v: Vec<u8>) -> Result<Self, Error> {
        new_response::<Self, pb::PutRowResponse>(&v)
    }
}
impl super::Request for PutRowRequest {
    // Identifies this request as the PutRow action.
    fn action(&self) -> Action {
        Action::PutRow
    }
    // The request path is simply the action name.
    fn path(&self) -> String {
        self.action().to_string()
    }
}
impl super::Response for PutRowResponse {
    // Gives the framework mutable access to the shared base fields.
    fn base_mut_ref(&mut self) -> &mut BaseResponse {
        &mut self.base
    }
}
| 23.182927 | 77 | 0.601789 |
d531944b08d0a326e9e5076d8e5e942654aadba2 | 469 | mod private;
mod public;
pub use private::{GetTradeBalanceRequest, GetTradeBalanceResponse};
pub use public::{GetServerTimeRequest, GetServerTimeResponse};
use serde::de::DeserializeOwned;
use serde::Serialize;
/// A typed REST request that knows its endpoint and response type.
pub trait Request: Serialize {
    /// Whether the endpoint requires request signing (private API).
    const SIGNED: bool = false;
    /// Endpoint path for this request.
    const ENDPOINT: &'static str;
    /// Whether the request carries a serialized payload.
    const HAS_PAYLOAD: bool = true;
    /// The type the endpoint's response deserializes into.
    type Response: DeserializeOwned;
    #[inline]
    fn no_payload(&self) -> bool {
        !Self::HAS_PAYLOAD
    }
}
1abc5f7005a99952c9f149596eaace100de56cd9 | 19,953 | use crate::CHECK;
use ton_block::{
ShardIdent, BlockIdExt, ConfigParams, McStateExtra, ShardHashes, ValidatorSet, McShardRecord,
FutureSplitMerge, INVALID_WORKCHAIN_ID, MASTERCHAIN_ID, GlobalCapabilities
};
use ton_types::{fail, error, Result};
use std::{collections::HashSet, cmp::max};
/// Bitmask of the global network capabilities this node supports.
pub fn supported_capabilities() -> u64 {
    let caps = GlobalCapabilities::CapCreateStatsEnabled as u64
        | GlobalCapabilities::CapBounceMsgBody as u64
        | GlobalCapabilities::CapReportVersion as u64
        | GlobalCapabilities::CapShortDequeue as u64;
    caps
}
/// Software/block-format version this node reports.
pub fn supported_version() -> u32 {
    1
}
/// Validates a new shard block against the latest masterchain state: the
/// workchain must be active, basic and enabled, and the previous block(s)
/// must be consistent with the masterchain shard configuration with respect
/// to split/merge events.
///
/// Returns `(now_upper_limit, before_split, accept_msgs)`:
/// * `now_upper_limit` — upper bound for the block's unixtime when a split
///   window is approaching (collator path only), otherwise `u32::MAX`;
/// * `before_split` — whether the new block must carry the before_split flag;
/// * `accept_msgs` — whether the workchain currently accepts messages.
pub fn check_this_shard_mc_info(
    shard: &ShardIdent,
    block_id: &BlockIdExt,
    after_merge: bool,
    after_split: bool,
    mut before_split: bool,
    prev_blocks: &Vec<BlockIdExt>,
    config_params: &ConfigParams,
    mc_state_extra: &McStateExtra,
    is_validate: bool,
    now: u32,
) -> Result<(u32, bool, bool)> {
    let mut now_upper_limit = u32::MAX;
    // let mut before_split = false;
    // let mut accept_msgs = false;
    let wc_info = config_params.workchains()?.get(&shard.workchain_id())?
        .ok_or_else(|| error!("cannot create new block for workchain {} absent \
            from workchain configuration", shard.workchain_id()))?;
    if !wc_info.active() {
        fail!("cannot create new block for disabled workchain {}", shard.workchain_id());
    }
    if !wc_info.basic() {
        fail!("cannot create new block for non-basic workchain {}", shard.workchain_id());
    }
    if wc_info.enabled_since != 0 && wc_info.enabled_since > now {
        fail!("cannot create new block for workchain {} which is not enabled yet", shard.workchain_id())
    }
    // if !is_validate {
    //     if wc_info.min_addr_len != 0x100 || wc_info.max_addr_len != 0x100 {
    //         fail!("wc_info.min_addr_len == 0x100 || wc_info.max_addr_len == 0x100");
    //     }
    // }
    let accept_msgs = wc_info.accept_msgs;
    let mut split_allowed = false;
    if !mc_state_extra.shards().has_workchain(shard.workchain_id())? {
        // creating first block for a new workchain
        log::debug!(target: "validator", "creating first block for workchain {}", shard.workchain_id());
        fail!("cannot create first block for workchain {} after previous block {} \
            because no shard for this workchain is declared yet",
                shard.workchain_id(), prev_blocks[0])
    }
    // `left` is the masterchain shard record covering our left ancestor mask.
    let left = mc_state_extra.shards().find_shard(&shard.left_ancestor_mask()?)?
        .ok_or_else(|| error!("cannot create new block for shard {} because there is no \
            similar shard in existing masterchain configuration", shard))?;
    // log::info!(target: "validator", "left for {} is {:?}", block_id(), left.descr());
    if left.shard() == shard {
        log::info!("check_this_shard_mc_info, block: {} left: {:?}", block_id, left);
        // no split/merge
        if after_merge || after_split {
            fail!("cannot generate new shardchain block for {} after a supposed split or merge event \
                because this event is not reflected in the masterchain", shard)
        }
        check_prev_block(&left.block_id, &prev_blocks[0], true)?;
        if left.descr().before_split {
            fail!("cannot generate new unsplit shardchain block for {} \
                after previous block {} with before_split set", shard, left.block_id())
        }
        if left.descr().before_merge {
            let sib = mc_state_extra.shards().get_shard(&shard.sibling())?
                .ok_or_else(|| error!("No sibling for {}", shard))?;
            if sib.descr().before_merge {
                fail!("cannot generate new unmerged shardchain block for {} after both {} \
                    and {} set before_merge flags", shard, left.block_id(), sib.block_id())
            }
        }
        // A split is scheduled for this shard: the validator may accept a
        // before_split block inside the window; the collator decides whether
        // to set the flag itself.
        if left.descr().is_fsm_split() {
            if is_validate {
                if now >= left.descr().fsm_utime() && now < left.descr().fsm_utime_end() {
                    split_allowed = true;
                }
            } else {
                // t-node's collator contains next code:
                // auto tmp_now = std::max<td::uint32>(config_->utime, (unsigned)std::time(nullptr));
                // but `now` parameter passed to this function is already initialized same way.
                // `13` and `11` is a magic from t-node
                if now >= left.descr().fsm_utime() && now + 13 < left.descr().fsm_utime_end() {
                    now_upper_limit = left.descr().fsm_utime_end() - 11; // ultimate value of now_ must be at most now_upper_limit_
                    before_split = true;
                    log::info!("BEFORE_SPLIT set for the new block of shard {}", shard);
                }
            }
        }
    } else if shard.is_parent_for(left.shard()) {
        // after merge
        if !left.descr().before_merge {
            fail!("cannot create new merged block for shard {} \
                because its left ancestor {} has no before_merge flag", shard, left.block_id())
        }
        let right = match mc_state_extra.shards().find_shard(&shard.right_ancestor_mask()?)? {
            Some(right) => right,
            None => fail!("cannot create new block for shard {} after a preceding merge \
                because there is no right ancestor shard in existing masterchain configuration", shard)
        };
        if !shard.is_parent_for(right.shard()) {
            fail!("cannot create new block for shard {} after a preceding merge \
                because its right ancestor appears to be {}", shard, right.block_id());
        }
        if !right.descr().before_merge {
            fail!("cannot create new merged block for shard {} \
                because its right ancestor {} has no before_merge flag", shard, right.block_id())
        }
        if after_split {
            fail!("cannot create new block for shard {} after a purported split \
                because existing shard configuration suggests a merge", shard)
        } else if after_merge {
            check_prev_block_exact(shard, left.block_id(), &prev_blocks[0])?;
            check_prev_block_exact(shard, right.block_id(), &prev_blocks[1])?;
        } else {
            // Block chains may run ahead of the masterchain by at most 8 blocks.
            let cseqno = std::cmp::max(left.descr().seq_no, right.descr.seq_no);
            if prev_blocks[0].seq_no <= cseqno {
                fail!("cannot create new block for shard {} after previous block {} \
                    because masterchain contains newer possible ancestors {} and {}",
                        shard, prev_blocks[0], left.block_id(), right.block_id())
            }
            if prev_blocks[0].seq_no >= cseqno + 8 {
                fail!("cannot create new block for shard {} after previous block {} \
                    because this would lead to an unregistered chain of length > 8 \
                    (masterchain contains only {} and {})",
                        shard, prev_blocks[0], left.block_id(), right.block_id())
            }
        }
    } else if left.shard.is_parent_for(shard) {
        // after split
        if !left.descr().before_split {
            fail!("cannot generate new split shardchain block for {} \
                after previous block {} without before_split", shard, left.block_id())
        }
        if after_merge {
            fail!("cannot create new block for shard {} \
                after a purported merge because existing shard configuration suggests a split", shard)
        } else if after_split {
            check_prev_block_exact(shard, left.block_id(), &prev_blocks[0])?;
        } else {
            check_prev_block(left.block_id(), &prev_blocks[0], true)?;
        }
    } else {
        fail!("masterchain configuration contains only block {} \
            which belongs to a different shard from ours {}", left.block_id(), shard)
    }
    if is_validate && before_split && !split_allowed {
        fail!("new block {} has before_split set, \
            but this is forbidden by masterchain configuration", block_id)
    }
    Ok((now_upper_limit, before_split, accept_msgs))
}
/// Verifies that the previous block `prev` of `shard` is exactly the block
/// `listed` in the masterchain configuration. Used right after a split or
/// merge event, where the ancestor must match precisely.
pub fn check_prev_block_exact(shard: &ShardIdent, listed: &BlockIdExt, prev: &BlockIdExt) -> Result<()> {
    if listed == prev {
        return Ok(())
    }
    fail!("cannot generate shardchain block for shard {} \
        after previous block {} because masterchain configuration expects \
        another previous block {} and we are immediately after a split/merge event",
        shard, prev, listed)
}
/// Verifies that `prev` is a valid successor position relative to the block
/// `listed` in the masterchain configuration: `prev` must not be older than
/// `listed`, must coincide with it at equal height, and (when
/// `chk_chain_len` is set) may not run more than 8 blocks ahead of it.
pub fn check_prev_block(listed: &BlockIdExt, prev: &BlockIdExt, chk_chain_len: bool) -> Result<()> {
    if listed.seq_no > prev.seq_no {
        fail!("cannot generate a shardchain block after previous block {} \
            because masterchain configuration already contains a newer block {}", prev, listed)
    }
    if listed.seq_no == prev.seq_no && listed != prev {
        fail!("cannot generate a shardchain block after previous block {} \
            because masterchain configuration lists another block {} of the same height", prev, listed)
    }
    if chk_chain_len && prev.seq_no >= listed.seq_no + 8 {
        fail!("cannot generate next block after {} because this would lead to \
            an unregistered chain of length > 8 (only {} is registered in the masterchain)", prev, listed)
    }
    Ok(())
}
/// Checks that `validator_set` — the set that produced `block_id` — matches
/// the validator set computed from the masterchain state and config for
/// `shard` at time `now`, including the catchain session seqno.
/// Always succeeds for fake blocks.
pub fn check_cur_validator_set(
    validator_set: &ValidatorSet,
    block_id: &BlockIdExt,
    shard: &ShardIdent,
    mc_state_extra: &McStateExtra,
    old_mc_shards: &ShardHashes,
    config_params: &ConfigParams,
    now: u32,
    is_fake: bool,
) -> Result<bool> {
    if is_fake { return Ok(true) }
    let mut cc_seqno_with_delta = 0; // cc_seqno delta = 0
    // Masterchain takes the catchain seqno from the state itself; a shard
    // chain derives it from the shard configuration.
    let cc_seqno_from_state = if shard.is_masterchain() {
        mc_state_extra.validator_info.catchain_seqno
    } else {
        old_mc_shards.calc_shard_cc_seqno(&shard)?
    };
    let nodes = config_params.compute_validator_set_cc(&shard, now, cc_seqno_from_state, &mut cc_seqno_with_delta)?;
    if nodes.is_empty() {
        fail!("Cannot compute masterchain validator set from old masterchain state")
    }
    if validator_set.catchain_seqno() != cc_seqno_with_delta {
        fail!("Current validator set catchain seqno mismatch: this validator set has cc_seqno={}, \
            only validator set with cc_seqno={} is entitled to create block {}",
                validator_set.catchain_seqno(), cc_seqno_with_delta, block_id);
    }
    // TODO: check compute_validator_set
    let nodes = ValidatorSet::with_cc_seqno(0, 0, 0, cc_seqno_with_delta, nodes)?.list().clone();
    let export_nodes = validator_set.list();
    // log::debug!(target: "validator", "block candidate validator set {:?}", export_nodes);
    // log::debug!(target: "validator", "current validator set {:?}", nodes);
    CHECK!(export_nodes, &nodes);
    if export_nodes != &nodes /* && !is_fake */ {
        fail!("current validator set mismatch: this validator set is not \
            entitled to create block {}", block_id)
    }
    Ok(true)
}
/// Checks whether the top shard block description `new_info` may replace the
/// entry (or the two merged entries) identified by `old_blkids` in `shards`.
///
/// Verifies workchain validity, split/merge consistency with the current
/// shard configuration (including the fsm_split/fsm_merge time windows),
/// catchain seqno continuity and the `end_lt` limit.
///
/// Returns `(updatable_now, ancestor)`: `updatable_now` is `false` when the
/// update is one half of a split and requires its sibling to be applied
/// together; `ancestor` is the old shard record when the update involves
/// neither a split nor a merge.
pub fn may_update_shard_block_info(
    shards: &ShardHashes,
    new_info: &McShardRecord,
    old_blkids: &Vec<BlockIdExt>,
    lt_limit: u64,
    shards_updated: Option<&HashSet<ShardIdent>>,
) -> Result<(bool, Option<McShardRecord>)> {
    log::trace!("may_update_shard_block_info new_info.block_id(): {}", new_info.block_id());
    let wc = new_info.shard.workchain_id();
    if wc == INVALID_WORKCHAIN_ID || wc == MASTERCHAIN_ID {
        fail!("new top shard block description belongs to an invalid workchain {}", wc)
    }
    if !shards.has_workchain(wc)? {
        fail!("new top shard block belongs to an unknown or disabled workchain {}", wc)
    }
    if old_blkids.len() != 1 && old_blkids.len() != 2 {
        fail!("`old_blkids` must have either one or two start blocks in a top shard block update")
    }
    // before_split: the old shard is the parent of the new one, i.e. the new
    // block is one half of a split. before_merge: two start blocks merge.
    let before_split = old_blkids[0].shard().is_parent_for(&new_info.shard);
    let before_merge = old_blkids.len() == 2;
    if before_merge {
        if old_blkids[0].shard().sibling() != *old_blkids[1].shard() {
            fail!("the two start blocks of a top shard block update must be siblings")
        }
        if !new_info.shard.is_parent_for(old_blkids[0].shard()) {
            fail!(
                "the two start blocks of a top shard block update do not merge into expected \
                final shard {}",
                old_blkids[0].shard()
            )
        }
    } else if (new_info.shard != *old_blkids[0].shard()) && !before_split {
        fail!(
            "the start block of a top shard block update must either coincide with the final\
            shard or be its parent"
        )
    }
    let mut ancestor = None;
    let mut old_cc_seqno = 0;
    // Validate every start block against its record in the current shard
    // configuration.
    for ob in old_blkids {
        // NOTE(review): `unwrap_or_default()` silently turns a lookup error
        // into "shard absent" — confirm this is intended.
        let odef = shards
            .get_shard(ob.shard())
            .unwrap_or_default()
            .ok_or_else(|| {
                error!(
                    "the start block's shard {} of a top shard block update is not contained \
                    in the previous shard configuration",
                    ob,
                )
            })?;
        if odef.block_id().seq_no() != ob.seq_no() ||
           odef.block_id().root_hash != *ob.root_hash() || odef.block_id().file_hash != *ob.file_hash() {
            fail!(
                "the start block {} of a top shard block update is not contained \
                in the previous shard configuration",
                ob,
            )
        }
        old_cc_seqno = max(old_cc_seqno, odef.descr.next_catchain_seqno);
        if let Some(shards_updated) = shards_updated {
            if shards_updated.contains(ob.shard()) {
                fail!(
                    "the shard of the start block {} of a top shard block update has been \
                    already updated in the current shard configuration",
                    ob
                )
            }
        }
        if odef.descr.before_split != before_split {
            fail!(
                "the shard of the start block {} has before_split={} \
                but the top shard block update is valid only if before_split={}",
                ob,
                odef.descr.before_split,
                before_split,
            )
        }
        if odef.descr.before_merge != before_merge {
            fail!(
                "the shard of the start block {} has before_merge={} \
                but the top shard block update is valid only if before_merge={}",
                ob,
                odef.descr.before_merge,
                before_merge,
            )
        }
        // A new before_split block must fall inside the fsm_split window that
        // was scheduled for this shard.
        if new_info.descr.before_split {
            if before_merge || before_split {
                fail!(
                    "cannot register a before-split block {} at the end of a chain that itself \
                    starts with a split/merge event",
                    new_info.block_id()
                )
            }
            if odef.descr.is_fsm_merge() || odef.descr.is_fsm_none() {
                fail!(
                    "cannot register a before-split block {} because fsm_split state was not \
                    set for this shard beforehand",
                    new_info.block_id()
                )
            }
            if new_info.descr.gen_utime < odef.descr.fsm_utime() ||
               new_info.descr.gen_utime >= odef.descr.fsm_utime() + odef.descr.fsm_interval() {
                fail!(
                    "cannot register a before-split block {} because fsm_split state was \
                    enabled for unixtime {} .. {} but the block is generated at {}",
                    new_info.block_id(),
                    odef.descr.fsm_utime(),
                    odef.descr.fsm_utime() + odef.descr.fsm_interval(),
                    new_info.descr.gen_utime
                )
            }
        }
        // A merged block must fall inside the fsm_merge window of each parent.
        if before_merge {
            if odef.descr.is_fsm_split() || odef.descr.is_fsm_none() {
                fail!(
                    "cannot register a merged block {} because fsm_merge state was not \
                    set for shard {} beforehand",
                    new_info.block_id(),
                    odef.block_id().shard()
                )
            }
            if new_info.descr.gen_utime < odef.descr.fsm_utime() ||
               new_info.descr.gen_utime >= odef.descr.fsm_utime() + odef.descr.fsm_interval() {
                fail!(
                    "cannot register merged block {} because fsm_merge state was \
                    enabled for shard {} for unixtime {} .. {} but the block is generated at {}",
                    new_info.block_id(),
                    odef.block_id().shard(),
                    odef.descr.fsm_utime(),
                    odef.descr.fsm_utime() + odef.descr.fsm_interval(),
                    new_info.descr.gen_utime
                )
            }
        }
        if !before_merge && !before_split {
            ancestor = Some(odef);
        }
    }
    // A merge advances the catchain seqno by one; otherwise it must continue.
    let expected_next_catchain_seqno = old_cc_seqno + if before_merge {1} else {0};
    if expected_next_catchain_seqno != new_info.descr.next_catchain_seqno {
        fail!(
            "the top shard block update is generated with catchain_seqno={} but previous shard \
            configuration expects {}",
            new_info.descr.next_catchain_seqno,
            expected_next_catchain_seqno
        )
    }
    if new_info.descr.end_lt >= lt_limit {
        fail!(
            "the top shard block update has end_lt {} which is larger than the current limit {}",
            new_info.descr.end_lt,
            lt_limit
        )
    }
    log::trace!("after may_update_shard_block_info new_info.block_id(): {}", new_info.block_id());
    return Ok((!before_split, ancestor))
}
/// Applies a validated top shard block update to the shard configuration.
///
/// The update is first checked via `may_update_shard_block_info`; a lone
/// after-split update (one whose sibling has not been applied) is rejected.
/// A pending split/merge schedule is inherited from the ancestor record when
/// one exists. Depending on how many old block ids were supplied, either two
/// shards are merged into one or a single shard is updated in place. The
/// touched shard is recorded in `shards_updated` when a set is provided.
pub fn update_shard_block_info(
    shardes: &mut ShardHashes,
    mut new_info: McShardRecord,
    old_blkids: &Vec<BlockIdExt>,
    shards_updated: Option<&mut HashSet<ShardIdent>>,
) -> Result<()> {
    let (is_standalone, ancestor) = may_update_shard_block_info(
        shardes,
        &new_info,
        old_blkids,
        !0,
        shards_updated.as_ref().map(|s| &**s),
    )?;
    if !is_standalone {
        fail!(
            "cannot apply the after-split update for {} without a corresponding sibling update",
            new_info.blk_id()
        );
    }
    // Carry over a scheduled future split/merge from the ancestor, if any.
    if let Some(parent) = ancestor {
        if parent.descr.split_merge_at != FutureSplitMerge::None {
            new_info.descr.split_merge_at = parent.descr.split_merge_at;
        }
    }
    let shard = new_info.shard().clone();
    // Two old block ids mean this update is the result of a merge event.
    match old_blkids.len() {
        2 => {
            shardes.merge_shards(&shard, |_, _| Ok(new_info.descr))?;
        }
        _ => {
            shardes.update_shard(&shard, |_| Ok(new_info.descr))?;
        }
    }
    if let Some(updated) = shards_updated {
        updated.insert(shard);
    }
    Ok(())
}
/// Applies the two sibling records produced by a shard split event.
///
/// Both records are validated first; each must be an after-split update,
/// otherwise the pair cannot follow a split and the call fails. The siblings
/// are put into canonical order by shard prefix, the parent shard (the merge
/// of the first child) is split into the two descriptors, and the first
/// child's shard is recorded in `shards_updated` when a set is provided.
pub fn update_shard_block_info2(
    shardes: &mut ShardHashes,
    mut new_info1: McShardRecord,
    mut new_info2: McShardRecord,
    old_blkids: &Vec<BlockIdExt>,
    shards_updated: Option<&mut HashSet<ShardIdent>>,
) -> Result<()> {
    let view = shards_updated.as_ref().map(|s| &**s);
    let (res1, _) = may_update_shard_block_info(shardes, &new_info1, old_blkids, !0, view)?;
    let view = shards_updated.as_ref().map(|s| &**s);
    let (res2, _) = may_update_shard_block_info(shardes, &new_info2, old_blkids, !0, view)?;
    if res1 || res2 {
        fail!("the two updates in update_shard_block_info2 must follow a shard split event");
    }
    // Canonical ordering: the lower shard prefix comes first.
    if new_info1.shard().shard_prefix_with_tag() > new_info2.shard().shard_prefix_with_tag() {
        std::mem::swap(&mut new_info1, &mut new_info2);
    }
    let touched_shard = new_info1.shard().clone();
    let parent_shard = new_info1.shard().merge()?;
    shardes.split_shard(&parent_shard, |_| Ok((new_info1.descr, new_info2.descr)))?;
    if let Some(updated) = shards_updated {
        updated.insert(touched_shard);
    }
    Ok(())
}
| 41.742678 | 132 | 0.588232 |
69f3de4ada0f7fa620bd1bd6aaaf0246d408d6b9 | 8,734 | mod utils;
use std::str::FromStr;
use std::collections::BTreeMap;
use std::cmp;
use std::os::unix::io::AsRawFd;
use std::collections::HashMap;
use log::debug;
use byteorder::{ByteOrder, NetworkEndian};
use smoltcp::time::{Duration, Instant};
use smoltcp::phy::Device;
use smoltcp::phy::wait as phy_wait;
use smoltcp::wire::{EthernetAddress, IpAddress, IpCidr,
Ipv6Address, Icmpv6Repr, Icmpv6Packet,
Ipv4Address, Icmpv4Repr, Icmpv4Packet};
use smoltcp::iface::{NeighborCache, InterfaceBuilder, Routes};
use smoltcp::socket::{SocketSet, IcmpSocket, IcmpSocketBuffer, IcmpPacketMetadata, IcmpEndpoint};
// Builds an ICMP echo-request of the given repr/packet flavour (v4 or v6),
// reserves space for it in `$socket`'s TX buffer towards `$remote_addr`, and
// yields `(repr, packet)` so the caller can emit the representation into the
// freshly allocated payload with the device's checksum capabilities.
macro_rules! send_icmp_ping {
    ( $repr_type:ident, $packet_type:ident, $ident:expr, $seq_no:expr,
      $echo_payload:expr, $socket:expr, $remote_addr:expr ) => {{
        let icmp_repr = $repr_type::EchoRequest {
            ident: $ident,
            seq_no: $seq_no,
            data: &$echo_payload,
        };

        // Reserve exactly the bytes the representation needs in the TX
        // buffer; panics if the socket cannot send right now.
        let icmp_payload = $socket
            .send(icmp_repr.buffer_len(), $remote_addr)
            .unwrap();

        // View the reserved bytes as a packet; the caller fills it in.
        let icmp_packet = $packet_type::new_unchecked(icmp_payload);
        (icmp_repr, icmp_packet)
    }}
}
// Handles a parsed ICMP echo-reply: if its sequence number is one we are
// still waiting on, recovers the send timestamp embedded in the payload,
// prints the round-trip time, removes the entry from `$waiting_queue`, and
// bumps the `$received` counter. Replies with unknown sequence numbers are
// silently ignored.
macro_rules! get_icmp_pong {
    ( $repr_type:ident, $repr:expr, $payload:expr, $waiting_queue:expr, $remote_addr:expr,
      $timestamp:expr, $received:expr ) => {{
        if let $repr_type::EchoReply { seq_no, data, .. } = $repr {
            if let Some(_) = $waiting_queue.get(&seq_no) {
                // The first 8 payload bytes carry the send time in millis.
                let packet_timestamp_ms = NetworkEndian::read_i64(data);
                println!("{} bytes from {}: icmp_seq={}, time={}ms",
                         data.len(), $remote_addr, seq_no,
                         $timestamp.total_millis() - packet_timestamp_ms);
                $waiting_queue.remove(&seq_no);
                $received += 1;
            }
        }
    }}
}
/// smoltcp "ping" example: sends `count` ICMP echo requests (v4 or v6,
/// chosen by the target address) over a TAP device and prints per-reply
/// round-trip times plus a final statistics line.
fn main() {
    utils::setup_logging("warn");

    // ---- CLI parsing: tap/middleware options plus ping knobs ----------
    let (mut opts, mut free) = utils::create_options();
    utils::add_tap_options(&mut opts, &mut free);
    utils::add_middleware_options(&mut opts, &mut free);
    opts.optopt("c", "count", "Amount of echo request packets to send (default: 4)", "COUNT");
    opts.optopt("i", "interval",
                "Interval between successive packets sent (seconds) (default: 1)", "INTERVAL");
    opts.optopt("", "timeout",
                "Maximum wait duration for an echo response packet (seconds) (default: 5)",
                "TIMEOUT");
    free.push("ADDRESS");

    let mut matches = utils::parse_options(&opts, free);
    let device = utils::parse_tap_options(&mut matches);
    // Keep the raw fd: phy_wait() below polls it directly with a timeout.
    let fd = device.as_raw_fd();
    let device = utils::parse_middleware_options(&mut matches, device, /*loopback=*/false);
    let device_caps = device.capabilities();
    let address = IpAddress::from_str(&matches.free[0]).expect("invalid address format");
    let count = matches.opt_str("count").map(|s| usize::from_str(&s).unwrap()).unwrap_or(4);
    let interval = matches.opt_str("interval")
        .map(|s| Duration::from_secs(u64::from_str(&s).unwrap()))
        .unwrap_or_else(|| Duration::from_secs(1));
    let timeout = Duration::from_secs(
        matches.opt_str("timeout").map(|s| u64::from_str(&s).unwrap()).unwrap_or(5)
    );

    // ---- Interface / socket setup -------------------------------------
    let neighbor_cache = NeighborCache::new(BTreeMap::new());

    let remote_addr = address;

    // 256-byte buffers comfortably hold the 40-byte echo payload + headers.
    let icmp_rx_buffer = IcmpSocketBuffer::new(vec![IcmpPacketMetadata::EMPTY], vec![0; 256]);
    let icmp_tx_buffer = IcmpSocketBuffer::new(vec![IcmpPacketMetadata::EMPTY], vec![0; 256]);
    let icmp_socket = IcmpSocket::new(icmp_rx_buffer, icmp_tx_buffer);

    // Fixed example addressing: locally-administered MAC, static v4/v6
    // addresses and default gateways.
    let ethernet_addr = EthernetAddress([0x02, 0x00, 0x00, 0x00, 0x00, 0x02]);
    let src_ipv6 = IpAddress::v6(0xfdaa, 0, 0, 0, 0, 0, 0, 1);
    let ip_addrs = [IpCidr::new(IpAddress::v4(192, 168, 69, 1), 24),
                    IpCidr::new(src_ipv6, 64),
                    IpCidr::new(IpAddress::v6(0xfe80, 0, 0, 0, 0, 0, 0, 1), 64)];
    let default_v4_gw = Ipv4Address::new(192, 168, 69, 100);
    let default_v6_gw = Ipv6Address::new(0xfe80, 0, 0, 0, 0, 0, 0, 0x100);
    let mut routes_storage = [None; 2];
    let mut routes = Routes::new(&mut routes_storage[..]);
    routes.add_default_ipv4_route(default_v4_gw).unwrap();
    routes.add_default_ipv6_route(default_v6_gw).unwrap();

    let mut iface = InterfaceBuilder::new(device)
        .ethernet_addr(ethernet_addr)
        .ip_addrs(ip_addrs)
        .routes(routes)
        .neighbor_cache(neighbor_cache)
        .finalize();

    let mut sockets = SocketSet::new(vec![]);
    let icmp_handle = sockets.add(icmp_socket);

    // ---- Ping state ----------------------------------------------------
    let mut send_at = Instant::from_millis(0);
    let mut seq_no = 0;
    let mut received = 0;
    // Payload carries the send timestamp so RTT can be computed on echo.
    let mut echo_payload = [0xffu8; 40];
    // seq_no -> send time; used to match replies and to expire timeouts.
    let mut waiting_queue = HashMap::new();
    let ident = 0x22b;

    loop {
        // Drive the network interface forward.
        let timestamp = Instant::now();
        match iface.poll(&mut sockets, timestamp) {
            Ok(_) => {},
            Err(e) => {
                debug!("poll error: {}", e);
            }
        }

        {
            let timestamp = Instant::now();
            let mut socket = sockets.get::<IcmpSocket>(icmp_handle);
            if !socket.is_open() {
                socket.bind(IcmpEndpoint::Ident(ident)).unwrap();
                send_at = timestamp;
            }

            // Send the next echo request once the interval has elapsed and
            // there are requests left to send.
            if socket.can_send() && seq_no < count as u16 &&
                    send_at <= timestamp {
                NetworkEndian::write_i64(&mut echo_payload, timestamp.total_millis());

                match remote_addr {
                    IpAddress::Ipv4(_) => {
                        let (icmp_repr, mut icmp_packet) = send_icmp_ping!(
                            Icmpv4Repr, Icmpv4Packet, ident, seq_no,
                            echo_payload, socket, remote_addr);
                        icmp_repr.emit(&mut icmp_packet, &device_caps.checksum);
                    },
                    IpAddress::Ipv6(_) => {
                        let (icmp_repr, mut icmp_packet) = send_icmp_ping!(
                            Icmpv6Repr, Icmpv6Packet, ident, seq_no,
                            echo_payload, socket, remote_addr);
                        icmp_repr.emit(&src_ipv6, &remote_addr,
                                       &mut icmp_packet, &device_caps.checksum);
                    },
                    _ => unimplemented!()
                }

                waiting_queue.insert(seq_no, timestamp);
                seq_no += 1;
                send_at += interval;
            }

            // Consume any received reply and match it against the queue.
            if socket.can_recv() {
                let (payload, _) = socket.recv().unwrap();

                match remote_addr {
                    IpAddress::Ipv4(_) => {
                        let icmp_packet = Icmpv4Packet::new_checked(&payload).unwrap();
                        let icmp_repr =
                            Icmpv4Repr::parse(&icmp_packet, &device_caps.checksum).unwrap();
                        get_icmp_pong!(Icmpv4Repr, icmp_repr, payload,
                                       waiting_queue, remote_addr, timestamp, received);
                    }
                    IpAddress::Ipv6(_) => {
                        let icmp_packet = Icmpv6Packet::new_checked(&payload).unwrap();
                        let icmp_repr = Icmpv6Repr::parse(&remote_addr, &src_ipv6,
                                                          &icmp_packet, &device_caps.checksum).unwrap();
                        get_icmp_pong!(Icmpv6Repr, icmp_repr, payload,
                                       waiting_queue, remote_addr, timestamp, received);
                    },
                    _ => unimplemented!()
                }
            }

            // Expire requests that have waited longer than `timeout`.
            waiting_queue.retain(|seq, from| {
                if timestamp - *from < timeout {
                    true
                } else {
                    println!("From {} icmp_seq={} timeout", remote_addr, seq);
                    false
                }
            });

            // Done once everything was sent and every reply arrived or
            // timed out.
            if seq_no == count as u16 && waiting_queue.is_empty() {
                break
            }
        }

        // Sleep until the next interesting moment: either the interface has
        // scheduled work, or the next request is due.
        let timestamp = Instant::now();
        match iface.poll_at(&sockets, timestamp) {
            Some(poll_at) if timestamp < poll_at => {
                let resume_at = cmp::min(poll_at, send_at);
                phy_wait(fd, Some(resume_at - timestamp)).expect("wait error");
            },
            Some(_) => (),
            None => {
                phy_wait(fd, Some(send_at - timestamp)).expect("wait error");
            }
        }
    }

    println!("--- {} ping statistics ---", remote_addr);
    println!("{} packets transmitted, {} received, {:.0}% packet loss",
             seq_no, received, 100.0 * (seq_no - received) as f64 / seq_no as f64);
}
| 40.813084 | 97 | 0.546027 |
38f899d8315a25eb7112efc27eab8d5ba5ba40cc | 1,055 |
use alloc::vec::Vec;
use crate::{print, println, sys::{pci, pci_details}};
/// Entry point for the `pci` shell command; dispatches on the sub-command.
///
/// Returns 0 on success, 1 when no sub-command was supplied. An unknown
/// sub-command prints usage help but still returns 0.
pub fn main(args: &Vec<&str>) -> usize {
    if args.len() < 2 {
        println!("pci <command>");
        return 1;
    }
    let command = args[1];
    if command == "ls" {
        ls(args);
    } else {
        println!("Unknown Command '{}'", command);
        println!("Available Commands: ");
        println!("ls - List Device IDs");
    }
    0
}
fn ls(_args: &Vec<&str>) {
let devices = &*pci::PCI_DEVICES.lock();
for dev in devices {
println!("{} {}: ", pci_details::vendor(dev.vendor_id), pci_details::device(dev.device_id));
println!("+=========+============+==============+============+==================+");
println!("| Bus: {:02x} | Device: {:02x} | Function: {:02x} | Vendor: {:04x} | DeviceID: {:04x} |", dev.bus, dev.device, dev.function, dev.vendor_id, dev.device_id);
for addr in dev.base_addresses {
if addr > 0 {print!("| BAR: {:08x} ", addr);}
}
println!("|");
}
} | 34.032258 | 174 | 0.463507 |
1d3cb078b2afc82d9b36ab6ee15280fb404043a1 | 788 | // Copyright 2018 Guillaume Pinot (@TeXitoi) <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use clap::Clap;
#[derive(Clap, Debug)]
struct MakeCookie {
    // Short-flag string option (`-s <value>`).
    #[clap(short)]
    s: String,

    // Nested `Option` around the subcommand: demonstrates that the
    // subcommand itself is optional at the CLI level.
    #[clap(subcommand)]
    cmd: Option<Option<Command>>,
}
// Available subcommands for `MakeCookie`.
#[derive(Clap, Debug)]
enum Command {
    /// Pound acorns into flour for cookie dough.
    Pound { acorns: u32 },

    // Subcommand with its own short-flag option (`sparkle -c <color>`).
    Sparkle {
        #[clap(short)]
        color: String,
    },
}
/// Parses the command line into `MakeCookie` and echoes the parsed options.
fn main() {
    let cookie_opts = MakeCookie::parse();
    println!("{:?}", cookie_opts);
}
| 22.514286 | 68 | 0.640863 |
89ff96d0ca616a74b82cf15fc9707b5371d48af0 | 95 |
/// Build script: tells Cargo where to find the locally-built METIS library.
fn main() {
    let metis_lib_dir = "/home/reidatcheson/Library/metis/lib";
    println!("cargo:rustc-link-search={}", metis_lib_dir);
}
| 13.571429 | 77 | 0.673684 |
e2a919109757015c24b6cb1489e38db50786a38a | 985 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #1112
// Alignment of interior pointers to dynamic-size types
// Generic record used to exercise alignment of interior pointers to
// dynamic-size types (issue #1112): a leading generic field followed by a
// mix of small scalar fields with differing alignments.
struct X<T> {
    a: T,
    b: u8,
    c: bool,
    d: u8,
    e: u16,
    f: u8,
    g: u8
}
/// Builds an `X` with known scalar field values and passes it to `bar`,
/// which checks that the fields survive intact regardless of `T`'s size.
pub fn main() {
    // `int` was removed from the language before Rust 1.0; `isize` is the
    // modern equivalent of the old machine-sized integer type.
    let x: X<isize> = X {
        a: 12345678,
        b: 9u8,
        c: true,
        d: 10u8,
        e: 11u16,
        f: 12u8,
        g: 13u8
    };
    bar(x);
}
fn bar<T>(x: X<T>) {
assert x.b == 9u8;
assert x.c == true;
assert x.d == 10u8;
assert x.e == 11u16;
assert x.f == 12u8;
assert x.g == 13u8;
}
| 21.888889 | 68 | 0.583756 |
22aecf9baa2ba744384eebc5c0ccaaba280b2e3b | 2,651 | use rumbaa;
use std::fs::{OpenOptions,remove_file};
#[cfg(test)]
mod tests {
    // Note this useful idiom: importing names from outer (for mod tests) scope.
    use super::*;

    /// A theorem environment without a `\label` must still parse cleanly.
    #[test]
    fn test_theorem_without_label() {
        let filename = String::from("theorem_without_label.tex");
        let data_folder = String::from("tests/datas/");
        let main_file = OpenOptions::new()
            .create(false)
            .read(true)
            .write(false)
            .open(data_folder.clone() + &filename)
            .unwrap();
        // Parsing must succeed; the resulting document is not inspected.
        let _doc = rumbaa::texparser::texparser::parse_tex(&main_file, &filename).unwrap();
    }

    /// An equation nested inside a definition must be linked as a
    /// dependency of the theorem labelled "th".
    #[test]
    fn test_equation_in_def() {
        let filename = String::from("equation_in_def.tex");
        let data_folder = String::from("tests/datas/");
        // NOTE(review): unlike test_input_in_comment below, this tmp path is
        // not prefixed with data_folder -- confirm wrap_and_preprocess
        // resolves both forms consistently.
        let tmp_file_name = String::from("tmp_eq_in_def.tex");
        let clean_file = rumbaa::preprocessing::wrap_and_preprocess(&filename, &tmp_file_name, &data_folder).unwrap();
        let doc = rumbaa::texparser::texparser::parse_tex(&clean_file, &filename).unwrap();
        // 1. test
        let label = String::from("th");
        let vec = doc.get_vec_dependences(&label).unwrap();
        // 2. Delete file (best effort; a failed cleanup must not fail the test)
        match remove_file(tmp_file_name) {
            Ok(()) => (),
            Err(_) => (),
        };
        // 3. Test
        assert_eq!(vec.len(), 1);
    }

    /// Commented-out environments must be ignored by the parser: only the
    /// uncommented "def" label may appear in the parsed document.
    #[test]
    fn test_remove_comments() {
        let filename = String::from("remove_comments.tex");
        let data_folder = String::from("tests/datas/");
        let tmp_file_name = String::from("tmp_remove_comments.tex");
        let clean_file = rumbaa::preprocessing::wrap_and_preprocess(&filename, &tmp_file_name, &data_folder).unwrap();
        let doc = rumbaa::texparser::texparser::parse_tex(&clean_file, &filename).unwrap();
        // 2. Delete file (best effort)
        match remove_file(tmp_file_name) {
            Ok(()) => (),
            Err(_) => (),
        };
        // 3. Test
        assert_eq!(doc.contains_key(&"def".to_string()), true);
        assert_eq!(doc.contains_key(&"def:commented".to_string()), false);
        assert_eq!(doc.contains_key(&"th:commented".to_string()), false);
    }

    /**
     * @brief commented \input causes crash
     * @details Associated to Issue #11
     * @return
     */
    #[test]
    fn test_input_in_comment() {
        let filename = String::from("input_in_comment.tex");
        let data_folder = String::from("tests/datas/");
        let tmp_file_name = data_folder.to_owned() + &String::from("tmp_input_in_comment.tex");
        let clean_file = rumbaa::preprocessing::wrap_and_preprocess(&filename, &tmp_file_name, &data_folder).unwrap();
        let doc = rumbaa::texparser::texparser::parse_tex(&clean_file, &filename).unwrap();
        // 2. Delete file (best effort)
        match remove_file(tmp_file_name) {
            Ok(()) => (),
            Err(_) => (),
        };
        // 3. Test: the theorem outside the comment must still be present.
        assert_eq!(doc.contains_key(&"th".to_string()), true);
    }
}
675a6f593b5cc21ea63d1b830982c7a789a641c8 | 22,026 | // Copyright (c) 2018-2022 The MobileCoin Foundation
//! Contains the worker thread that performs the business logic for Fog
//! Overseer.
//!
//! This is the "core" logic for Overseer.
//!
//! HTTP Client -> Overseer Rocket Server -> OverseerService -> *OverseerWorker*
use crate::{error::OverseerError, metrics};
use mc_api::external;
use mc_common::logger::{log, Logger};
use mc_crypto_keys::CompressedRistrettoPublic;
use mc_fog_api::ingest_common::{IngestControllerMode, IngestSummary};
use mc_fog_ingest_client::FogIngestGrpcClient;
use mc_fog_recovery_db_iface::{IngressPublicKeyRecord, IngressPublicKeyRecordFilters, RecoveryDb};
use retry::{delay::Fixed, retry_with_index, OperationResult};
use std::{
convert::TryFrom,
iter::Iterator,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread::{Builder as ThreadBuilder, JoinHandle},
time::Duration,
};
/// Wraps a thread that is responsible for overseeing the active Fog Ingest
/// cluster.
///
/// The worker checks to see that there's always one active ingress key. If
/// there is no active key, then it promotes an idle node to active, and in the
/// case where none of the idle nodes contain the previously active ingress key,
/// it reports that key as lost.
pub struct OverseerWorker {
    /// Join handle used to wait for the thread to terminate.
    /// Becomes `None` once the thread has been stopped and joined.
    join_handle: Option<JoinHandle<()>>,

    /// If true, stops the worker thread. Shared with the spawned thread,
    /// which checks it on every polling iteration.
    stop_requested: Arc<AtomicBool>,
}
impl OverseerWorker {
    /// Spawns the background monitoring thread and returns its handle
    /// wrapper.
    ///
    /// `is_enabled` toggles the monitoring logic at runtime without
    /// stopping the thread; the returned worker stops and joins the thread
    /// when `stop` is called or the worker is dropped.
    pub fn new<DB: RecoveryDb + Clone + Send + Sync + 'static>(
        ingest_clients: Arc<Vec<FogIngestGrpcClient>>,
        recovery_db: DB,
        logger: Logger,
        is_enabled: Arc<AtomicBool>,
    ) -> Self
    where
        OverseerError: From<DB::Error>,
    {
        let stop_requested = Arc::new(AtomicBool::new(false));
        let stop_flag = stop_requested.clone();
        let enabled_flag = is_enabled;
        let handle = ThreadBuilder::new()
            .name("OverseerWorker".to_string())
            .spawn(move || {
                OverseerWorkerThread::start(
                    ingest_clients,
                    recovery_db,
                    enabled_flag,
                    stop_flag,
                    logger,
                )
            })
            .expect("Could not spawn OverseerWorkerThread");
        Self {
            join_handle: Some(handle),
            stop_requested,
        }
    }

    /// Signals the worker thread to stop and waits for it to exit.
    /// Idempotent: subsequent calls return `Ok(())` without joining again.
    pub fn stop(&mut self) -> Result<(), ()> {
        match self.join_handle.take() {
            Some(handle) => {
                self.stop_requested.store(true, Ordering::SeqCst);
                handle.join().map_err(|_| ())
            }
            None => Ok(()),
        }
    }
}
impl Drop for OverseerWorker {
    fn drop(&mut self) {
        // Best-effort shutdown: a join failure cannot be reported from drop.
        self.stop().ok();
    }
}
/// The thread that performs the Fog Overseer logic.
struct OverseerWorkerThread<DB: RecoveryDb> {
    /// The list of FogIngestClients that Overseer uses to communicate with
    /// each node in the Fog Ingest cluster that it's monitoring.
    ingest_clients: Arc<Vec<FogIngestGrpcClient>>,

    /// The database that contains, among other things, info on the Fog Ingest
    /// cluster's ingress keys.
    recovery_db: DB,

    /// If this is true, the worker will not perform it's monitoring logic.
    /// Checked (and, on fatal inconsistencies, cleared) on every poll.
    is_enabled: Arc<AtomicBool>,

    /// If this is true, the thread will stop at the start of its next
    /// polling iteration.
    stop_requested: Arc<AtomicBool>,

    logger: Logger,
}
/// This associates an IngestSummary with an IngestClient. This makes it easy
/// to query a given node based on its IngestSummary.
struct IngestSummaryNodeMapping {
    // Index into the shared `ingest_clients` vector identifying the node
    // that produced the summary.
    node_index: usize,

    // The status summary reported by that node.
    ingest_summary: IngestSummary,
}
impl<DB: RecoveryDb> OverseerWorkerThread<DB>
where
    OverseerError: From<DB::Error>,
{
    /// Poll the Fog Ingest cluster every 5 seconds.
    const POLLING_FREQUENCY: Duration = Duration::from_secs(5);

    /// Try a request to Fog Ingest node this many times if you encounter an
    /// error.
    const NUMBER_OF_TRIES: usize = 3;

    /// Builds the worker state from its parts and runs the polling loop on
    /// the current thread until a stop is requested.
    pub fn start(
        ingest_clients: Arc<Vec<FogIngestGrpcClient>>,
        recovery_db: DB,
        is_enabled: Arc<AtomicBool>,
        stop_requested: Arc<AtomicBool>,
        logger: Logger,
    ) {
        let worker = Self {
            logger,
            stop_requested,
            is_enabled,
            recovery_db,
            ingest_clients,
        };
        worker.run();
    }
    /// Main polling loop: once per `POLLING_FREQUENCY`, collects a status
    /// summary from every ingest node, publishes metrics, and reacts to the
    /// number of active nodes (0 => automatic failover, 1 => healthy,
    /// more => error). Exits only when `stop_requested` is set.
    fn run(self) {
        loop {
            log::trace!(self.logger, "Overseer worker start of thread.");
            std::thread::sleep(Self::POLLING_FREQUENCY);

            // Stop takes precedence over everything else.
            if self.stop_requested.load(Ordering::SeqCst) {
                log::info!(self.logger, "Overseer worker thread stopping.");
                break;
            }

            // When disabled (e.g. via the HTTP API), skip monitoring but
            // keep the thread alive.
            if !self.is_enabled.load(Ordering::SeqCst) {
                log::trace!(self.logger, "Overseer worker is currently disabled.");
                continue;
            }

            // One unresponsive node aborts the whole round: we cannot make
            // safe failover decisions with a partial view of the cluster.
            let ingest_summary_node_mappings: Vec<IngestSummaryNodeMapping> = match self
                .retrieve_ingest_summary_node_mappings()
            {
                Ok(ingest_summary_node_mappings) => ingest_summary_node_mappings,
                Err(err) => {
                    log::error!(self.logger, "Encountered an error while retrieving ingest summaries: {}. Returning to beginning of overseer logic.", err);
                    continue;
                }
            };

            let ingest_summaries: Vec<IngestSummary> = ingest_summary_node_mappings
                .iter()
                .map(|mapping| mapping.ingest_summary.clone())
                .collect();

            // Publish the freshly collected summaries as metrics.
            metrics::utils::set_metrics(&self.logger, ingest_summaries.as_slice());

            let active_ingest_summary_node_mappings: Vec<&IngestSummaryNodeMapping> =
                ingest_summary_node_mappings
                    .iter()
                    .filter(|ingest_summary_node_mapping| {
                        ingest_summary_node_mapping.ingest_summary.mode
                            == IngestControllerMode::Active
                    })
                    .collect();

            let active_node_count = active_ingest_summary_node_mappings.len();
            match active_node_count {
                // No active node: promote one (see perform_automatic_failover).
                0 => {
                    log::warn!(
                        self.logger,
                        "There are currently no active nodes in the Fog Ingest cluster. Initiating automatic failover.",
                    );
                    match self.perform_automatic_failover(ingest_summary_node_mappings) {
                        Ok(_) => {
                            log::info!(self.logger, "Automatic failover completed successfully.")
                        }
                        Err(err) => {
                            log::error!(self.logger, "Automatic failover failed: {}", err)
                        }
                    };
                }
                // Exactly one active node: the healthy steady state.
                1 => {
                    log::trace!(
                        self.logger,
                        "There is one active node in the Fog Ingest cluster. Active ingress key: {:?}",
                        active_ingest_summary_node_mappings[0]
                            .ingest_summary
                            .get_ingress_pubkey()
                    );
                    continue;
                }
                // Multiple active nodes: logged as an error; no automatic
                // remediation is attempted here.
                _ => {
                    let active_node_ingress_pubkeys: Vec<&external::CompressedRistretto> =
                        active_ingest_summary_node_mappings
                            .iter()
                            .map(|active_ingest_summary_node_mapping| {
                                active_ingest_summary_node_mapping
                                    .ingest_summary
                                    .get_ingress_pubkey()
                            })
                            .collect();
                    let error_message =
                        format!("Active ingress keys: {:?}", active_node_ingress_pubkeys);
                    let error = OverseerError::MultipleActiveNodes(error_message);
                    log::error!(self.logger, "{}", error);
                }
            }
        }
    }
/// Returns the latest round of ingest summaries for each
/// FogIngestGrpcClient that communicates with a node that is online.
fn retrieve_ingest_summary_node_mappings(
&self,
) -> Result<Vec<IngestSummaryNodeMapping>, OverseerError> {
let mut ingest_summary_node_mappings: Vec<IngestSummaryNodeMapping> = Vec::new();
for (ingest_client_index, ingest_client) in self.ingest_clients.iter().enumerate() {
match ingest_client.get_status() {
Ok(ingest_summary) => {
log::trace!(
self.logger,
"Ingest summary retrieved: {:?}",
ingest_summary
);
ingest_summary_node_mappings.push(IngestSummaryNodeMapping {
node_index: ingest_client_index,
ingest_summary,
});
}
Err(err) => {
let error_message = format!(
"Unable to retrieve ingest summary for node ({}): {}",
ingest_client.get_uri(),
err
);
return Err(OverseerError::UnresponsiveNodeError(error_message));
}
}
}
Ok(ingest_summary_node_mappings)
}
/// Performs automatic failover, which means that we try to activate nodes
/// for an outstanding ingress key, if it exists.
///
/// The logic is as follows:
/// 1. Find all of the "outstanding keys" as determined by the RecoveryDb.
/// These are ingress keys that Fog Ingest needs to scan blocks with
/// but Fog Ingest isn't currently doing that because all nodes are
/// idle.
/// 2. If there are:
/// a) 0 outstanding keys:
/// No node will be activated. We now have to:
/// (i) Set new keys on an idle node.
/// (ii) Activate that node.
/// b) 1 outsanding key:
/// Try to find an idle node that contains that key.
/// (i) If you find one, great! Just activate that node. If
/// activation is unsuccessful, then return an error
/// and return to the overseer polling logic.
/// (ii) If you don't find an idle node with that key,
/// then you have to report that key as lost, set
/// new keys on an idle node, and activate that node.
/// c) > 1 outstanding key:
/// (i) Disable
/// (ii) TODO: Send an alert.
fn perform_automatic_failover(
&self,
ingest_summary_node_mappings: Vec<IngestSummaryNodeMapping>,
) -> Result<(), OverseerError> {
let inactive_outstanding_keys: Vec<CompressedRistrettoPublic> =
self.get_inactive_outstanding_keys()?;
match inactive_outstanding_keys.len() {
0 => {
log::info!(self.logger, "Found 0 outstanding keys.");
let activated_node_index = self.set_new_key_on_a_node()?;
self.activate_a_node(activated_node_index)?;
Ok(())
}
1 => {
log::info!(self.logger, "Found 1 outstanding key.");
let inactive_outstanding_key = inactive_outstanding_keys[0];
self.handle_one_inactive_outstanding_key(
inactive_outstanding_key,
ingest_summary_node_mappings,
)?;
Ok(())
}
_ => {
self.is_enabled.store(false, Ordering::SeqCst);
let error_message = format!("This is unexpected and requires manual intervention. As such, we've disabled overseer. Take the appropriate action and then re-enable overseer by calling the /enable endpoint. Inactive oustanding keys: {:?}", inactive_outstanding_keys);
Err(OverseerError::MultipleInactiveOutstandingKeys(
error_message,
))
}
}
}
fn get_inactive_outstanding_keys(
&self,
) -> Result<Vec<CompressedRistrettoPublic>, OverseerError> {
// An outanding key is one that Fog Ingest is still obligated to be
// scanning blocks with on behalf of users.
let outstanding_keys_filters = IngressPublicKeyRecordFilters {
// A lost key can never be outstanding because it will never again
// be used to scan blocks.
should_include_lost_keys: false,
// Its possible for a retired key to be outstanding if its public
// expiry is greater than its last scanned block, so we have to
// include retired keys in this query.
should_include_retired_keys: true,
// If a key has expired- i.e. its last scanned block is greater
// than or equal to its public expiry- then it will no longer scan
// blocks. Therefore, we need to include unexpired keys because they
// are still supposed to be scanned by Fog.
should_only_include_unexpired_keys: true,
};
// First, find the "inactive_outstanding_keys" which are outstanding
// keys that we've grabbed from the RecoveryDb.
//
// TODO: Add a config that allows us to set this start block.
let ingress_public_key_records: Vec<IngressPublicKeyRecord> =
self.recovery_db.get_ingress_key_records(
/* start_block_at_least= */ 0,
&outstanding_keys_filters,
)?;
Ok(ingress_public_key_records
.iter()
.map(|record| record.key)
.collect())
}
    /// Performs the following logic when one inactive outstanding key is found:
    /// 1) Tries to find an idle node that contains that key.
    ///    (i) If it's found, it activates the node that contains it. If
    ///    activation is unsuccessful, then it returns an error.
    ///    (ii) If no idle node is found that contains the key, then it
    ///    reports that key as lost, sets new keys on an idle node, and
    ///    activates that node.
    fn handle_one_inactive_outstanding_key(
        &self,
        inactive_outstanding_key: CompressedRistrettoPublic,
        ingest_summary_node_mappings: Vec<IngestSummaryNodeMapping>,
    ) -> Result<(), OverseerError> {
        log::info!(
            self.logger,
            "Trying to activate an idle node with inactive outstanding key: {:?}",
            &inactive_outstanding_key
        );
        for ingest_summary_node_mapping in &ingest_summary_node_mappings {
            // Nodes whose reported pubkey bytes don't parse are skipped
            // rather than treated as errors.
            let node_ingress_key = match CompressedRistrettoPublic::try_from(
                ingest_summary_node_mapping
                    .ingest_summary
                    .get_ingress_pubkey(),
            ) {
                Ok(key) => key,
                Err(_) => continue,
            };
            if inactive_outstanding_key.eq(&node_ingress_key) {
                // Found an idle node already holding the key: activate it.
                // Both success and failure return immediately; only a key
                // match can end the search.
                let node = &self.ingest_clients[ingest_summary_node_mapping.node_index];
                match node.activate() {
                    Ok(_) => {
                        log::info!(
                            self.logger,
                            "Successfully activated node {}.",
                            node.get_uri()
                        );
                        return Ok(());
                    }
                    Err(err) => {
                        let error_message = format!(
                            "Tried activating node {}, but it failed: {}.",
                            node.get_uri(),
                            err
                        );
                        return Err(OverseerError::ActivateNode(error_message));
                    }
                }
            }
        }
        // We've gone through all the Fog Ingest nodes' keys,
        // and none of them matches the inactive outstanding key. We must
        // report the inactive outstanding key as lost, set new keys
        // on an idle node, and activate that node.
        self.report_lost_ingress_key(inactive_outstanding_key)?;
        let activated_node_index = self.set_new_key_on_a_node()?;
        self.activate_a_node(activated_node_index)?;
        Ok(())
    }
    /// Tries to report a lost ingress key.
    ///
    /// Retries up to `NUMBER_OF_TRIES` times with a fixed 200ms delay
    /// between attempts; the final failure surfaces as
    /// `OverseerError::ReportLostKey`.
    fn report_lost_ingress_key(
        &self,
        inactive_outstanding_key: CompressedRistrettoPublic,
    ) -> Result<(), OverseerError> {
        let result = retry_with_index(
            Fixed::from_millis(200).take(Self::NUMBER_OF_TRIES),
            |current_try| match self
                .recovery_db
                .report_lost_ingress_key(inactive_outstanding_key)
            {
                Ok(_) => {
                    log::info!(
                        self.logger,
                        "The following key was successfully reported as lost: {}",
                        inactive_outstanding_key
                    );
                    OperationResult::Ok(())
                }
                Err(err) => {
                    // Attempts remaining after this one; `retry_with_index`
                    // supplies the current attempt number.
                    let number_of_remaining_tries = Self::NUMBER_OF_TRIES - current_try as usize;
                    // The final attempt produces a different, terminal
                    // message.
                    let error_message = match number_of_remaining_tries {
                        0 => format!("Did not succeed in reporting lost ingress key {} within {} tries. Underlying error: {}", inactive_outstanding_key, Self::NUMBER_OF_TRIES, err),
                        _ => format!("The following key was not successfully reported as lost: {}. Will try {} more times. Underlying error: {}", inactive_outstanding_key, number_of_remaining_tries, err),
                    };
                    OperationResult::Retry(OverseerError::ReportLostKey(error_message))
                }
            },
        );
        Ok(result?)
    }
    /// Tries to set a new ingress key on a node. The node is assumed to be
    /// idle.
    ///
    /// Walks the clients in order; the first node that accepts new keys
    /// (within `NUMBER_OF_TRIES` attempts, 200ms apart) wins, and its index
    /// is returned so the caller can activate that same node. Only if every
    /// node refuses is `SetNewKey` returned.
    fn set_new_key_on_a_node(&self) -> Result<usize, OverseerError> {
        for (i, ingest_client) in self.ingest_clients.iter().enumerate() {
            let result = retry_with_index(
                Fixed::from_millis(200).take(Self::NUMBER_OF_TRIES),
                |current_try| {
                    match ingest_client.new_keys() {
                        Ok(_) => {
                            log::info!(
                                self.logger,
                                "New keys successfully set on the ingest node at index {}.",
                                i
                            );
                            OperationResult::Ok(())
                        }
                        // TODO: We'll need to alert Ops to take manual action at this point.
                        Err(err) => {
                            // Attempts remaining after this one.
                            let number_of_remaining_tries =
                                Self::NUMBER_OF_TRIES - current_try as usize;
                            let error_message = match number_of_remaining_tries {
                                0 => format!("Did not succeed in setting a new key on node at index {}. Underlying error: {}", i, err),
                                _ => format!("New keys were not successfully set on the ingest node at index {}. Will try {} more times. Underlying error: {}", i, number_of_remaining_tries, err),
                            };
                            OperationResult::Retry(OverseerError::SetNewKey(error_message))
                        }
                    }
                },
            );
            // A failing node is not fatal; move on to the next candidate.
            if result.is_ok() {
                return Ok(i);
            }
        }
        Err(OverseerError::SetNewKey(
            "New keys were not successfully set on any of the idle nodes.".to_string(),
        ))
    }
    /// Tries to activate a node. The node is assumed to be idle.
    ///
    /// Retries up to `NUMBER_OF_TRIES` times with a fixed 200ms delay; the
    /// final failure surfaces as `OverseerError::ActivateNode`.
    fn activate_a_node(&self, activated_node_index: usize) -> Result<(), OverseerError> {
        let result = retry_with_index(
            Fixed::from_millis(200).take(Self::NUMBER_OF_TRIES),
            |current_try| {
                match self.ingest_clients[activated_node_index].activate() {
                    Ok(_) => {
                        log::info!(
                            self.logger,
                            "Node at index {} successfully activated.",
                            activated_node_index
                        );
                        OperationResult::Ok(())
                    }
                    // TODO: Alert Ops to take manual action at this point.
                    Err(err) => {
                        // Attempts remaining after this one.
                        let number_of_remaining_tries =
                            Self::NUMBER_OF_TRIES - current_try as usize;
                        let error_message = match number_of_remaining_tries {
                            0 => format!(
                                "Did not succeed in setting a new key on node at index {}. Underlying error: {}",
                                activated_node_index,
                                err
                            ),
                            _ => format!(
                                "Node at index {} not activated. Will try {} more times. Underlying error: {}",
                                activated_node_index, number_of_remaining_tries, err
                            ),
                        };
                        OperationResult::Retry(OverseerError::ActivateNode(error_message))
                    }
                }
            },
        );
        Ok(result?)
    }
}
| 41.402256 | 281 | 0.531463 |
38189b3649b3e93808286b7fc21d62afe8f42ca3 | 505 | #[cfg(not(target_os = "macos"))]
pub use fallback::*;
#[cfg(target_os = "macos")]
pub use macos::*;
#[cfg(target_os = "macos")]
mod macos;
#[cfg(not(target_os = "macos"))]
mod fallback {
    use std::path::PathBuf;

    /// Stub for non-macOS targets: application bundles are a macOS-only
    /// concept, so no bundle is ever reported as present.
    pub fn has_bundle(_bundle_identifier: &str) -> bool {
        false
    }

    /// Stub for non-macOS targets: resource paths inside an application
    /// bundle cannot be resolved, so this always returns `None`.
    pub fn get_path(
        _bundle_identifier: &str,
        _resource_name: &str,
        _resource_type: Option<&str>,
        _sub_dir_name: Option<&str>,
    ) -> Option<PathBuf> {
        None
    }
}
| 19.423077 | 57 | 0.580198 |
ab4959ea1b37b7ac665c09191f6d956580d1ff68 | 648 | use std::io;
/// Contest task: read a rating and a string; print the string when the
/// rating is at least 3200, otherwise print "red".
fn main() {
    let rating = read_one::<usize>();
    let name = read_one::<String>();
    let output = if rating >= 3200 { name } else { "red".to_string() };
    println!("{}", output);
}
/// Reads one line from standard input and parses every whitespace-separated
/// token into a `T`, panicking on I/O failure or malformed input.
#[allow(dead_code)]
fn read<T>() -> Vec<T>
where
    T: std::str::FromStr,
    T::Err: std::fmt::Debug,
{
    let mut line = String::new();
    io::stdin().read_line(&mut line).unwrap();
    line.split_whitespace()
        .map(|token| token.trim().parse().unwrap())
        .collect()
}
/// Reads one line from standard input and parses the whole (trimmed) line
/// into a single `T`, panicking on I/O failure or malformed input.
#[allow(dead_code)]
fn read_one<T>() -> T
where
    T: std::str::FromStr,
    T::Err: std::fmt::Debug,
{
    let mut line = String::new();
    io::stdin().read_line(&mut line).unwrap();
    line.trim().parse().unwrap()
}
| 18.514286 | 45 | 0.544753 |
09a56961219f2e075b36518fe248c3deddf73569 | 6,255 | use crate::opcode::OpCode;
use crate::token::{Token, TokenType};
use crate::label::Label;
use crate::parser::parser::InstrParser;
use crate::{
get_cpn, expect_token, get_number, get_creg, get_reg, get_token, get_label
};
/// Parser for the "Coprocessor Data Operations" (CDP) instruction class.
pub struct CpOpsParser;

impl CpOpsParser {
    /// Return the binary representation of the "Coprocessor Data Operations"
    /// instruction
    ///
    /// Expected operand order after the mnemonic token:
    ///   CP#, <#cpopc>, CRd, CRn, CRm{, <#cp>}
    /// where the trailing `cp` (coprocessor information) field is optional
    /// and limited to 3 bits.
    pub fn parse(opcode: OpCode, tokens: &[Token]) -> u32 {
        // Condition code
        let cond = InstrParser::parse_cond(opcode, &tokens[0]) as u32;
        // Coprocessor number
        let cpn = get_cpn!(tokens, 1);
        expect_token!(tokens, 2, TokenType::Comma);
        // Coprocessor operation code
        let cpopc = get_number!(tokens, 3).value;
        expect_token!(tokens, 4, TokenType::Comma);
        // Coprocessor registers
        let crd = get_creg!(tokens, 5);
        expect_token!(tokens, 6, TokenType::Comma);
        let crn = get_creg!(tokens, 7);
        expect_token!(tokens, 8, TokenType::Comma);
        let crm = get_creg!(tokens, 9);
        // Optional field
        // Coprocessor information
        let mut cp = 0u32;
        if tokens.get(10).is_some() {
            expect_token!(tokens, 10, TokenType::Comma);
            cp = get_number!(tokens, 11).value;
            // Larger than 3 bits
            if cp > 7 {
                panic!("Immediate value out of range.");
            }
        }
        // Pack the fields most-significant first:
        // cond(4) | 1110(4) | cpopc(4) | CRn(4) | CRd(4) | CP#(4) | cp(3) | 0(1) | CRm(4)
        return (((((((cond << 4 | 0b1110) << 4 | cpopc) << 4 | crn) << 4 | crd)
            << 4 | cpn) << 3 | cp) << 1 | 0b0) << 4 | crm;
    }
}
/// Parser for the ARM "Coprocessor Data Transfers" (LDC/STC) instruction
/// class.
pub struct CpTransfersParser;
impl CpTransfersParser {
    /// Return the binary representation of the "Coprocessor Data Transfers"
    /// instruction
    ///
    /// `labels` is consulted when the address operand is a bare expression
    /// (case 1 below), in which case a PC-relative (r15) offset is encoded.
    /// Panics when the immediate offset is not a multiple of 4 (the encoded
    /// offset is in words).
    pub fn parse(opcode: OpCode, tokens: &[Token], labels: &Vec<Label>) -> u32 {
        // Condition code
        let cond = InstrParser::parse_cond(opcode, &tokens[0]) as u32;
        // If true, add offset before transfer, otherwise add offset after
        // transfer
        let mut pre = false as u32;
        // If true, add offset to base, otherwise substract offset from base
        let mut up = true as u32;
        // If true, perform long transfer, otherwise perform short transfer
        let trans_len = tokens.get(0).unwrap().value
            // Remove cond to avoid any false positive
            .replace(&cond.to_string(), "")
            .ends_with("l") as u32;
        // Write-back flag: set when the statement ends with "!".
        let write = tokens.last().unwrap().value.eq("!") as u32;
        // If true, load from memory, otherwise store to memory
        let load = (opcode == OpCode::LDC) as u32;
        // Coprocessor number
        let cpn = get_cpn!(tokens, 1);
        expect_token!(tokens, 2, TokenType::Comma);
        // Coprocessor register
        let crd = get_creg!(tokens, 3);
        expect_token!(tokens, 4, TokenType::Comma);
        // Now we parse an <address> which can either be:
        // 1 - <expression>
        //
        // 2.1 - [Rn]
        // 2 - [Rn,<#expression>]{!}
        //
        // 3 - [Rn],<#expression>
        let mut offset = 0u32;
        let rn;
        let token = get_token!(tokens, 5, [TokenType::Keyword,
                               TokenType::OpenBracket]);
        if token.token_type == TokenType::Keyword {
            // Case 1
            let label = get_label!(tokens, 5, labels);
            // Calculate the offset and take the lower 8 bits
            offset = label.offset(&tokens[0], 8);
            // PC-relative addressing: pre-indexed, subtracting from r15.
            pre = true as u32;
            up = false as u32;
            rn = 15;
        } else {
            // Case 2.1, 2.2 and 3
            rn = get_reg!(tokens, 6);
            // Check if token 8 exists, if not, token 7 must be a close bracket
            if !tokens.get(8).is_some() {
                // Case 2.1
                // Offset is zero
                expect_token!(tokens, 7, TokenType::CloseBracket);
            } else {
                // Case 2.2 and 3
                let mut idx = 8;
                let token = get_token!(tokens, 7, [TokenType::Comma,
                                       TokenType::CloseBracket]);
                if token.token_type == TokenType::CloseBracket {
                    // Case 3
                    expect_token!(tokens, 8, TokenType::Comma);
                    idx = 9;
                } else {
                    pre = true as u32;
                }
                let imm = get_number!(tokens, idx).value;
                // The hardware offset field counts words, so the byte offset
                // must be word-aligned.
                if imm % 4 != 0 {
                    panic!("Coprocessor offset out of range.");
                }
                offset = imm / 4;
            }
        }
        // Assemble: cond(4) | 110 | P | U | N | W | L | Rn(4) | CRd(4)
        // | cp#(4) | offset(8).
        return (((((((((cond << 3 | 0b110) << 1 | pre) << 1 | up)
            << 1 | trans_len) << 1 | write) << 1 | load) << 4 | rn)
            << 4 | crd) << 4 | cpn) << 8 | offset;
    }
}
/// Parser for the ARM "Coprocessor Register Transfers" (MRC/MCR) instruction
/// class.
pub struct CpRegTransParser;
impl CpRegTransParser {
    /// Return the binary representation of the "Coprocessor Register Transfers"
    /// instruction
    ///
    /// Expected token layout:
    /// `<mnemonic{cond}> <cpn>, <cpopc>, <Rd>, <CRn>, <CRm>{, <cp>}`.
    /// Panics (via the helper macros) on malformed tokens, and explicitly
    /// when the optional coprocessor-information field does not fit in 3 bits.
    pub fn parse(opcode: OpCode, tokens: &[Token]) -> u32 {
        // Condition code
        let cond = InstrParser::parse_cond(opcode, &tokens[0]) as u32;
        // If true, load from memory, otherwise store to memory
        let load = (opcode == OpCode::MRC) as u32;
        // Coprocessor number
        let cpn = get_cpn!(tokens, 1);
        expect_token!(tokens, 2, TokenType::Comma);
        // Coprocessor operation code
        let cpopc = get_number!(tokens, 3).value;
        expect_token!(tokens, 4, TokenType::Comma);
        let rd = get_reg!(tokens, 5);
        expect_token!(tokens, 6, TokenType::Comma);
        // Coprocessor registers
        let crn = get_creg!(tokens, 7);
        expect_token!(tokens, 8, TokenType::Comma);
        let crm = get_creg!(tokens, 9);
        // Optional field
        // Coprocessor information
        let mut cp = 0u32;
        if tokens.get(10).is_some() {
            expect_token!(tokens, 10, TokenType::Comma);
            cp = get_number!(tokens, 11).value;
            // Larger than 3 bits
            if cp > 7 {
                panic!("Immediate value out of range.");
            }
        }
        // Assemble: cond(4) | 1110 | cpopc(3) | L | CRn(4) | Rd(4) | cp#(4)
        // | cp(3) | 1 | CRm(4).
        return ((((((((cond << 4 | 0b1110) << 3 | cpopc) << 1 | load)
            << 4 | crn) << 4 | rd) << 4 | cpn) << 3 | cp) << 1 | 0b1)
            << 4 | crm;
    }
}
| 33.994565 | 80 | 0.523581 |
dd6a05b8f616301164a7e8056a34560423678817 | 34,257 | // Copyright 2019 Zhizhesihai (Beijing) Technology Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
use core::analysis::TokenStream;
use core::codec::field_infos::{
FieldInfo, FieldInfos, FieldInfosBuilder, FieldInvertState, FieldNumbersRef,
};
use core::codec::norms::NormsProducer;
use core::codec::postings::{
ParallelPostingsArray, PostingsArray, TermsHash, TermsHashBase, TermsHashPerField,
TermsHashPerFieldBase,
};
use core::codec::segment_infos::SegmentWriteState;
use core::codec::term_vectors::{
TermVectorsFormat, TermVectorsReader, TermVectorsWriter, TermVectorsWriterEnum,
};
use core::codec::Codec;
use core::codec::{Fields, TermIterator, Terms};
use core::codec::{PackedLongDocMap, SorterDocMap};
use core::codec::{PostingIterator, PostingIteratorFlags};
use core::doc::Fieldable;
use core::doc::IndexOptions;
use core::index::merge::{MergePolicy, MergeScheduler};
use core::index::writer::{
DocumentsWriterPerThread, TrackingTmpDirectory, TrackingTmpOutputDirectoryWrapper,
TrackingValidDirectory,
};
use core::search::{DocIterator, NO_MORE_DOCS};
use core::store::directory::Directory;
use core::store::{FlushInfo, IOContext};
use core::util::{ByteBlockPool, ByteSliceReader, BytesRef, DocId};
use error::{ErrorKind, Result};
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::sync::Arc;
/// Collects per-document term vectors during indexing and writes them
/// through a codec `TermVectorsWriter` into `out_dir`.
pub struct TermVectorsConsumerImpl<
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
    DW: Directory + Send + Sync + 'static,
> {
    pub base: TermsHashBase,
    // format: Box<TermVectorsFormat>,
    // directory: DirectoryRc,
    // segment_info: SegmentInfo,
    // Codec writer; created lazily on the first document that has vectors.
    writer: Option<TermVectorsWriterEnum<DW::IndexOutput>>,
    // Directory the term-vector files are written to.
    out_dir: Arc<DW>,
    // Scratch readers for replaying the position/offset byte slices.
    vector_slice_reader_pos: ByteSliceReader,
    vector_slice_reader_off: ByteSliceReader,
    // True once any field of the current document stored term vectors.
    has_vectors: bool,
    pub num_vector_fields: u32,
    // Next doc id the writer expects; used by `fill` to pad vector-less docs.
    last_doc_id: DocId,
    // Raw back-pointer to the owning DWPT; assumed to outlive this consumer
    // (not enforced by the type system).
    doc_writer: *const DocumentsWriterPerThread<D, C, MS, MP>,
    // Per-field consumers that collected vectors for the current document.
    pub per_fields: Vec<*mut TermVectorsConsumerPerField<D, C, MS, MP>>,
    pub inited: bool,
}
impl<D, C, MS, MP, DW> TermVectorsConsumerImpl<D, C, MS, MP, DW>
where
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
    DW: Directory + Send + Sync + 'static,
{
    /// Creates a consumer bound to `doc_writer`, writing vector files into
    /// `out_dir`. The codec writer itself is created lazily.
    pub fn new(doc_writer: &mut DocumentsWriterPerThread<D, C, MS, MP>, out_dir: Arc<DW>) -> Self {
        let base = TermsHashBase::new(doc_writer);
        TermVectorsConsumerImpl {
            base,
            writer: None,
            out_dir,
            vector_slice_reader_off: ByteSliceReader::default(),
            vector_slice_reader_pos: ByteSliceReader::default(),
            has_vectors: false,
            num_vector_fields: 0,
            last_doc_id: 0,
            doc_writer,
            per_fields: vec![],
            inited: false,
        }
    }
    /// Returns the codec writer; must only be called after it was created
    /// (debug-asserted).
    pub fn terms_writer(&mut self) -> &mut TermVectorsWriterEnum<DW::IndexOutput> {
        debug_assert!(self.writer.is_some());
        self.writer.as_mut().unwrap()
    }
    /// Lazily creates the codec term-vectors writer on first use and resets
    /// the padding cursor.
    fn init_term_vectors_writer(&mut self) -> Result<()> {
        debug_assert!(self.inited);
        if self.writer.is_none() {
            // NOTE(review): dereferences the raw `doc_writer` pointer set at
            // construction; assumed still valid here.
            let doc_writer = unsafe { &*self.doc_writer };
            let context = IOContext::Flush(FlushInfo::new(doc_writer.num_docs_in_ram));
            self.writer = Some(doc_writer.codec().term_vectors_format().tv_writer(
                self.out_dir.as_ref(),
                &doc_writer.segment_info,
                &context,
            )?);
            self.last_doc_id = 0;
        }
        Ok(())
    }
    /// Fills in no-term-vectors for all docs we haven't seen
    /// since the last doc that had term vectors.
    fn fill(&mut self, doc_id: DocId) -> Result<()> {
        loop {
            if self.last_doc_id >= doc_id {
                break;
            }
            // Each skipped document is recorded as an empty document.
            if let Some(ref mut writer) = self.writer {
                writer.start_document(0)?;
                writer.finish_document()?;
            }
            self.last_doc_id += 1;
        }
        Ok(())
    }
    /// Finishes the segment's vector files if any document had vectors,
    /// padding trailing vector-less documents first.
    fn do_flush<DW1: Directory>(
        &mut self,
        _field_to_flush: BTreeMap<&str, &TermVectorsConsumerPerField<D, C, MS, MP>>,
        state: &mut SegmentWriteState<D, DW1, C>,
    ) -> Result<()> {
        if self.writer.is_some() {
            let num_docs = state.segment_info.max_doc;
            debug_assert!(num_docs > 0);
            // At least one doc in this run had term vectors enabled
            self.fill(num_docs)?;
            self.writer
                .as_mut()
                .unwrap()
                .finish(&state.field_infos, num_docs as usize)?;
        }
        Ok(())
    }
    /// Clears the per-document list of fields that carry vectors.
    fn reset_field(&mut self) {
        self.per_fields.truncate(0); // don't hang onto stuff from previous doc
        self.num_vector_fields = 0;
    }
    /// Registers a per-field consumer whose vectors must be written when the
    /// current document is finished.
    fn add_field_to_flush(&mut self, field_to_flush: &TermVectorsConsumerPerField<D, C, MS, MP>) {
        self.per_fields.push(
            field_to_flush as *const TermVectorsConsumerPerField<D, C, MS, MP>
                as *mut TermVectorsConsumerPerField<D, C, MS, MP>,
        );
        self.num_vector_fields += 1;
    }
}
impl<D, C, MS, MP, DW> TermVectorsConsumerImpl<D, C, MS, MP, DW>
where
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
    DW: Directory + Send + Sync + 'static,
{
    /// Flushes buffered vectors for the segment, then drops the writer and
    /// resets per-segment state regardless of success or failure.
    fn flush<DW1: Directory>(
        &mut self,
        field_to_flush: BTreeMap<&str, &TermVectorsConsumerPerField<D, C, MS, MP>>,
        state: &mut SegmentWriteState<D, DW1, C>,
        _sort_map: Option<&Arc<PackedLongDocMap>>,
        _norms: Option<&impl NormsProducer>,
    ) -> Result<()> {
        let res = self.do_flush(field_to_flush, state);
        self.writer = None;
        self.last_doc_id = 0;
        self.has_vectors = false;
        res
    }
    /// Discards all buffered vector state without writing anything.
    fn abort(&mut self) -> Result<()> {
        self.has_vectors = false;
        self.writer = None;
        self.last_doc_id = 0;
        self.base.reset();
        Ok(())
    }
    /// Resets per-document state before a new document is inverted.
    fn start_document(&mut self) -> Result<()> {
        self.reset_field();
        // Redundant with `reset_field`, which already zeroes this counter.
        self.num_vector_fields = 0;
        Ok(())
    }
    /// Writes the current document's term vectors, one field at a time in
    /// field-name order, then resets per-document state.
    fn finish_document(
        &mut self,
        field_infos: &mut FieldInfosBuilder<FieldNumbersRef>,
    ) -> Result<()> {
        if !self.has_vectors {
            return Ok(());
        }
        // Sort the fields by name so they are written in deterministic order.
        let mut pf_idxs: BTreeMap<String, usize> = BTreeMap::new();
        for i in 0..self.num_vector_fields as usize {
            unsafe {
                let pf: &TermVectorsConsumerPerField<D, C, MS, MP> = &(*self.per_fields[i]);
                pf_idxs.insert(pf.base().field_info.name.clone(), i);
            }
        }
        self.init_term_vectors_writer()?;
        let doc_id = unsafe { (*self.doc_writer).doc_state.doc_id };
        // Pad any preceding documents that had no vectors.
        self.fill(doc_id)?;
        {
            debug_assert!(self.writer.is_some());
            let writer = self.writer.as_mut().unwrap();
            writer.start_document(self.num_vector_fields as usize)?;
            for (_, i) in pf_idxs {
                unsafe {
                    let pf: &mut TermVectorsConsumerPerField<D, C, MS, MP> =
                        &mut (*self.per_fields[i]);
                    pf.finish_document(
                        writer,
                        &mut self.vector_slice_reader_pos,
                        &mut self.vector_slice_reader_off,
                        field_infos,
                    )?;
                }
            }
            writer.finish_document()?;
        }
        debug_assert!(self.last_doc_id == doc_id);
        self.last_doc_id += 1;
        self.base.reset();
        self.reset_field();
        Ok(())
    }
}
/// A consumer used when the index is sorted: vectors are first written to a
/// temporary directory and then rewritten in sorted doc order on flush.
pub struct SortingTermVectorsConsumerImpl<
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
> {
    // Inner consumer that writes (unsorted) vectors into `tmp_directory`.
    consumer: TermVectorsConsumerImpl<D, C, MS, MP, TrackingTmpDirectory<D>>,
    // Wrapper that tracks the temporary files so they can be renamed/deleted.
    tmp_directory: Arc<TrackingTmpDirectory<D>>,
}
impl<D, C, MS, MP> SortingTermVectorsConsumerImpl<D, C, MS, MP>
where
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
{
    /// Creates a sorting consumer whose inner consumer writes into a
    /// temporary tracking wrapper around the DWPT's directory.
    fn new(doc_writer: &mut DocumentsWriterPerThread<D, C, MS, MP>) -> Self {
        let dir = Arc::new(TrackingTmpOutputDirectoryWrapper::new(Arc::clone(
            &doc_writer.directory,
        )));
        let consumer = TermVectorsConsumerImpl::new(doc_writer, Arc::clone(&dir));
        Self {
            consumer,
            tmp_directory: dir,
        }
    }
    /// Delegates per-document completion to the inner consumer.
    fn finish_document(
        &mut self,
        field_infos: &mut FieldInfosBuilder<FieldNumbersRef>,
    ) -> Result<()> {
        self.consumer.finish_document(field_infos)
    }
    /// Flushes the temporary vectors; when a sort map is present the vectors
    /// are rewritten in sorted order, otherwise the temp files are renamed
    /// into place.
    fn flush<DW: Directory>(
        &mut self,
        field_to_flush: BTreeMap<&str, &TermVectorsConsumerPerField<D, C, MS, MP>>,
        state: &mut SegmentWriteState<D, DW, C>,
        sort_map: Option<&Arc<PackedLongDocMap>>,
        norms: Option<&impl NormsProducer>,
    ) -> Result<()> {
        // If no writer was ever created, no document had vectors.
        let skip_flush = self.consumer.writer.is_none();
        self.consumer
            .flush(field_to_flush, state, sort_map, norms)?;
        if skip_flush {
            return Ok(());
        }
        if let Some(sort_map) = sort_map {
            let res = self.flush_sorted(state, sort_map.as_ref());
            // Temp files are removed even when the sorted rewrite failed.
            self.tmp_directory.delete_temp_files();
            res
        } else {
            // we're lucky the index is already sorted, just rename the temporary file and return
            for (k, v) in &*self.tmp_directory.file_names.lock().unwrap() {
                self.tmp_directory.rename(v, k)?;
            }
            Ok(())
        }
    }
    /// Reads the temporary (unsorted) vectors back and rewrites them in the
    /// order given by `sort_map`.
    fn flush_sorted<DW: Directory>(
        &mut self,
        flush_state: &SegmentWriteState<D, DW, C>,
        sort_map: &impl SorterDocMap,
    ) -> Result<()> {
        let doc_writer = unsafe { &*self.consumer.doc_writer };
        let reader = doc_writer.codec().term_vectors_format().tv_reader(
            self.tmp_directory.as_ref(),
            &flush_state.segment_info,
            Arc::new(flush_state.field_infos.clone()),
            &IOContext::Default,
        )?;
        let mut writer = doc_writer.codec().term_vectors_format().tv_writer(
            flush_state.directory.as_ref(),
            &flush_state.segment_info,
            &IOContext::Default,
        )?;
        for i in 0..flush_state.segment_info.max_doc {
            // New doc id `i` gets the vectors of the old doc it maps to.
            let vectors = reader.get(sort_map.new_to_old(i))?;
            Self::write_term_vectors(&mut writer, vectors, &flush_state.field_infos)?;
        }
        writer.finish(
            &flush_state.field_infos,
            flush_state.segment_info.max_doc as usize,
        )
    }
    /// Copies one document's term vectors (possibly absent) into `writer`,
    /// replaying terms, frequencies, positions, offsets and payloads.
    fn write_term_vectors(
        writer: &mut impl TermVectorsWriter,
        vectors: Option<impl Fields>,
        fields_infos: &FieldInfos,
    ) -> Result<()> {
        if let Some(vectors) = vectors {
            let num_fields = vectors.size();
            writer.start_document(num_fields)?;
            let mut last_field_name = String::new();
            let mut field_count = 0;
            for field in vectors.fields() {
                field_count += 1;
                // Fields must arrive in ascending name order.
                debug_assert!(field > last_field_name);
                let field_info = fields_infos.field_info_by_name(&field).unwrap();
                if let Some(terms) = vectors.terms(&field)? {
                    let has_positions = terms.has_positions()?;
                    let has_offsets = terms.has_offsets()?;
                    let has_payloads = terms.has_payloads()?;
                    debug_assert!(!has_payloads || has_positions);
                    let mut num_terms = terms.size()?;
                    if num_terms == -1 {
                        // count manually. It is stupid, but needed, as Terms.size() is not a
                        // mandatory statistics function
                        num_terms = 0;
                        let mut term_iter = terms.iterator()?;
                        while term_iter.next()?.is_some() {
                            num_terms += 1;
                        }
                    }
                    writer.start_field(
                        field_info,
                        num_terms as usize,
                        has_positions,
                        has_offsets,
                        has_payloads,
                    )?;
                    let mut terms_iter = terms.iterator()?;
                    let mut term_count = 0;
                    while let Some(term) = terms_iter.next()? {
                        term_count += 1;
                        let freq = terms_iter.total_term_freq()? as i32;
                        writer.start_term(BytesRef::new(&term), freq)?;
                        if has_positions || has_offsets {
                            let mut docs_and_pos_iter = terms_iter.postings_with_flags(
                                PostingIteratorFlags::OFFSETS | PostingIteratorFlags::PAYLOADS,
                            )?;
                            // Term vectors contain exactly one document.
                            let doc = docs_and_pos_iter.next()?;
                            debug_assert_ne!(doc, NO_MORE_DOCS);
                            debug_assert_eq!(docs_and_pos_iter.freq()?, freq);
                            for _pos_upto in 0..freq {
                                let pos = docs_and_pos_iter.next_position()?;
                                let start_offset = docs_and_pos_iter.start_offset()?;
                                let end_offset = docs_and_pos_iter.end_offset()?;
                                let payloads = docs_and_pos_iter.payload()?;
                                debug_assert!(!has_positions || pos >= 0);
                                writer.add_position(pos, start_offset, end_offset, &payloads)?;
                            }
                        }
                        writer.finish_term()?;
                    }
                    debug_assert_eq!(term_count, num_terms);
                    writer.finish_field()?;
                    last_field_name = field;
                }
            }
            assert_eq!(field_count, num_fields);
        } else {
            // Document without vectors: record it as empty.
            writer.start_document(0)?;
        }
        writer.finish_document()
    }
    /// Aborts the inner consumer and removes the temporary files.
    fn abort(&mut self) -> Result<()> {
        self.consumer.abort()?;
        self.tmp_directory.delete_temp_files();
        Ok(())
    }
}
/// The two consumer variants: `Raw` preserves index order; `Sorting` writes
/// to a temporary directory and re-orders vectors on flush.
enum TermVectorsConsumerEnum<
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
> {
    Raw(TermVectorsConsumerImpl<D, C, MS, MP, TrackingValidDirectory<D>>),
    Sorting(SortingTermVectorsConsumerImpl<D, C, MS, MP>),
}
/// Public facade over the two consumer variants; every call is delegated to
/// the active variant.
pub struct TermVectorsConsumer<
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
>(TermVectorsConsumerEnum<D, C, MS, MP>);
impl<D, C, MS, MP> TermVectorsConsumer<D, C, MS, MP>
where
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
{
    /// Builds a consumer that writes vectors directly in index order.
    pub fn new_raw(dwpt: &mut DocumentsWriterPerThread<D, C, MS, MP>) -> Self {
        let dir = Arc::clone(&dwpt.directory);
        let raw = TermVectorsConsumerImpl::new(dwpt, dir);
        TermVectorsConsumer(TermVectorsConsumerEnum::Raw(raw))
    }
    /// Builds a consumer that re-orders vectors for a sorted index.
    pub fn new_sorting(dwpt: &mut DocumentsWriterPerThread<D, C, MS, MP>) -> Self {
        let c = SortingTermVectorsConsumerImpl::new(dwpt);
        TermVectorsConsumer(TermVectorsConsumerEnum::Sorting(c))
    }
    /// Re-points the inner raw `doc_writer` pointer (e.g. after the DWPT
    /// moved).
    pub fn reset_doc_writer(&mut self, parent: *const DocumentsWriterPerThread<D, C, MS, MP>) {
        match &mut self.0 {
            TermVectorsConsumerEnum::Raw(c) => c.doc_writer = parent,
            TermVectorsConsumerEnum::Sorting(c) => c.consumer.doc_writer = parent,
        }
    }
    /// Re-points the shared term byte pool used by the terms hash.
    pub fn set_term_bytes_pool(&mut self, byte_pool: *mut ByteBlockPool) {
        match &mut self.0 {
            TermVectorsConsumerEnum::Raw(c) => {
                c.base.term_byte_pool = byte_pool;
            }
            TermVectorsConsumerEnum::Sorting(c) => {
                c.consumer.base.term_byte_pool = byte_pool;
            }
        }
    }
    /// Marks the consumer as (un)initialized.
    pub fn set_inited(&mut self, init: bool) {
        match &mut self.0 {
            TermVectorsConsumerEnum::Raw(c) => {
                c.inited = init;
            }
            TermVectorsConsumerEnum::Sorting(c) => {
                c.consumer.inited = init;
            }
        }
    }
    /// Records whether the current document stores any term vectors.
    fn set_has_vectors(&mut self, has_vectors: bool) {
        match &mut self.0 {
            TermVectorsConsumerEnum::Raw(c) => c.has_vectors = has_vectors,
            TermVectorsConsumerEnum::Sorting(c) => c.consumer.has_vectors = has_vectors,
        }
    }
    /// Registers a per-field consumer for the current document's flush list.
    fn add_field_to_flush(&mut self, field_to_flush: &TermVectorsConsumerPerField<D, C, MS, MP>) {
        match &mut self.0 {
            TermVectorsConsumerEnum::Raw(c) => c.add_field_to_flush(field_to_flush),
            TermVectorsConsumerEnum::Sorting(c) => c.consumer.add_field_to_flush(field_to_flush),
        }
    }
    /// Mutable access to the shared `TermsHashBase` of the active variant.
    fn base(&mut self) -> &mut TermsHashBase {
        match &mut self.0 {
            TermVectorsConsumerEnum::Raw(c) => &mut c.base,
            TermVectorsConsumerEnum::Sorting(c) => &mut c.consumer.base,
        }
    }
}
/// `TermsHash` implementation: every operation is forwarded to the active
/// variant (raw or sorting).
impl<D, C, MS, MP> TermsHash<D, C> for TermVectorsConsumer<D, C, MS, MP>
where
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
{
    type PerField = TermVectorsConsumerPerField<D, C, MS, MP>;
    fn base(&self) -> &TermsHashBase {
        match &self.0 {
            TermVectorsConsumerEnum::Raw(c) => &c.base,
            TermVectorsConsumerEnum::Sorting(s) => &s.consumer.base,
        }
    }
    fn base_mut(&mut self) -> &mut TermsHashBase {
        match &mut self.0 {
            TermVectorsConsumerEnum::Raw(c) => &mut c.base,
            TermVectorsConsumerEnum::Sorting(s) => &mut s.consumer.base,
        }
    }
    /// Creates the per-field consumer for `field_info`.
    fn add_field(
        &mut self,
        _field_invert_state: &FieldInvertState,
        field_info: &FieldInfo,
    ) -> TermVectorsConsumerPerField<D, C, MS, MP> {
        TermVectorsConsumerPerField::new(self, field_info.clone())
    }
    fn flush<DW: Directory>(
        &mut self,
        field_to_flush: BTreeMap<&str, &Self::PerField>,
        state: &mut SegmentWriteState<D, DW, C>,
        sort_map: Option<&Arc<PackedLongDocMap>>,
        norms: Option<&impl NormsProducer>,
    ) -> Result<()> {
        match &mut self.0 {
            TermVectorsConsumerEnum::Raw(c) => c.flush(field_to_flush, state, sort_map, norms),
            TermVectorsConsumerEnum::Sorting(s) => s.flush(field_to_flush, state, sort_map, norms),
        }
    }
    fn abort(&mut self) -> Result<()> {
        match &mut self.0 {
            TermVectorsConsumerEnum::Raw(c) => c.abort(),
            TermVectorsConsumerEnum::Sorting(s) => s.abort(),
        }
    }
    fn start_document(&mut self) -> Result<()> {
        match &mut self.0 {
            TermVectorsConsumerEnum::Raw(c) => c.start_document(),
            TermVectorsConsumerEnum::Sorting(s) => s.consumer.start_document(),
        }
    }
    fn finish_document(
        &mut self,
        field_infos: &mut FieldInfosBuilder<FieldNumbersRef>,
    ) -> Result<()> {
        match &mut self.0 {
            TermVectorsConsumerEnum::Raw(c) => c.finish_document(field_infos),
            TermVectorsConsumerEnum::Sorting(s) => s.finish_document(field_infos),
        }
    }
}
/// Per-field state used to collect one field's term vectors while the
/// current document is being inverted.
pub struct TermVectorsConsumerPerField<
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
> {
    base: TermsHashPerFieldBase<TermVectorPostingsArray>,
    // Which vector features are enabled for this field in the current doc.
    do_vectors: bool,
    do_vector_positions: bool,
    do_vector_offsets: bool,
    do_vector_payloads: bool,
    has_payloads: bool,
    // if enabled, and we actually saw any for this field
    inited: bool,
    // Raw back-pointer to the owning consumer; assumed valid while in use.
    parent: *mut TermVectorsConsumer<D, C, MS, MP>,
}
impl<D, C, MS, MP> TermVectorsConsumerPerField<D, C, MS, MP>
where
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
{
    /// Creates the per-field consumer for `field_info`, backed by two byte
    /// streams (positions and offsets).
    fn new(terms_writer: &mut TermVectorsConsumer<D, C, MS, MP>, field_info: FieldInfo) -> Self {
        let base = TermsHashPerFieldBase::new(
            2,
            terms_writer.base(),
            field_info,
            TermVectorPostingsArray::default(),
        );
        TermVectorsConsumerPerField {
            base,
            do_vectors: false,
            do_vector_positions: false,
            do_vector_offsets: false,
            do_vector_payloads: false,
            has_payloads: false,
            inited: false,
            parent: terms_writer,
        }
    }
    #[allow(clippy::mut_from_ref)]
    /// Dereferences the raw back-pointer to the owning consumer.
    fn term_vectors_writer(&self) -> &mut TermVectorsConsumer<D, C, MS, MP> {
        unsafe { &mut *self.parent }
    }
    /// Writes this field's collected terms (with positions/offsets/payloads
    /// as enabled) for the current document, then resets field state.
    fn finish_document<T: TermVectorsWriter>(
        &mut self,
        tv: &mut T,
        pos_reader: &mut ByteSliceReader,
        off_reader: &mut ByteSliceReader,
        field_infos: &mut FieldInfosBuilder<FieldNumbersRef>,
    ) -> Result<()> {
        if !self.do_vectors {
            return Ok(());
        }
        self.do_vectors = false;
        let num_postings = unsafe { self.base.bytes_hash.get_ref().len() };
        // This is called once, after inverting all occurrences
        // of a given field in the doc. At this point we flush
        // our hash into the DocWriter.
        unsafe {
            self.base.bytes_hash.get_mut().sort();
        }
        match &mut self.term_vectors_writer().0 {
            TermVectorsConsumerEnum::Raw(r) => {
                r.terms_writer().start_field(
                    &self.base.field_info,
                    num_postings,
                    self.do_vector_positions,
                    self.do_vector_offsets,
                    self.has_payloads,
                )?;
            }
            TermVectorsConsumerEnum::Sorting(r) => {
                r.consumer.terms_writer().start_field(
                    &self.base.field_info,
                    num_postings,
                    self.do_vector_positions,
                    self.do_vector_offsets,
                    self.has_payloads,
                )?;
            }
        }
        for j in 0..num_postings {
            let term_id = unsafe { self.base.bytes_hash.get_ref().ids[j] as usize };
            let freq = self.base.postings_array.freqs[term_id];
            // Get BytesPtr
            let flush_term = self
                .base
                .term_pool()
                .set_bytes_ref(self.base.postings_array.base.text_starts[term_id] as usize);
            tv.start_term(flush_term, freq as i32)?;
            if self.do_vector_positions || self.do_vector_offsets {
                // Stream 0 holds positions (+payloads), stream 1 offsets.
                if self.do_vector_positions {
                    self.init_reader(pos_reader, term_id, 0);
                }
                if self.do_vector_offsets {
                    self.init_reader(off_reader, term_id, 1);
                }
                tv.add_prox(freq as usize, Some(pos_reader), Some(off_reader))?;
            }
            tv.finish_term()?;
        }
        tv.finish_field()?;
        self.reset();
        // Record on the segment's FieldInfo that this field stores vectors.
        let fi = field_infos.get_or_add(&self.base.field_info.name)?;
        fi.has_store_term_vector = true;
        Ok(())
    }
    /// Clears the per-field term hash for the next document.
    fn reset(&mut self) {
        unsafe {
            self.base.bytes_hash.get_mut().clear(false);
        }
    }
    /// Appends one occurrence's delta-encoded offset and position (plus
    /// optional payload) to the field's byte streams.
    fn write_prox(
        &mut self,
        term_id: usize,
        field_state: &FieldInvertState,
        token_stream: &dyn TokenStream,
    ) {
        if self.do_vector_offsets {
            let start_offset = field_state.offset + token_stream.offset_attribute().start_offset();
            let end_offset = field_state.offset + token_stream.offset_attribute().end_offset();
            // Offsets are stored as (delta-from-last-end, length) varints.
            let delta = start_offset as i32 - self.base.postings_array.last_offsets[term_id] as i32;
            self.base.write_vint(1, delta);
            self.base.write_vint(1, (end_offset - start_offset) as i32);
            self.base.postings_array.last_offsets[term_id] = end_offset as u32;
        }
        if self.do_vector_positions {
            let mut payload: &[u8] = &[0u8; 0];
            if let Some(attr) = token_stream.payload_attribute() {
                payload = attr.get_payload();
            }
            let pos =
                field_state.position - self.base.postings_array.last_positions[term_id] as i32;
            // Low bit of the shifted delta flags whether a payload follows.
            if payload.is_empty() {
                self.base.write_vint(0, pos << 1);
            } else {
                self.base.write_vint(0, (pos << 1) | 1);
                self.base.write_vint(0, payload.len() as i32);
                self.base.write_bytes(0, payload);
            }
            self.base.postings_array.last_positions[term_id] = field_state.position as u32;
        }
    }
}
impl<D, C, MS, MP> TermsHashPerField for TermVectorsConsumerPerField<D, C, MS, MP>
where
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
{
    type P = TermVectorPostingsArray;
    fn base(&self) -> &TermsHashPerFieldBase<TermVectorPostingsArray> {
        &self.base
    }
    fn base_mut(&mut self) -> &mut TermsHashPerFieldBase<TermVectorPostingsArray> {
        &mut self.base
    }
    fn init(&mut self) {
        self.base.init();
        self.inited = true;
    }
    fn reset_ptr(&mut self, parent: &mut TermsHashBase) {
        self.base.reset_ptr(parent);
    }
    /// Called at the start of each field instance; on the first instance it
    /// reads the field type's vector flags and validates their combination,
    /// on later instances it checks the flags are consistent.
    /// Returns whether this field stores term vectors.
    fn start(
        &mut self,
        _field_state: &FieldInvertState,
        field: &impl Fieldable,
        first: bool,
    ) -> Result<bool> {
        debug_assert!(self.inited);
        debug_assert_ne!(field.field_type().index_options(), IndexOptions::Null);
        if first {
            unsafe {
                if !self.base.bytes_hash.get_ref().is_empty() {
                    // Only necessary if previous doc hit a
                    // non-aborting exception while writing vectors in
                    // this field:
                    self.reset();
                }
                self.base.bytes_hash.get_mut().reinit();
            }
            self.has_payloads = false;
            self.do_vectors = field.field_type().store_term_vectors();
            if self.do_vectors {
                self.term_vectors_writer().set_has_vectors(true);
                self.do_vector_positions = field.field_type().store_term_vector_positions();
                // Somewhat confusingly, unlike postings, you are
                // allowed to index TV offsets without TV positions:
                self.do_vector_offsets = field.field_type().store_term_vector_offsets();
                if self.do_vector_positions {
                    self.do_vector_payloads = field.field_type().store_term_vector_payloads();
                } else {
                    self.do_vector_payloads = false;
                    if field.field_type().store_term_vector_payloads() {
                        bail!(ErrorKind::IllegalArgument(
                            "cannot index term vector payloads without term vector positions"
                                .into()
                        ));
                    }
                }
            } else {
                if field.field_type().store_term_vector_offsets() {
                    bail!(ErrorKind::IllegalArgument(
                        "cannot index term vector offsets when term vectors are not indexed".into()
                    ));
                }
                if field.field_type().store_term_vector_positions() {
                    bail!(ErrorKind::IllegalArgument(
                        "cannot index term vector positions when term vectors are not indexed"
                            .into()
                    ));
                }
                if field.field_type().store_term_vector_payloads() {
                    bail!(ErrorKind::IllegalArgument(
                        "cannot index term vector payloads when term vectors are not indexed"
                            .into()
                    ));
                }
            }
        } else {
            if self.do_vectors != field.field_type().store_term_vectors() {
                bail!(ErrorKind::IllegalArgument(
                    "all instances of a given field name must have the same term vectors settings \
                     (storeTermVectors changed)"
                        .into()
                ));
            }
            if self.do_vector_positions != field.field_type().store_term_vector_positions() {
                bail!(ErrorKind::IllegalArgument(
                    "all instances of a given field name must have the same term vectors settings \
                     (store_term_vector_positions changed)"
                        .into()
                ));
            }
            if self.do_vector_offsets != field.field_type().store_term_vector_offsets() {
                bail!(ErrorKind::IllegalArgument(
                    "all instances of a given field name must have the same term vectors settings \
                     (store_term_vector_offsets changed)"
                        .into()
                ));
            }
            if self.do_vector_payloads != field.field_type().store_term_vector_payloads() {
                bail!(ErrorKind::IllegalArgument(
                    "all instances of a given field name must have the same term vectors settings \
                     (store_term_vector_payloads changed)"
                        .into()
                ));
            }
        }
        Ok(self.do_vectors)
    }
    /// Called once per field per document if term vectors
    /// are enabled, to write the vectors to
    /// RAMOutputStream, which is then quickly flushed to
    /// the real term vectors files in the Directory.
    fn finish(&mut self, _field_state: &FieldInvertState) -> Result<()> {
        if self.do_vectors && unsafe { !self.base.bytes_hash.get_ref().is_empty() } {
            self.term_vectors_writer().add_field_to_flush(self);
        }
        Ok(())
    }
    /// First occurrence of `term_id` in this document: initialize its
    /// per-term counters, then record the occurrence.
    fn new_term(
        &mut self,
        term_id: usize,
        field_state: &mut FieldInvertState,
        token_stream: &dyn TokenStream,
        _doc_id: i32,
    ) -> Result<()> {
        self.base.postings_array.freqs[term_id] = 1;
        self.base.postings_array.last_offsets[term_id] = 0;
        self.base.postings_array.last_positions[term_id] = 0;
        self.write_prox(term_id, field_state, token_stream);
        Ok(())
    }
    /// Subsequent occurrence of `term_id`: bump its frequency and record the
    /// occurrence.
    fn add_term(
        &mut self,
        term_id: usize,
        field_state: &mut FieldInvertState,
        token_stream: &dyn TokenStream,
        _doc_id: i32,
    ) -> Result<()> {
        self.base.postings_array.freqs[term_id] += 1;
        self.write_prox(term_id, field_state, token_stream);
        Ok(())
    }
    fn create_postings_array(&self, size: usize) -> TermVectorPostingsArray {
        TermVectorPostingsArray::new(size)
    }
}
// Equality and ordering of per-field consumers are defined purely by the
// field name, so they can be sorted into deterministic field order.
impl<D, C, MS, MP> Eq for TermVectorsConsumerPerField<D, C, MS, MP>
where
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
{
}
impl<D, C, MS, MP> PartialEq for TermVectorsConsumerPerField<D, C, MS, MP>
where
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
{
    fn eq(&self, other: &Self) -> bool {
        self.base.field_info.name.eq(&other.base.field_info.name)
    }
}
impl<D, C, MS, MP> Ord for TermVectorsConsumerPerField<D, C, MS, MP>
where
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
{
    fn cmp(&self, other: &Self) -> Ordering {
        self.base.field_info.name.cmp(&other.base.field_info.name)
    }
}
impl<D, C, MS, MP> PartialOrd for TermVectorsConsumerPerField<D, C, MS, MP>
where
    D: Directory + Send + Sync + 'static,
    C: Codec,
    MS: MergeScheduler,
    MP: MergePolicy,
{
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
/// Parallel per-term arrays holding the vector statistics for each term id.
pub struct TermVectorPostingsArray {
    base: ParallelPostingsArray,
    // How many times this term occurred in the current doc
    freqs: Vec<u32>,
    // Last offset we saw
    last_offsets: Vec<u32>,
    // Last position where this term occurred
    last_positions: Vec<u32>,
}
impl Default for TermVectorPostingsArray {
    /// Default capacity of 1024 term slots.
    fn default() -> Self {
        TermVectorPostingsArray {
            base: ParallelPostingsArray::default(),
            freqs: vec![0u32; 1024],
            last_offsets: vec![0u32; 1024],
            last_positions: vec![0u32; 1024],
        }
    }
}
impl TermVectorPostingsArray {
    /// Creates arrays sized for `size` term slots.
    fn new(size: usize) -> Self {
        let base = ParallelPostingsArray::new(size);
        TermVectorPostingsArray {
            base,
            freqs: vec![0u32; size],
            last_offsets: vec![0u32; size],
            last_positions: vec![0u32; size],
        }
    }
}
impl PostingsArray for TermVectorPostingsArray {
    fn parallel_array(&self) -> &ParallelPostingsArray {
        &self.base
    }
    fn parallel_array_mut(&mut self) -> &mut ParallelPostingsArray {
        &mut self.base
    }
    /// Base cost plus three parallel `u32` arrays (4 bytes each) per posting.
    fn bytes_per_posting(&self) -> usize {
        self.base.bytes_per_posting() + 3 * 4
    }
    /// Grows all parallel arrays to the base array's new size, zero-filling
    /// the new slots.
    fn grow(&mut self) {
        self.base.grow();
        let new_size = self.base.size;
        self.freqs.resize(new_size, 0u32);
        self.last_offsets.resize(new_size, 0u32);
        self.last_positions.resize(new_size, 0u32);
    }
    /// Releases the per-term storage entirely (capacity 0).
    fn clear(&mut self) {
        self.base.clear();
        self.freqs = Vec::with_capacity(0);
        self.last_offsets = Vec::with_capacity(0);
        self.last_positions = Vec::with_capacity(0);
    }
}
| 33.850791 | 100 | 0.563593 |
fe3dfe96332de430a3a86a002f5e02960e380a8a | 337 | // Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
mod tests;
mod database;
mod datasource;
mod local;
mod remote;
mod system;
mod table;
mod table_function;
pub use database::IDatabase;
pub use datasource::{DataSource, IDataSource};
pub use table::ITable;
pub use table_function::ITableFunction;
| 17.736842 | 46 | 0.765579 |
4847bba8815b886383c5dd8f5fd26201afa5156e | 1,540 | pub use self::client::Client;
mod client;
pub mod model;
/// Candle interval accepted by the Yahoo Finance chart API.
#[derive(Debug, Copy, Clone)]
pub enum Interval {
    Minute1,
    Minute2,
    Minute5,
    Minute15,
    Minute30,
    Minute60,
    Minute90,
    Hour1,
    Day1,
    Day5,
    Week1,
    Month1,
    Month3,
}

impl std::fmt::Display for Interval {
    /// Renders the interval as the query-string token Yahoo expects
    /// (e.g. `"15m"`, `"1d"`, `"3mo"`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Interval::Minute1 => "1m",
            Interval::Minute2 => "2m",
            Interval::Minute5 => "5m",
            Interval::Minute15 => "15m",
            Interval::Minute30 => "30m",
            Interval::Minute60 => "60m",
            Interval::Minute90 => "90m",
            Interval::Hour1 => "1h",
            Interval::Day1 => "1d",
            Interval::Day5 => "5d",
            Interval::Week1 => "1wk",
            Interval::Month1 => "1mo",
            Interval::Month3 => "3mo",
        })
    }
}
/// Lookback range accepted by the Yahoo Finance chart API.
#[derive(Debug, Copy, Clone)]
pub enum Range {
    Day1,
    Day5,
    Month1,
    Month3,
    Month6,
    Year1,
    Year2,
    Year5,
    Year10,
    Ytd,
    Max,
}

impl std::fmt::Display for Range {
    /// Renders the range as the query-string token Yahoo expects
    /// (e.g. `"5d"`, `"ytd"`, `"max"`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Range::Day1 => "1d",
            Range::Day5 => "5d",
            Range::Month1 => "1mo",
            Range::Month3 => "3mo",
            Range::Month6 => "6mo",
            Range::Year1 => "1y",
            Range::Year2 => "2y",
            Range::Year5 => "5y",
            Range::Year10 => "10y",
            Range::Ytd => "ytd",
            Range::Max => "max",
        })
    }
}
| 18.554217 | 72 | 0.412987 |
0e0b5e28f1a672439e8c27ed5c117787374df3ca | 9,682 | use crate::{
file::{self, File},
graph,
};
use byteorder::{BigEndian, ByteOrder};
use git_object::{borrowed, owned, SHA1_SIZE};
use std::{
convert::{TryFrom, TryInto},
fmt::{Debug, Formatter},
slice::Chunks,
};
/// Errors produced while decoding a commit's parent edges from a
/// commit-graph file.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// An extra-edges index ran past the end of the file's extra edges chunk.
    #[error("commit {0}'s extra edges overflows the commit-graph file's extra edges list")]
    ExtraEdgesListOverflow(owned::Id),
    /// The first parent slot may only hold a graph position or NO_PARENT.
    #[error("commit {0}'s first parent is an extra edge index, which is invalid")]
    FirstParentIsExtraEdgeIndex(owned::Id),
    /// The second parent refers to the extra edges list, but the file has none.
    #[error("commit {0} has extra edges, but commit-graph file has no extra edges list")]
    MissingExtraEdgesList(owned::Id),
    /// Parent slot 2 is set while slot 1 is NO_PARENT — a malformed record.
    #[error("commit {0} has a second parent but not a first parent")]
    SecondParentWithoutFirstParent(owned::Id),
}
// Note that git's commit-graph-format.txt as of v2.28.0 gives an incorrect value 0x0700_0000 for
// NO_PARENT. Fixed in https://github.com/git/git/commit/4d515253afcef985e94400adbfed7044959f9121 .
const NO_PARENT: u32 = 0x7000_0000;
// High bit of a parent field: when set, the remaining bits are an index into
// the extra edges list rather than a graph position (see ParentEdge::from_raw).
const EXTENDED_EDGES_MASK: u32 = 0x8000_0000;
/// One commit record of a commit-graph file, decoded eagerly from its
/// fixed-size entry (see `Commit::new` for the byte layout).
pub struct Commit<'a> {
    file: &'a File,
    pos: file::Position,
    // We can parse the below fields lazily if needed.
    // Committer time in seconds (lower 34 bits of the 8-byte date field).
    commit_timestamp: u64,
    // Generation number (upper 30 bits of the same 8-byte field).
    generation: u32,
    parent1: ParentEdge,
    parent2: ParentEdge,
    root_tree_id: borrowed::Id<'a>,
}
impl<'a> Commit<'a> {
    /// Decodes the commit record at `pos`.
    ///
    /// Record layout: 20-byte root tree SHA1, two big-endian u32 parent
    /// fields, then an 8-byte field holding generation (top 30 bits, hence
    /// the `>> 2` on the first u32) and committer timestamp (low 34 bits,
    /// hence the 0x0003_ffff_ffff mask on the full u64).
    pub(crate) fn new(file: &'a File, pos: file::Position) -> Self {
        let bytes = file.commit_data_bytes(pos);
        Commit {
            file,
            pos,
            root_tree_id: borrowed::Id::try_from(&bytes[..SHA1_SIZE]).expect("20 bytes SHA1 to be alright"),
            parent1: ParentEdge::from_raw(BigEndian::read_u32(&bytes[SHA1_SIZE..SHA1_SIZE + 4])),
            parent2: ParentEdge::from_raw(BigEndian::read_u32(&bytes[SHA1_SIZE + 4..SHA1_SIZE + 8])),
            generation: BigEndian::read_u32(&bytes[SHA1_SIZE + 8..SHA1_SIZE + 12]) >> 2,
            commit_timestamp: BigEndian::read_u64(&bytes[SHA1_SIZE + 8..SHA1_SIZE + 16]) & 0x0003_ffff_ffff,
        }
    }
    /// Returns the committer timestamp of this commit.
    ///
    /// The value is the number of seconds since 1970-01-01 00:00:00 UTC.
    pub fn committer_timestamp(&self) -> u64 {
        self.commit_timestamp
    }
    /// Returns the generation number of this commit.
    ///
    /// Commits without parents have generation number 1. Commits with parents have a generation
    /// number that is the max of their parents' generation numbers + 1.
    pub fn generation(&self) -> u32 {
        self.generation
    }
    /// Iterates this commit's parents as graph positions; decoding errors are
    /// yielded in-stream and the iterator fuses after the first error.
    pub fn iter_parents(&'a self) -> impl Iterator<Item = Result<graph::Position, Error>> + 'a {
        // I didn't find a combinator approach that a) was as strict as ParentIterator, b) supported
        // fuse-after-first-error behavior, and b) was significantly shorter or more understandable
        // than ParentIterator. So here we are.
        ParentIterator {
            commit_data: self,
            state: ParentIteratorState::First,
        }
    }
    // Allow the return value to outlive this Commit object, as it only needs to be bound by the
    // lifetime of the parent file.
    /// The id (object hash) of this commit, looked up in the parent file.
    pub fn id<'b>(&'b self) -> borrowed::Id<'a>
    where
        'a: 'b,
    {
        self.file.id_at(self.pos)
    }
    /// The first parent's graph position, or `None` for a root commit.
    pub fn parent1(&self) -> Result<Option<graph::Position>, Error> {
        self.iter_parents().next().transpose()
    }
    /// This commit's position within its commit-graph file.
    pub fn position(&self) -> file::Position {
        self.pos
    }
    // Allow the return value to outlive this Commit object, as it only needs to be bound by the
    // lifetime of the parent file.
    /// The id of the root tree associated with this commit.
    pub fn root_tree_id<'b>(&'b self) -> borrowed::Id<'a>
    where
        'a: 'b,
    {
        self.root_tree_id
    }
}
// Hand-rolled Debug: prints the derived id alongside the decoded fields
// rather than the raw struct contents.
impl<'a> Debug for Commit<'a> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Commit {{ id: {}, lex_pos: {}, generation: {}, root_tree_id: {}, parent1: {:?}, parent2: {:?} }}",
            self.id(),
            self.pos,
            self.generation(),
            self.root_tree_id(),
            self.parent1,
            self.parent2,
        )
    }
}
impl<'a> Eq for Commit<'a> {}

// Identity-based equality: two commits are equal only when they come from the
// very same `File` instance (pointer comparison, not content comparison) at
// the same position.
impl<'a> PartialEq for Commit<'a> {
    fn eq(&self, other: &Self) -> bool {
        self.file as *const File == other.file as *const File && self.pos == other.pos
    }
}
/// Iterator over a commit's parents; strict (surfaces malformed records as
/// errors) and fused after the first error — see the `Iterator` impl.
pub struct ParentIterator<'a> {
    commit_data: &'a Commit<'a>,
    state: ParentIteratorState<'a>,
}
impl<'a> Iterator for ParentIterator<'a> {
    type Item = Result<graph::Position, Error>;
    // State machine: First -> Second -> (optionally) Extra(chunks) -> Exhausted.
    // The state is taken out and defaults to Exhausted, so any error path
    // leaves the iterator fused.
    fn next(&mut self) -> Option<Self::Item> {
        let state = std::mem::replace(&mut self.state, ParentIteratorState::Exhausted);
        match state {
            ParentIteratorState::First => match self.commit_data.parent1 {
                // No first parent: a set second parent is a malformed record.
                ParentEdge::None => match self.commit_data.parent2 {
                    ParentEdge::None => None,
                    _ => Some(Err(Error::SecondParentWithoutFirstParent(self.commit_data.id().into()))),
                },
                ParentEdge::GraphPosition(pos) => {
                    self.state = ParentIteratorState::Second;
                    Some(Ok(pos))
                }
                ParentEdge::ExtraEdgeIndex(_) => {
                    Some(Err(Error::FirstParentIsExtraEdgeIndex(self.commit_data.id().into())))
                }
            },
            ParentIteratorState::Second => match self.commit_data.parent2 {
                ParentEdge::None => None,
                ParentEdge::GraphPosition(pos) => Some(Ok(pos)),
                // Octopus merge: parents 2..n live in the extra edges list,
                // starting at 4-byte entry `extra_edge_index`.
                ParentEdge::ExtraEdgeIndex(extra_edge_index) => {
                    if let Some(extra_edges_list) = self.commit_data.file.extra_edges_data() {
                        let start_offset: usize = extra_edge_index
                            .try_into()
                            .expect("an architecture able to hold 32 bits of integer");
                        let start_offset = start_offset
                            .checked_mul(4)
                            .expect("an extended edge index small enough to fit in usize");
                        if let Some(tail) = extra_edges_list.get(start_offset..) {
                            self.state = ParentIteratorState::Extra(tail.chunks(4));
                            // This recursive call is what blocks me from replacing ParentIterator
                            // with a std::iter::from_fn closure.
                            self.next()
                        } else {
                            Some(Err(Error::ExtraEdgesListOverflow(self.commit_data.id().into())))
                        }
                    } else {
                        Some(Err(Error::MissingExtraEdgesList(self.commit_data.id().into())))
                    }
                }
            },
            ParentIteratorState::Extra(mut chunks) => {
                if let Some(chunk) = chunks.next() {
                    let extra_edge = BigEndian::read_u32(chunk);
                    match ExtraEdge::from_raw(extra_edge) {
                        ExtraEdge::Internal(pos) => {
                            self.state = ParentIteratorState::Extra(chunks);
                            Some(Ok(pos))
                        }
                        // Last entry of the run: stay Exhausted after this.
                        ExtraEdge::Last(pos) => Some(Ok(pos)),
                    }
                } else {
                    // Ran off the end of the list without seeing a Last marker.
                    Some(Err(Error::ExtraEdgesListOverflow(self.commit_data.id().into())))
                }
            }
            ParentIteratorState::Exhausted => None,
        }
    }
    // Bounds count yielded items, including potential in-stream errors.
    // NOTE(review): the `(3, None)` lower bound presumably assumes an extra
    // edges run holds at least two parents (octopus merges have > 2 parents
    // in total) — confirm against the commit-graph format documentation.
    fn size_hint(&self) -> (usize, Option<usize>) {
        match (&self.state, self.commit_data.parent1, self.commit_data.parent2) {
            (ParentIteratorState::First, ParentEdge::None, ParentEdge::None) => (0, Some(0)),
            (ParentIteratorState::First, ParentEdge::None, _) => (1, Some(1)),
            (ParentIteratorState::First, ParentEdge::GraphPosition(_), ParentEdge::None) => (1, Some(1)),
            (ParentIteratorState::First, ParentEdge::GraphPosition(_), ParentEdge::GraphPosition(_)) => (2, Some(2)),
            (ParentIteratorState::First, ParentEdge::GraphPosition(_), ParentEdge::ExtraEdgeIndex(_)) => (3, None),
            (ParentIteratorState::First, ParentEdge::ExtraEdgeIndex(_), _) => (1, Some(1)),
            (ParentIteratorState::Second, _, ParentEdge::None) => (0, Some(0)),
            (ParentIteratorState::Second, _, ParentEdge::GraphPosition(_)) => (1, Some(1)),
            (ParentIteratorState::Second, _, ParentEdge::ExtraEdgeIndex(_)) => (2, None),
            (ParentIteratorState::Extra(_), _, _) => (1, None),
            (ParentIteratorState::Exhausted, _, _) => (0, Some(0)),
        }
    }
}
// Which parent slot (or extra-edge chunk stream) to decode on the next call.
#[derive(Debug)]
enum ParentIteratorState<'a> {
    First,
    Second,
    Extra(Chunks<'a, u8>),
    Exhausted,
}
// Decoded form of one fixed-size parent field of a commit record.
#[derive(Clone, Copy, Debug)]
enum ParentEdge {
    None,
    GraphPosition(graph::Position),
    ExtraEdgeIndex(u32),
}
impl ParentEdge {
    /// Decodes a raw 32-bit parent field: the sentinel NO_PARENT means no
    /// parent, a set high bit marks an extra-edges index, anything else is a
    /// direct graph position.
    pub fn from_raw(raw: u32) -> ParentEdge {
        match raw {
            NO_PARENT => ParentEdge::None,
            raw if raw & EXTENDED_EDGES_MASK != 0 => {
                ParentEdge::ExtraEdgeIndex(raw & !EXTENDED_EDGES_MASK)
            }
            raw => ParentEdge::GraphPosition(graph::Position(raw)),
        }
    }
}
const LAST_EXTENDED_EDGE_MASK: u32 = 0x8000_0000;
// One entry of the extra edges (octopus merge) list; `Last` terminates a run.
enum ExtraEdge {
    Internal(graph::Position),
    Last(graph::Position),
}
impl ExtraEdge {
    /// Decodes a raw extra-edge entry; the high bit marks the last entry of
    /// an octopus merge's parent run.
    pub fn from_raw(raw: u32) -> Self {
        let pos = graph::Position(raw & !LAST_EXTENDED_EDGE_MASK);
        if raw & LAST_EXTENDED_EDGE_MASK == 0 {
            Self::Internal(pos)
        } else {
            Self::Last(pos)
        }
    }
}
| 37.673152 | 117 | 0.569717 |
08fbc2625a4be465179df0b2138ffbf0a63bbeb0 | 1,885 | //! General-purpose I/O heads.
#[cfg(any(
stm32_mcu = "stm32f100",
stm32_mcu = "stm32f101",
stm32_mcu = "stm32f102",
stm32_mcu = "stm32f103",
stm32_mcu = "stm32f107",
))]
mod f1;
#[cfg(any(stm32_mcu = "stm32f303"))]
mod f3;
#[cfg(any(
stm32_mcu = "stm32f401",
stm32_mcu = "stm32f405",
stm32_mcu = "stm32f407",
stm32_mcu = "stm32f410",
stm32_mcu = "stm32f411",
stm32_mcu = "stm32f412",
stm32_mcu = "stm32f413",
stm32_mcu = "stm32f427",
stm32_mcu = "stm32f429",
stm32_mcu = "stm32f446",
stm32_mcu = "stm32f469",
))]
mod f4;
#[cfg(any(
stm32_mcu = "stm32l4x1",
stm32_mcu = "stm32l4x2",
stm32_mcu = "stm32l4x3",
stm32_mcu = "stm32l4x5",
stm32_mcu = "stm32l4x6",
stm32_mcu = "stm32l4r5",
stm32_mcu = "stm32l4r7",
stm32_mcu = "stm32l4r9",
stm32_mcu = "stm32l4s5",
stm32_mcu = "stm32l4s7",
stm32_mcu = "stm32l4s9"
))]
mod l4_all;
#[cfg(any(
stm32_mcu = "stm32f100",
stm32_mcu = "stm32f101",
stm32_mcu = "stm32f102",
stm32_mcu = "stm32f103",
stm32_mcu = "stm32f107",
))]
pub use self::f1::*;
#[cfg(any(stm32_mcu = "stm32f303"))]
pub use self::f3::*;
#[cfg(any(
stm32_mcu = "stm32f401",
stm32_mcu = "stm32f405",
stm32_mcu = "stm32f407",
stm32_mcu = "stm32f410",
stm32_mcu = "stm32f411",
stm32_mcu = "stm32f412",
stm32_mcu = "stm32f413",
stm32_mcu = "stm32f427",
stm32_mcu = "stm32f429",
stm32_mcu = "stm32f446",
stm32_mcu = "stm32f469",
))]
pub use self::f4::*;
#[cfg(any(
stm32_mcu = "stm32l4x1",
stm32_mcu = "stm32l4x2",
stm32_mcu = "stm32l4x3",
stm32_mcu = "stm32l4x5",
stm32_mcu = "stm32l4x6",
stm32_mcu = "stm32l4r5",
stm32_mcu = "stm32l4r7",
stm32_mcu = "stm32l4r9",
stm32_mcu = "stm32l4s5",
stm32_mcu = "stm32l4s7",
stm32_mcu = "stm32l4s9"
))]
pub use self::l4_all::*;
| 23.5625 | 36 | 0.611671 |
e5ef422d70640aa48cf5c436f05fa6f7c5c32665 | 2,679 | use std::io;
use std::io::Write;
use std::process;
use std::collections::LinkedList;
/// REPL entry point: prompts on stdout and dispatches each stdin line until
/// the `exit` command terminates the process.
fn main() {
    // Twelve inventory slots, '-' meaning empty.
    let mut inventory: LinkedList<char> = LinkedList::from(['-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-',]);
    loop {
        let mut input = String::new();
        print!("Enter command:> ");
        // Flush so the prompt is visible before we block on read_line.
        io::stdout().flush().unwrap();
        io::stdin().read_line(&mut input).expect("Can not read line!");
        // Split on single spaces: consecutive spaces yield extra (empty)
        // tokens, which makes such input fail the length check below.
        let input: Vec<&str> = input.split(' ').collect();
        if input.len() > 2 {
            println!("Invalid command.");
        } else {
            command_handler(input, &mut inventory);
        }
    }
}
/// Dispatches a tokenized command line: two tokens are item commands
/// (`insert`/`remove`), one token is an argument-less command
/// (`exit`/`help`/`show`); anything else reports an invalid command.
fn command_handler(commands: Vec<&str>, inv: &mut LinkedList<char>) {
    if commands.len() == 2 {
        let (command, item) = (commands[0].trim(), commands[1].trim());
        match command {
            "insert" => insert_item(inv, item),
            "remove" => remove_item(inv, item),
            _ => println!("Invalid command."),
        }
    } else {
        match commands[0].trim() {
            "exit" => exit_command(),
            "help" => print_help(),
            "show" => println!("Inventory {:?}", inv),
            _ => println!("Invalid command."),
        }
    }
}
/// Inserts `item` into the first empty ('-') inventory slot.
///
/// Valid item types are exactly "a", "f", "p" and "w". The previous
/// `"a f p w".contains(item)` check accepted any substring (" ", "a f", …)
/// and `chars().next().unwrap()` panicked on an empty item; both are fixed
/// by validating with an exact match before touching the string.
fn insert_item(inv: &mut LinkedList<char>, item: &str) {
    if !matches!(item, "a" | "f" | "p" | "w") {
        println!("Invalid item.");
        return;
    }
    let ch = item.chars().next().expect("validated one-char item type");
    // First empty slot wins; a missing slot means the inventory is full.
    match inv.iter_mut().find(|slot| **slot == '-') {
        Some(slot) => {
            *slot = ch;
            println!("{} was inserted.", ch);
        }
        None => println!("Inventory is full."),
    }
}
/// Removes the first occurrence of `item` from the inventory, marking the
/// slot empty ('-').
///
/// Valid item types are exactly "a", "f", "p" and "w". As with `insert_item`,
/// the old substring-based `contains` check and the pre-validation
/// `chars().next().unwrap()` (which panicked on "") are replaced by an exact
/// match performed before the string is inspected.
fn remove_item(inv: &mut LinkedList<char>, item: &str) {
    if !matches!(item, "a" | "f" | "p" | "w") {
        println!("Invalid item.");
        return;
    }
    let ch = item.chars().next().expect("validated one-char item type");
    // A valid item type that is not present still reports "Invalid item.",
    // matching the original behavior.
    match inv.iter_mut().find(|slot| **slot == ch) {
        Some(slot) => {
            *slot = '-';
            println!("{} was removed.", ch);
        }
        None => println!("Invalid item."),
    }
}
/// Prints the list of commands the REPL understands, one per line.
fn print_help() {
    let help_lines = [
        "Available commands:",
        "1. insert <itemType>",
        "2. remove <itemType>",
        "3. show",
        "4. help",
        "5. exit",
    ];
    for line in &help_lines {
        println!("{}", line);
    }
}
/// Prints a farewell and terminates the whole process immediately.
// NOTE(review): 0x0100 == 256, but on Unix only the low 8 bits of the exit
// status are reported, so this effectively exits with status 0 — confirm
// whether a non-zero status was intended.
fn exit_command() {
    println!("Bye.");
    process::exit(0x0100);
}
| 26.79 | 122 | 0.467339 |
eb0badd4289a8c91967d87cf64f61f1712f71585 | 64,864 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
//
extern crate arch;
extern crate devices;
extern crate epoll;
extern crate kvm_ioctls;
extern crate libc;
extern crate linux_loader;
extern crate net_util;
extern crate signal_hook;
extern crate vfio;
extern crate vm_allocator;
extern crate vm_memory;
extern crate vm_virtio;
extern crate vmm_sys_util;
use crate::config::{ConsoleOutputMode, VmConfig};
use arch::RegionType;
use devices::ioapic;
use kvm_bindings::{
kvm_enable_cap, kvm_msi, kvm_pit_config, kvm_userspace_memory_region, KVM_CAP_SPLIT_IRQCHIP,
KVM_PIT_SPEAKER_DUMMY,
};
use kvm_ioctls::*;
use libc::O_TMPFILE;
use libc::{c_void, siginfo_t, EFD_NONBLOCK, TIOCGWINSZ};
use linux_loader::loader::KernelLoader;
use net_util::Tap;
use pci::{
InterruptDelivery, InterruptParameters, PciConfigIo, PciDevice, PciInterruptPin, PciRoot,
};
use qcow::{self, ImageType, QcowFile};
use signal_hook::{iterator::Signals, SIGWINCH};
use std::ffi::CString;
use std::fs::{File, OpenOptions};
use std::io::{self, sink, stdout};
use std::ops::Deref;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::ptr::null_mut;
use std::sync::{Arc, Barrier, Mutex, RwLock};
use std::{fmt, result, str, thread};
use vfio::{VfioDevice, VfioPciDevice, VfioPciError};
use vm_allocator::{GsiApic, SystemAllocator};
use vm_memory::guest_memory::FileOffset;
use vm_memory::{
Address, Bytes, Error as MmapError, GuestAddress, GuestMemory, GuestMemoryMmap,
GuestMemoryRegion, GuestUsize,
};
use vm_virtio::transport::VirtioPciDevice;
use vm_virtio::{VirtioSharedMemory, VirtioSharedMemoryList};
use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::signal::register_signal_handler;
use vmm_sys_util::terminal::Terminal;
const VCPU_RTSIG_OFFSET: i32 = 0;
const X86_64_IRQ_BASE: u32 = 5;
const DEFAULT_MSIX_VEC_NUM: u16 = 2;
// CPUID feature bits
const TSC_DEADLINE_TIMER_ECX_BIT: u8 = 24; // tsc deadline timer ecx bit.
const HYPERVISOR_ECX_BIT: u8 = 31; // Hypervisor ecx bit.
// 64 bit direct boot entry offset for bzImage
const KERNEL_64BIT_ENTRY_OFFSET: u64 = 0x200;
// IOAPIC address range
const IOAPIC_RANGE_ADDR: u64 = 0xfec0_0000;
const IOAPIC_RANGE_SIZE: u64 = 0x20;
// Debug I/O port
#[cfg(target_arch = "x86_64")]
const DEBUG_IOPORT: u16 = 0x80;
const DEBUG_IOPORT_PREFIX: &str = "Debug I/O port";
/// Debug I/O port, see:
/// https://www.intel.com/content/www/us/en/support/articles/000005500/boards-and-kits.html
///
/// Since we're not a physical platform, we can freely assign code ranges for
/// debugging specific parts of our virtual platform.
pub enum DebugIoPortRange {
    Firmware,   // codes 0x00..=0x1f (see from_u8)
    Bootloader, // codes 0x20..=0x3f
    Kernel,     // codes 0x40..=0x5f
    Userspace,  // codes 0x60..=0x7f
    Custom,     // any other code
}
impl DebugIoPortRange {
    /// Classifies a debug I/O port code into the platform component that
    /// owns its 0x20-wide code range.
    fn from_u8(value: u8) -> DebugIoPortRange {
        if value <= 0x1f {
            DebugIoPortRange::Firmware
        } else if value <= 0x3f {
            DebugIoPortRange::Bootloader
        } else if value <= 0x5f {
            DebugIoPortRange::Kernel
        } else if value <= 0x7f {
            DebugIoPortRange::Userspace
        } else {
            DebugIoPortRange::Custom
        }
    }
}
impl fmt::Display for DebugIoPortRange {
    /// Renders as "<prefix>: <component>", e.g. "Debug I/O port: Kernel".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let component = match self {
            DebugIoPortRange::Firmware => "Firmware",
            DebugIoPortRange::Bootloader => "Bootloader",
            DebugIoPortRange::Kernel => "Kernel",
            DebugIoPortRange::Userspace => "Userspace",
            DebugIoPortRange::Custom => "Custom",
        };
        write!(f, "{}: {}", DEBUG_IOPORT_PREFIX, component)
    }
}
/// Errors associated with VM management
#[derive(Debug)]
pub enum Error {
    /// Cannot open the VM file descriptor.
    VmFd(io::Error),
    /// Cannot create the KVM instance
    VmCreate(io::Error),
    /// Cannot set the VM up
    VmSetup(io::Error),
    /// Cannot open the kernel image
    KernelFile(io::Error),
    /// Mmap backed guest memory error
    GuestMemory(MmapError),
    /// Cannot load the kernel in memory
    KernelLoad(linux_loader::loader::Error),
    /// Cannot load the command line in memory
    CmdLine,
    /// Cannot open the VCPU file descriptor.
    VcpuFd(io::Error),
    /// Cannot run the VCPUs.
    VcpuRun(io::Error),
    /// Cannot spawn a new vCPU thread.
    VcpuSpawn(io::Error),
    #[cfg(target_arch = "x86_64")]
    /// Cannot set the local interruption due to bad configuration.
    LocalIntConfiguration(arch::x86_64::interrupts::Error),
    #[cfg(target_arch = "x86_64")]
    /// Error configuring the MSR registers
    MSRSConfiguration(arch::x86_64::regs::Error),
    #[cfg(target_arch = "x86_64")]
    /// Error configuring the general purpose registers
    REGSConfiguration(arch::x86_64::regs::Error),
    #[cfg(target_arch = "x86_64")]
    /// Error configuring the special registers
    SREGSConfiguration(arch::x86_64::regs::Error),
    #[cfg(target_arch = "x86_64")]
    /// Error configuring the floating point related registers
    FPUConfiguration(arch::x86_64::regs::Error),
    /// The call to KVM_SET_CPUID2 failed.
    SetSupportedCpusFailed(io::Error),
    /// Cannot create a device manager.
    DeviceManager(DeviceManagerError),
    /// Cannot create EventFd.
    EventFd(io::Error),
    /// Cannot add legacy device to Bus.
    BusError(devices::BusError),
    /// Cannot create epoll context.
    EpollError(io::Error),
    /// Write to the serial console failed.
    Serial(vmm_sys_util::errno::Error),
    /// Write to the virtio console failed.
    Console(vmm_sys_util::errno::Error),
    /// Cannot setup terminal in raw mode.
    SetTerminalRaw(vmm_sys_util::errno::Error),
    /// Cannot setup terminal in canonical mode.
    SetTerminalCanon(vmm_sys_util::errno::Error),
    /// Cannot create the system allocator
    CreateSystemAllocator,
    /// Failed parsing network parameters
    ParseNetworkParameters,
    /// Unexpected KVM_RUN exit reason
    VcpuUnhandledKvmExit,
    /// A guest memory address/size computation overflowed.
    MemOverflow,
    /// Failed to create shared file.
    SharedFileCreate(io::Error),
    /// Failed to set shared file length.
    SharedFileSetLen(io::Error),
    /// Failed to allocate a memory range.
    MemoryRangeAllocation,
    /// Failed to allocate the IOAPIC memory range.
    IoapicRangeAllocation,
}
pub type Result<T> = result::Result<T, Error>;
/// Errors associated with device manager
#[derive(Debug)]
pub enum DeviceManagerError {
    /// Cannot create EventFd.
    EventFd(io::Error),
    /// Cannot open disk path
    Disk(io::Error),
    /// Cannot create vhost-user-net device
    CreateVhostUserNet(vm_virtio::vhost_user::Error),
    /// Cannot create virtio-blk device
    CreateVirtioBlock(io::Error),
    /// Cannot create virtio-net device
    CreateVirtioNet(vm_virtio::net::Error),
    /// Cannot create virtio-console device
    CreateVirtioConsole(io::Error),
    /// Cannot create virtio-rng device
    CreateVirtioRng(io::Error),
    /// Cannot create virtio-fs device
    CreateVirtioFs(vm_virtio::vhost_user::Error),
    /// Cannot create virtio-pmem device
    CreateVirtioPmem(io::Error),
    /// Failed parsing disk image format
    DetectImageType(qcow::Error),
    /// Cannot open qcow disk path
    QcowDeviceCreate(qcow::Error),
    /// Cannot open tap interface
    OpenTap(net_util::TapError),
    /// Cannot allocate IRQ.
    AllocateIrq,
    /// Cannot configure the IRQ.
    Irq(io::Error),
    /// Cannot allocate PCI BARs
    AllocateBars(pci::PciDeviceError),
    /// Cannot register ioevent.
    RegisterIoevent(io::Error),
    /// Cannot create virtio device
    VirtioDevice(vmm_sys_util::errno::Error),
    /// Cannot add PCI device
    AddPciDevice(pci::PciRootError),
    /// Cannot open persistent memory file
    PmemFileOpen(io::Error),
    /// Cannot set persistent memory file size
    PmemFileSetLen(io::Error),
    /// Cannot find a memory range for persistent memory
    PmemRangeAllocation,
    /// Cannot find a memory range for virtio-fs
    FsRangeAllocation,
    /// Error creating serial output file
    SerialOutputFileOpen(io::Error),
    /// Error creating console output file
    ConsoleOutputFileOpen(io::Error),
    /// Cannot create a VFIO device
    VfioCreate(vfio::VfioError),
    /// Cannot create a VFIO PCI device
    VfioPciCreate(vfio::VfioPciError),
    /// Failed to map VFIO MMIO region.
    VfioMapRegion(VfioPciError),
    /// Failed to create the KVM device.
    CreateKvmDevice(io::Error),
    /// Failed to mmap an anonymous region (e.g. the virtio-fs DAX cache).
    Mmap(io::Error),
}
pub type DeviceManagerResult<T> = result::Result<T, DeviceManagerError>;
// Selector for one of the four CPUID output registers, used by
// `CpuidPatch::set_cpuid_reg` to pick which field of an entry to overwrite.
#[allow(dead_code)]
#[derive(Copy, Clone)]
enum CpuidReg {
    EAX,
    EBX,
    ECX,
    EDX,
}
// Describes one CPUID leaf patch: which leaf (`function`/`index`) to match
// and which single bit, if any, to set in each output field.
struct CpuidPatch {
    function: u32,
    index: u32,
    flags_bit: Option<u8>,
    eax_bit: Option<u8>,
    ebx_bit: Option<u8>,
    ecx_bit: Option<u8>,
    edx_bit: Option<u8>,
}
impl CpuidPatch {
    // Overwrites one output register of every CPUID entry whose `function`
    // matches; when `index` is `Some`, the entry's index must match too.
    fn set_cpuid_reg(
        cpuid: &mut CpuId,
        function: u32,
        index: Option<u32>,
        reg: CpuidReg,
        value: u32,
    ) {
        for entry in cpuid.mut_entries_slice().iter_mut() {
            let index_matches = index.map_or(true, |idx| idx == entry.index);
            if entry.function != function || !index_matches {
                continue;
            }
            match reg {
                CpuidReg::EAX => entry.eax = value,
                CpuidReg::EBX => entry.ebx = value,
                CpuidReg::ECX => entry.ecx = value,
                CpuidReg::EDX => entry.edx = value,
            }
        }
    }
    // Applies every patch to every matching CPUID entry, OR-ing the requested
    // bit into each field for which the patch specifies one.
    fn patch_cpuid(cpuid: &mut CpuId, patches: Vec<CpuidPatch>) {
        for entry in cpuid.mut_entries_slice().iter_mut() {
            for patch in patches.iter() {
                if entry.function != patch.function || entry.index != patch.index {
                    continue;
                }
                if let Some(bit) = patch.flags_bit {
                    entry.flags |= 1 << bit;
                }
                if let Some(bit) = patch.eax_bit {
                    entry.eax |= 1 << bit;
                }
                if let Some(bit) = patch.ebx_bit {
                    entry.ebx |= 1 << bit;
                }
                if let Some(bit) = patch.ecx_bit {
                    entry.ecx |= 1 << bit;
                }
                if let Some(bit) = patch.edx_bit {
                    entry.edx |= 1 << bit;
                }
            }
        }
    }
}
/// A wrapper around creating and using a kvm-based VCPU.
pub struct Vcpu {
    fd: VcpuFd,
    id: u8, // CPU number in [0, max vcpus), also exposed to the guest via CPUID
    io_bus: devices::Bus,   // serviced on port-I/O VM exits
    mmio_bus: devices::Bus, // serviced on MMIO VM exits
    ioapic: Option<Arc<Mutex<ioapic::Ioapic>>>, // userspace IOAPIC, if in use
    vm_ts: std::time::Instant, // VM creation time, for debug-port log timestamps
}
impl Vcpu {
    /// Constructs a new VCPU for `vm`.
    ///
    /// # Arguments
    ///
    /// * `id` - Represents the CPU number between [0, max vcpus).
    /// * `vm` - The virtual machine this vcpu will get attached to.
    pub fn new(
        id: u8,
        vm: &Vm,
        io_bus: devices::Bus,
        mmio_bus: devices::Bus,
        ioapic: Option<Arc<Mutex<ioapic::Ioapic>>>,
    ) -> Result<Self> {
        let kvm_vcpu = vm.fd.create_vcpu(id).map_err(Error::VcpuFd)?;
        // Initially the cpuid per vCPU is the one supported by this VM.
        Ok(Vcpu {
            fd: kvm_vcpu,
            id,
            io_bus,
            mmio_bus,
            ioapic,
            vm_ts: vm.creation_ts,
        })
    }
    /// Configures a x86_64 specific vcpu and should be called once per vcpu from the vcpu's thread.
    ///
    /// # Arguments
    ///
    /// * `machine_config` - Specifies necessary info used for the CPUID configuration.
    /// * `kernel_start_addr` - Offset from `guest_mem` at which the kernel starts.
    /// * `vm` - The virtual machine this vcpu will get attached to.
    pub fn configure(&mut self, kernel_start_addr: GuestAddress, vm: &Vm) -> Result<()> {
        let mut cpuid = vm.cpuid.clone();
        // Report this vCPU's id in EDX of CPUID leaf 0xb so each vCPU gets a
        // distinct topology identifier.
        CpuidPatch::set_cpuid_reg(&mut cpuid, 0xb, None, CpuidReg::EDX, u32::from(self.id));
        self.fd
            .set_cpuid2(&cpuid)
            .map_err(Error::SetSupportedCpusFailed)?;
        arch::x86_64::regs::setup_msrs(&self.fd).map_err(Error::MSRSConfiguration)?;
        // Safe to unwrap because this method is called after the VM is configured
        let vm_memory = vm.get_memory();
        // Entry point, boot stack and zero page per the Linux x86 boot protocol.
        arch::x86_64::regs::setup_regs(
            &self.fd,
            kernel_start_addr.raw_value(),
            arch::x86_64::layout::BOOT_STACK_POINTER.raw_value(),
            arch::x86_64::layout::ZERO_PAGE_START.raw_value(),
        )
        .map_err(Error::REGSConfiguration)?;
        arch::x86_64::regs::setup_fpu(&self.fd).map_err(Error::FPUConfiguration)?;
        arch::x86_64::regs::setup_sregs(&vm_memory.read().unwrap(), &self.fd)
            .map_err(Error::SREGSConfiguration)?;
        arch::x86_64::interrupts::set_lint(&self.fd).map_err(Error::LocalIntConfiguration)?;
        Ok(())
    }
    /// Runs the VCPU until it exits, returning the reason.
    ///
    /// Note that the state of the VCPU and associated VM must be setup first for this to do
    /// anything useful.
    pub fn run(&self) -> Result<()> {
        match self.fd.run() {
            Ok(run) => match run {
                // Port I/O accesses are routed to the I/O bus.
                VcpuExit::IoIn(addr, data) => {
                    self.io_bus.read(u64::from(addr), data);
                    Ok(())
                }
                VcpuExit::IoOut(addr, data) => {
                    // Single-byte writes to the debug port are logged, then
                    // forwarded to the bus like any other write.
                    if addr == DEBUG_IOPORT && data.len() == 1 {
                        self.log_debug_ioport(data[0]);
                    }
                    self.io_bus.write(u64::from(addr), data);
                    Ok(())
                }
                // Memory-mapped accesses are routed to the MMIO bus.
                VcpuExit::MmioRead(addr, data) => {
                    self.mmio_bus.read(addr as u64, data);
                    Ok(())
                }
                VcpuExit::MmioWrite(addr, data) => {
                    self.mmio_bus.write(addr as u64, data);
                    Ok(())
                }
                // EOI notifications only matter when the IOAPIC is emulated
                // in userspace.
                VcpuExit::IoapicEoi(vector) => {
                    if let Some(ioapic) = &self.ioapic {
                        ioapic.lock().unwrap().end_of_interrupt(vector);
                    }
                    Ok(())
                }
                r => {
                    error!("Unexpected exit reason on vcpu run: {:?}", r);
                    Err(Error::VcpuUnhandledKvmExit)
                }
            },
            // EAGAIN/EINTR are benign (e.g. signal-interrupted run) and simply
            // let the caller loop again.
            Err(ref e) => match e.raw_os_error().unwrap() {
                libc::EAGAIN | libc::EINTR => Ok(()),
                _ => {
                    error!("VCPU {:?} error {:?}", self.id, e);
                    Err(Error::VcpuUnhandledKvmExit)
                }
            },
        }
    }
    // Log debug io port codes.
    // The timestamp is the elapsed time since VM creation, so boot phases can
    // be correlated with the code ranges in `DebugIoPortRange`.
    fn log_debug_ioport(&self, code: u8) {
        let ts = self.vm_ts.elapsed();
        debug!(
            "[{} code 0x{:x}] {}.{:>06} seconds",
            DebugIoPortRange::from_u8(code),
            code,
            ts.as_secs(),
            ts.as_micros()
        );
    }
}
// Read-only view of the VM handed down to device construction: guest memory,
// the KVM VM fd and the user-supplied configuration.
struct VmInfo<'a> {
    memory: &'a Arc<RwLock<GuestMemoryMmap>>,
    vm_fd: &'a Arc<VmFd>,
    vm_cfg: &'a VmConfig<'a>,
}
// Mutable handles to the two address buses that devices get wired onto.
struct BusInfo<'a> {
    io: &'a mut devices::Bus,
    mmio: &'a mut devices::Bus,
}
// Interrupt-delivery capabilities available to newly created devices.
struct InterruptInfo<'a> {
    msi_capable: bool,
    ioapic: &'a Option<Arc<Mutex<ioapic::Ioapic>>>, // None with in-kernel irqchip
}
// IRQ line backed by the in-kernel irqchip: delivering means writing an
// eventfd that has been registered with KVM as an irqfd.
struct KernelIoapicIrq {
    evt: EventFd,
}
impl KernelIoapicIrq {
    // `evt` is expected to already be registered with KVM via register_irqfd.
    fn new(evt: EventFd) -> Self {
        KernelIoapicIrq { evt }
    }
}
impl devices::Interrupt for KernelIoapicIrq {
    // A single eventfd write triggers the irqfd; KVM performs the injection.
    fn deliver(&self) -> result::Result<(), io::Error> {
        self.evt.write(1)
    }
}
// IRQ line backed by the userspace IOAPIC model: delivery calls into the
// shared IOAPIC with this line's pin number.
struct UserIoapicIrq {
    ioapic: Arc<Mutex<ioapic::Ioapic>>,
    irq: usize,
}
impl UserIoapicIrq {
    // Binds pin `irq` of the shared userspace IOAPIC.
    fn new(ioapic: Arc<Mutex<ioapic::Ioapic>>, irq: usize) -> Self {
        UserIoapicIrq { ioapic, irq }
    }
}
pub fn get_win_size() -> (u16, u16) {
#[repr(C)]
struct WS {
rows: u16,
cols: u16,
};
let ws: WS = WS {
rows: 0u16,
cols: 0u16,
};
unsafe {
libc::ioctl(0, TIOCGWINSZ, &ws);
}
(ws.cols, ws.rows)
}
impl devices::Interrupt for UserIoapicIrq {
    // Asks the userspace IOAPIC model to service this pin; its error type is
    // adapted into io::Error to satisfy the Interrupt trait.
    fn deliver(&self) -> result::Result<(), io::Error> {
        self.ioapic
            .lock()
            .unwrap()
            .service_irq(self.irq)
            .map_err(|e| {
                std::io::Error::new(
                    std::io::ErrorKind::Other,
                    format!("failed to inject IRQ #{}: {:?}", self.irq, e),
                )
            })
    }
}
// Owns the VM's emulated devices together with the buses they are wired onto.
struct DeviceManager {
    io_bus: devices::Bus,
    mmio_bus: devices::Bus,
    // Serial port on 0x3f8
    serial: Option<Arc<Mutex<devices::legacy::Serial>>>,
    console_input: Option<Arc<vm_virtio::ConsoleInput>>,
    // i8042 device for exit
    i8042: Arc<Mutex<devices::legacy::I8042Device>>,
    // Signaled by the i8042 device to request VM shutdown.
    exit_evt: EventFd,
    // IOAPIC
    ioapic: Option<Arc<Mutex<ioapic::Ioapic>>>,
    // PCI root
    pci: Arc<Mutex<PciConfigIo>>,
}
impl DeviceManager {
fn new(
vm_info: &VmInfo,
allocator: &mut SystemAllocator,
msi_capable: bool,
userspace_ioapic: bool,
mut mem_slots: u32,
) -> DeviceManagerResult<Self> {
let mut io_bus = devices::Bus::new();
let mut mmio_bus = devices::Bus::new();
let mut buses = BusInfo {
io: &mut io_bus,
mmio: &mut mmio_bus,
};
let ioapic = if userspace_ioapic {
// Create IOAPIC
Some(Arc::new(Mutex::new(ioapic::Ioapic::new(
vm_info.vm_fd.clone(),
))))
} else {
None
};
let interrupt_info = InterruptInfo {
msi_capable,
ioapic: &ioapic,
};
let serial_writer: Option<Box<dyn io::Write + Send>> = match vm_info.vm_cfg.serial.mode {
ConsoleOutputMode::File => Some(Box::new(
File::create(vm_info.vm_cfg.serial.file.unwrap())
.map_err(DeviceManagerError::SerialOutputFileOpen)?,
)),
ConsoleOutputMode::Tty => Some(Box::new(stdout())),
ConsoleOutputMode::Off | ConsoleOutputMode::Null => None,
};
let serial = if vm_info.vm_cfg.serial.mode != ConsoleOutputMode::Off {
// Serial is tied to IRQ #4
let serial_irq = 4;
let interrupt: Box<dyn devices::Interrupt> = if let Some(ioapic) = &ioapic {
Box::new(UserIoapicIrq::new(ioapic.clone(), serial_irq))
} else {
let serial_evt = EventFd::new(EFD_NONBLOCK).map_err(DeviceManagerError::EventFd)?;
vm_info
.vm_fd
.register_irqfd(serial_evt.as_raw_fd(), serial_irq as u32)
.map_err(DeviceManagerError::Irq)?;
Box::new(KernelIoapicIrq::new(serial_evt))
};
Some(Arc::new(Mutex::new(devices::legacy::Serial::new(
interrupt,
serial_writer,
))))
} else {
None
};
// Add a shutdown device (i8042)
let exit_evt = EventFd::new(EFD_NONBLOCK).map_err(DeviceManagerError::EventFd)?;
let i8042 = Arc::new(Mutex::new(devices::legacy::I8042Device::new(
exit_evt.try_clone().map_err(DeviceManagerError::EventFd)?,
)));
let pci_root = PciRoot::new(None);
let mut pci = PciConfigIo::new(pci_root);
let console_writer: Option<Box<dyn io::Write + Send>> = match vm_info.vm_cfg.console.mode {
ConsoleOutputMode::File => Some(Box::new(
File::create(vm_info.vm_cfg.console.file.unwrap())
.map_err(DeviceManagerError::ConsoleOutputFileOpen)?,
)),
ConsoleOutputMode::Tty => Some(Box::new(stdout())),
ConsoleOutputMode::Null => Some(Box::new(sink())),
ConsoleOutputMode::Off => None,
};
let (col, row) = get_win_size();
let console = if console_writer.is_some() {
let (virtio_console_device, console_input) =
vm_virtio::Console::new(console_writer, col, row)
.map_err(DeviceManagerError::CreateVirtioConsole)?;
DeviceManager::add_virtio_pci_device(
Box::new(virtio_console_device),
vm_info.memory,
allocator,
vm_info.vm_fd,
&mut pci,
&mut buses,
&interrupt_info,
)?;
Some(console_input)
} else {
None
};
DeviceManager::add_virtio_devices(
vm_info,
allocator,
&mut pci,
&mut buses,
&interrupt_info,
&mut mem_slots,
)?;
DeviceManager::add_vfio_devices(vm_info, allocator, &mut pci, &mut buses, mem_slots)?;
let pci = Arc::new(Mutex::new(pci));
Ok(DeviceManager {
io_bus,
mmio_bus,
serial,
console_input: console,
i8042,
exit_evt,
ioapic,
pci,
})
}
fn add_virtio_devices(
vm_info: &VmInfo,
allocator: &mut SystemAllocator,
pci: &mut PciConfigIo,
buses: &mut BusInfo,
interrupt_info: &InterruptInfo,
mut mem_slots: &mut u32,
) -> DeviceManagerResult<()> {
// Add virtio-blk if required
DeviceManager::add_virtio_block_devices(vm_info, allocator, pci, buses, &interrupt_info)?;
// Add virtio-net if required
DeviceManager::add_virtio_net_devices(vm_info, allocator, pci, buses, &interrupt_info)?;
// Add virtio-rng if required
DeviceManager::add_virtio_rng_devices(vm_info, allocator, pci, buses, &interrupt_info)?;
// Add virtio-fs if required
DeviceManager::add_virtio_fs_devices(
vm_info,
allocator,
pci,
buses,
&interrupt_info,
&mut mem_slots,
)?;
// Add virtio-pmem if required
DeviceManager::add_virtio_pmem_devices(
vm_info,
allocator,
pci,
buses,
&interrupt_info,
&mut mem_slots,
)?;
// Add virtio-vhost-user-net if required
DeviceManager::add_virtio_vhost_user_net_devices(
vm_info,
allocator,
pci,
buses,
&interrupt_info,
)?;
Ok(())
}
fn add_virtio_block_devices(
vm_info: &VmInfo,
allocator: &mut SystemAllocator,
pci: &mut PciConfigIo,
buses: &mut BusInfo,
interrupt_info: &InterruptInfo,
) -> DeviceManagerResult<()> {
if let Some(disk_list_cfg) = &vm_info.vm_cfg.disks {
for disk_cfg in disk_list_cfg.iter() {
// Open block device path
let raw_img: File = OpenOptions::new()
.read(true)
.write(true)
.open(disk_cfg.path)
.map_err(DeviceManagerError::Disk)?;
let image_type = qcow::detect_image_type(&raw_img)
.map_err(DeviceManagerError::DetectImageType)?;
let block = match image_type {
ImageType::Raw => {
let raw_img = vm_virtio::RawFile::new(raw_img);
let dev =
vm_virtio::Block::new(raw_img, disk_cfg.path.to_path_buf(), false)
.map_err(DeviceManagerError::CreateVirtioBlock)?;
Box::new(dev) as Box<dyn vm_virtio::VirtioDevice>
}
ImageType::Qcow2 => {
let qcow_img = QcowFile::from(raw_img)
.map_err(DeviceManagerError::QcowDeviceCreate)?;
let dev =
vm_virtio::Block::new(qcow_img, disk_cfg.path.to_path_buf(), false)
.map_err(DeviceManagerError::CreateVirtioBlock)?;
Box::new(dev) as Box<dyn vm_virtio::VirtioDevice>
}
};
DeviceManager::add_virtio_pci_device(
block,
vm_info.memory,
allocator,
vm_info.vm_fd,
pci,
buses,
&interrupt_info,
)?;
}
}
Ok(())
}
fn add_virtio_net_devices(
vm_info: &VmInfo,
allocator: &mut SystemAllocator,
pci: &mut PciConfigIo,
buses: &mut BusInfo,
interrupt_info: &InterruptInfo,
) -> DeviceManagerResult<()> {
// Add virtio-net if required
if let Some(net_list_cfg) = &vm_info.vm_cfg.net {
for net_cfg in net_list_cfg.iter() {
let virtio_net_device: vm_virtio::Net;
if let Some(tap_if_name) = net_cfg.tap {
let tap = Tap::open_named(tap_if_name).map_err(DeviceManagerError::OpenTap)?;
virtio_net_device = vm_virtio::Net::new_with_tap(tap, Some(&net_cfg.mac))
.map_err(DeviceManagerError::CreateVirtioNet)?;
} else {
virtio_net_device =
vm_virtio::Net::new(net_cfg.ip, net_cfg.mask, Some(&net_cfg.mac))
.map_err(DeviceManagerError::CreateVirtioNet)?;
}
DeviceManager::add_virtio_pci_device(
Box::new(virtio_net_device),
vm_info.memory,
allocator,
vm_info.vm_fd,
pci,
buses,
&interrupt_info,
)?;
}
}
Ok(())
}
/// Creates the virtio-rng device (when the configured entropy source path
/// is valid UTF-8) and plugs it on the PCI bus.
fn add_virtio_rng_devices(
    vm_info: &VmInfo,
    allocator: &mut SystemAllocator,
    pci: &mut PciConfigIo,
    buses: &mut BusInfo,
    interrupt_info: &InterruptInfo,
) -> DeviceManagerResult<()> {
    // Add virtio-rng if required
    let source_path = match vm_info.vm_cfg.rng.src.to_str() {
        Some(path) => path,
        // Nothing to do when the source path is not representable as UTF-8.
        None => return Ok(()),
    };
    let rng_device =
        vm_virtio::Rng::new(source_path).map_err(DeviceManagerError::CreateVirtioRng)?;
    DeviceManager::add_virtio_pci_device(
        Box::new(rng_device),
        vm_info.memory,
        allocator,
        vm_info.vm_fd,
        pci,
        buses,
        &interrupt_info,
    )?;
    Ok(())
}
/// Creates a vhost-user-fs device for each `fs` entry in the VM config and
/// plugs it on the PCI bus, optionally backing it with a shared-memory
/// cache window exposed to the guest.
fn add_virtio_fs_devices(
    vm_info: &VmInfo,
    allocator: &mut SystemAllocator,
    pci: &mut PciConfigIo,
    buses: &mut BusInfo,
    interrupt_info: &InterruptInfo,
    mem_slots: &mut u32,
) -> DeviceManagerResult<()> {
    // Add virtio-fs if required
    if let Some(fs_list_cfg) = &vm_info.vm_cfg.fs {
        for fs_cfg in fs_list_cfg.iter() {
            // Entries whose socket path is not valid UTF-8 are silently skipped.
            if let Some(fs_sock) = fs_cfg.sock.to_str() {
                let mut cache: Option<(VirtioSharedMemoryList, u64)> = None;
                if let Some(fs_cache) = fs_cfg.cache_size {
                    // The memory needs to be 2MiB aligned in order to support
                    // hugepages.
                    let fs_guest_addr = allocator
                        .allocate_mmio_addresses(
                            None,
                            fs_cache as GuestUsize,
                            Some(0x0020_0000),
                        )
                        .ok_or(DeviceManagerError::FsRangeAllocation)?;
                    // Anonymous, private host mapping backing the cache
                    // window; MAP_NORESERVE avoids reserving swap up front.
                    let addr = unsafe {
                        libc::mmap(
                            null_mut(),
                            fs_cache as usize,
                            libc::PROT_READ | libc::PROT_WRITE,
                            libc::MAP_NORESERVE | libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
                            -1,
                            0 as libc::off_t,
                        )
                    };
                    if addr == libc::MAP_FAILED {
                        return Err(DeviceManagerError::Mmap(io::Error::last_os_error()));
                    }
                    // Expose the host mapping to the guest at the allocated
                    // MMIO address, consuming one KVM memory slot.
                    let mem_region = kvm_userspace_memory_region {
                        slot: *mem_slots as u32,
                        guest_phys_addr: fs_guest_addr.raw_value(),
                        memory_size: fs_cache,
                        userspace_addr: addr as u64,
                        flags: 0,
                    };
                    // Safe because the guest regions are guaranteed not to overlap.
                    // NOTE(review): the Result of set_user_memory_region is
                    // discarded here, so a registration failure goes unnoticed.
                    let _ = unsafe { vm_info.vm_fd.set_user_memory_region(mem_region) };
                    // Increment the KVM slot number
                    *mem_slots += 1;
                    // Single shared-memory region spanning the whole cache.
                    let mut region_list = Vec::new();
                    region_list.push(VirtioSharedMemory {
                        offset: 0,
                        len: fs_cache,
                    });
                    cache = Some((
                        VirtioSharedMemoryList {
                            addr: fs_guest_addr,
                            len: fs_cache as GuestUsize,
                            region_list,
                        },
                        addr as u64,
                    ));
                }
                let virtio_fs_device = vm_virtio::vhost_user::Fs::new(
                    fs_sock,
                    fs_cfg.tag,
                    fs_cfg.num_queues,
                    fs_cfg.queue_size,
                    cache,
                )
                .map_err(DeviceManagerError::CreateVirtioFs)?;
                DeviceManager::add_virtio_pci_device(
                    Box::new(virtio_fs_device),
                    vm_info.memory,
                    allocator,
                    vm_info.vm_fd,
                    pci,
                    buses,
                    &interrupt_info,
                )?;
            }
        }
    }
    Ok(())
}
fn add_virtio_pmem_devices(
vm_info: &VmInfo,
allocator: &mut SystemAllocator,
pci: &mut PciConfigIo,
buses: &mut BusInfo,
interrupt_info: &InterruptInfo,
mem_slots: &mut u32,
) -> DeviceManagerResult<()> {
// Add virtio-pmem if required
if let Some(pmem_list_cfg) = &vm_info.vm_cfg.pmem {
for pmem_cfg in pmem_list_cfg.iter() {
let size = pmem_cfg.size;
// The memory needs to be 2MiB aligned in order to support
// hugepages.
let pmem_guest_addr = allocator
.allocate_mmio_addresses(None, size as GuestUsize, Some(0x0020_0000))
.ok_or(DeviceManagerError::PmemRangeAllocation)?;
let (custom_flags, set_len) = if pmem_cfg.file.is_dir() {
(O_TMPFILE, true)
} else {
(0, false)
};
let file = OpenOptions::new()
.read(true)
.write(true)
.custom_flags(custom_flags)
.open(pmem_cfg.file)
.map_err(DeviceManagerError::PmemFileOpen)?;
if set_len {
file.set_len(size)
.map_err(DeviceManagerError::PmemFileSetLen)?;
}
let addr = unsafe {
libc::mmap(
null_mut(),
size as usize,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_NORESERVE | libc::MAP_SHARED,
file.as_raw_fd(),
0 as libc::off_t,
) as *mut u8
};
let mem_region = kvm_userspace_memory_region {
slot: *mem_slots as u32,
guest_phys_addr: pmem_guest_addr.raw_value(),
memory_size: size,
userspace_addr: addr as u64,
flags: 0,
};
// Safe because the guest regions are guaranteed not to overlap.
let _ = unsafe { vm_info.vm_fd.set_user_memory_region(mem_region) };
// Increment the KVM slot number
*mem_slots += 1;
let virtio_pmem_device =
vm_virtio::Pmem::new(file, pmem_guest_addr, size as GuestUsize)
.map_err(DeviceManagerError::CreateVirtioPmem)?;
DeviceManager::add_virtio_pci_device(
Box::new(virtio_pmem_device),
vm_info.memory,
allocator,
vm_info.vm_fd,
pci,
buses,
&interrupt_info,
)?;
}
}
Ok(())
}
/// Creates a vhost-user-net device for each configured entry and plugs it
/// on the PCI bus.
fn add_virtio_vhost_user_net_devices(
    vm_info: &VmInfo,
    allocator: &mut SystemAllocator,
    pci: &mut PciConfigIo,
    buses: &mut BusInfo,
    interrupt_info: &InterruptInfo,
) -> DeviceManagerResult<()> {
    // Add vhost-user-net if required
    let cfg_list = match &vm_info.vm_cfg.vhost_user_net {
        Some(list) => list,
        None => return Ok(()),
    };
    for cfg in cfg_list.iter() {
        let device = vm_virtio::vhost_user::Net::new(cfg.mac, cfg.vu_cfg)
            .map_err(DeviceManagerError::CreateVhostUserNet)?;
        DeviceManager::add_virtio_pci_device(
            Box::new(device),
            vm_info.memory,
            allocator,
            vm_info.vm_fd,
            pci,
            buses,
            &interrupt_info,
        )?;
    }
    Ok(())
}
/// Asks KVM to create a VFIO device on this VM and returns its fd.
fn create_kvm_device(vm: &Arc<VmFd>) -> DeviceManagerResult<DeviceFd> {
    // Device descriptor: only the type matters on creation; `fd` is
    // filled in by the kernel and `flags` is unused here.
    let mut device = kvm_bindings::kvm_create_device {
        type_: kvm_bindings::kvm_device_type_KVM_DEV_TYPE_VFIO,
        fd: 0,
        flags: 0,
    };
    vm.create_device(&mut device)
        .map_err(DeviceManagerError::CreateKvmDevice)
}
/// Passes through host devices via VFIO: for each configured device path,
/// creates the VFIO/PCI wrappers, maps the device MMIO regions into the
/// guest and registers the device on the PCI bus.
fn add_vfio_devices(
    vm_info: &VmInfo,
    allocator: &mut SystemAllocator,
    pci: &mut PciConfigIo,
    buses: &mut BusInfo,
    mem_slots: u32,
) -> DeviceManagerResult<()> {
    // First free KVM memory slot; mapping MMIO regions consumes slots, so
    // the running index is threaded through the loop.
    let mut mem_slot = mem_slots;
    if let Some(device_list_cfg) = &vm_info.vm_cfg.devices {
        // Create the KVM VFIO device
        let device_fd = DeviceManager::create_kvm_device(vm_info.vm_fd)?;
        // Shared by every VfioDevice created below.
        let device_fd = Arc::new(device_fd);
        for device_cfg in device_list_cfg.iter() {
            let vfio_device =
                VfioDevice::new(device_cfg.path, device_fd.clone(), vm_info.memory.clone())
                    .map_err(DeviceManagerError::VfioCreate)?;
            let mut vfio_pci_device = VfioPciDevice::new(vm_info.vm_fd, allocator, vfio_device)
                .map_err(DeviceManagerError::VfioPciCreate)?;
            let bars = vfio_pci_device
                .allocate_bars(allocator)
                .map_err(DeviceManagerError::AllocateBars)?;
            // Returns the next free slot after mapping this device's regions.
            mem_slot = vfio_pci_device
                .map_mmio_regions(vm_info.vm_fd, mem_slot)
                .map_err(DeviceManagerError::VfioMapRegion)?;
            let vfio_pci_device = Arc::new(Mutex::new(vfio_pci_device));
            pci.add_device(vfio_pci_device.clone())
                .map_err(DeviceManagerError::AddPciDevice)?;
            pci.register_mapping(vfio_pci_device.clone(), buses.io, buses.mmio, bars)
                .map_err(DeviceManagerError::AddPciDevice)?;
        }
    }
    Ok(())
}
/// Wraps a virtio device into a virtio-pci transport, wires up its queue
/// notifications (ioeventfds) and interrupt delivery (MSI-X when the host
/// supports it, legacy pin interrupt otherwise), and registers the device
/// on the PCI bus.
fn add_virtio_pci_device(
    virtio_device: Box<dyn vm_virtio::VirtioDevice>,
    memory: &Arc<RwLock<GuestMemoryMmap>>,
    allocator: &mut SystemAllocator,
    vm_fd: &Arc<VmFd>,
    pci: &mut PciConfigIo,
    buses: &mut BusInfo,
    interrupt_info: &InterruptInfo,
) -> DeviceManagerResult<()> {
    // Only advertise MSI-X vectors when the host can signal MSIs.
    let msix_num = if interrupt_info.msi_capable {
        DEFAULT_MSIX_VEC_NUM
    } else {
        0
    };
    let mut virtio_pci_device = VirtioPciDevice::new(memory.clone(), virtio_device, msix_num)
        .map_err(DeviceManagerError::VirtioDevice)?;
    let bars = virtio_pci_device
        .allocate_bars(allocator)
        .map_err(DeviceManagerError::AllocateBars)?;
    // Register each queue notification address as a KVM ioeventfd so
    // guest writes to it are signalled through the eventfd.
    for (event, addr, _) in virtio_pci_device.ioeventfds() {
        let io_addr = IoEventAddress::Mmio(addr);
        vm_fd
            .register_ioevent(event.as_raw_fd(), &io_addr, NoDatamatch)
            .map_err(DeviceManagerError::RegisterIoevent)?;
    }
    if interrupt_info.msi_capable {
        // MSI-X path: the delivery callback injects the MSI message
        // described by the device's MSI-X table entry via KVM.
        let vm_fd_clone = vm_fd.clone();
        let msi_cb = Arc::new(Box::new(move |p: InterruptParameters| {
            if let Some(entry) = p.msix {
                let msi_queue = kvm_msi {
                    address_lo: entry.msg_addr_lo,
                    address_hi: entry.msg_addr_hi,
                    data: entry.msg_data,
                    flags: 0u32,
                    devid: 0u32,
                    pad: [0u8; 12],
                };
                // signal_msi() yields >0 when delivered, 0 when the guest
                // blocked the interrupt.
                return vm_fd_clone.signal_msi(msi_queue).map(|ret| {
                    if ret > 0 {
                        debug!("MSI message successfully delivered");
                    } else if ret == 0 {
                        warn!("failed to deliver MSI message, blocked by guest");
                    }
                });
            }
            // Called without an MSI-X entry: nothing can be delivered.
            Err(std::io::Error::new(
                std::io::ErrorKind::Other,
                "missing MSI-X entry",
            ))
        }) as InterruptDelivery);
        virtio_pci_device.assign_msix(msi_cb);
    } else {
        // Legacy path: allocate an IRQ line for pin-based interrupts.
        let irq_num = allocator
            .allocate_irq()
            .ok_or(DeviceManagerError::AllocateIrq)?;
        let irq_cb = if let Some(ioapic) = interrupt_info.ioapic {
            // Userspace IOAPIC present: service the IRQ pin directly.
            let ioapic_clone = ioapic.clone();
            Box::new(move |_p: InterruptParameters| {
                ioapic_clone
                    .lock()
                    .unwrap()
                    .service_irq(irq_num as usize)
                    .map_err(|e| {
                        std::io::Error::new(
                            std::io::ErrorKind::Other,
                            format!("failed to inject IRQ #{}: {:?}", irq_num, e),
                        )
                    })
            }) as InterruptDelivery
        } else {
            // In-kernel irqchip: trigger the interrupt through an irqfd.
            let irqfd = EventFd::new(EFD_NONBLOCK).map_err(DeviceManagerError::EventFd)?;
            vm_fd
                .register_irqfd(irqfd.as_raw_fd(), irq_num)
                .map_err(DeviceManagerError::Irq)?;
            Box::new(move |_p: InterruptParameters| irqfd.write(1)) as InterruptDelivery
        };
        virtio_pci_device.assign_pin_irq(
            Arc::new(irq_cb),
            irq_num as u32,
            PciInterruptPin::IntA,
        );
    }
    let virtio_pci_device = Arc::new(Mutex::new(virtio_pci_device));
    pci.add_device(virtio_pci_device.clone())
        .map_err(DeviceManagerError::AddPciDevice)?;
    pci.register_mapping(
        virtio_pci_device.clone(),
        &mut buses.io,
        &mut buses.mmio,
        bars,
    )
    .map_err(DeviceManagerError::AddPciDevice)?;
    Ok(())
}
/// Registers the fixed platform devices on their buses: serial (if any),
/// i8042 and the PCI root config space on the I/O bus, and the IOAPIC
/// (if any) on the MMIO bus.
pub fn register_devices(&mut self) -> Result<()> {
    // Insert serial device (COM1 port range starting at 0x3f8).
    // `if let` replaces the previous is_some()/unwrap() pair.
    if let Some(serial) = &self.serial {
        self.io_bus
            .insert(serial.clone(), 0x3f8, 0x8)
            .map_err(Error::BusError)?;
    }
    // Insert i8042 device
    self.io_bus
        .insert(self.i8042.clone(), 0x61, 0x4)
        .map_err(Error::BusError)?;
    // Insert the PCI root configuration space.
    self.io_bus
        .insert(self.pci.clone(), 0xcf8, 0x8)
        .map_err(Error::BusError)?;
    if let Some(ioapic) = &self.ioapic {
        // Insert IOAPIC
        self.mmio_bus
            .insert(ioapic.clone(), IOAPIC_RANGE_ADDR, IOAPIC_RANGE_SIZE)
            .map_err(Error::BusError)?;
    }
    Ok(())
}
}
/// Dispatch targets for events received on the VMM epoll loop.
#[derive(Debug, Clone, Copy, PartialEq)]
enum EpollDispatch {
    // VM shutdown request, signalled through the device manager's exit eventfd.
    Exit,
    // Input available on the VMM's stdin.
    Stdin,
}
/// Wrapper around an epoll fd plus a table mapping each registered epoll
/// event's `u64` data token to the action to take when it fires.
pub struct EpollContext {
    // Raw epoll file descriptor returned by `epoll::create`.
    raw_fd: RawFd,
    // Indexed by the event's `data` token; `None` entries are unused slots.
    dispatch_table: Vec<Option<EpollDispatch>>,
}
impl EpollContext {
    /// Creates a new epoll fd with an empty dispatch table.
    /// (`epoll::create(true)` — presumably requests close-on-exec; confirm
    /// against the epoll crate documentation.)
    pub fn new() -> result::Result<EpollContext, io::Error> {
        let raw_fd = epoll::create(true)?;
        // Initial capacity needs to be large enough to hold:
        // * 1 exit event
        // * 1 stdin event
        let mut dispatch_table = Vec::with_capacity(3);
        // Slot 0 is reserved (never registered below, since new entries use
        // the current table length as their token), so real events start at 1.
        dispatch_table.push(None);
        Ok(EpollContext {
            raw_fd,
            dispatch_table,
        })
    }
    /// Registers stdin for EPOLLIN notifications, dispatched as `Stdin`.
    pub fn add_stdin(&mut self) -> result::Result<(), io::Error> {
        // The event's data token is the index of the entry appended below.
        let dispatch_index = self.dispatch_table.len() as u64;
        epoll::ctl(
            self.raw_fd,
            epoll::ControlOptions::EPOLL_CTL_ADD,
            libc::STDIN_FILENO,
            epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index),
        )?;
        self.dispatch_table.push(Some(EpollDispatch::Stdin));
        Ok(())
    }
    /// Registers an arbitrary fd for EPOLLIN notifications, dispatched as
    /// the given `token`.
    fn add_event<T>(&mut self, fd: &T, token: EpollDispatch) -> result::Result<(), io::Error>
    where
        T: AsRawFd,
    {
        let dispatch_index = self.dispatch_table.len() as u64;
        epoll::ctl(
            self.raw_fd,
            epoll::ControlOptions::EPOLL_CTL_ADD,
            fd.as_raw_fd(),
            epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index),
        )?;
        self.dispatch_table.push(Some(token));
        Ok(())
    }
}
impl AsRawFd for EpollContext {
    // Exposes the underlying epoll fd (used by `epoll::wait` in the
    // control loop).
    fn as_raw_fd(&self) -> RawFd {
        self.raw_fd
    }
}
/// A guest instance: KVM VM fd, guest memory, vCPU threads, devices and
/// the epoll-based control-loop state.
pub struct Vm<'a> {
    // VM file descriptor obtained from KVM.
    fd: Arc<VmFd>,
    // Opened kernel image, consumed by `load_kernel()`.
    kernel: File,
    // Guest RAM; wrapped in RwLock so memory could be modified at runtime.
    memory: Arc<RwLock<GuestMemoryMmap>>,
    // Join handles of the spawned vCPU threads.
    vcpus: Vec<thread::JoinHandle<()>>,
    devices: DeviceManager,
    // CPUID entries (patched) presented to the vCPUs.
    cpuid: CpuId,
    config: VmConfig<'a>,
    // Event-loop state driving stdin forwarding and the exit event.
    epoll: EpollContext,
    // Whether stdin is a terminal (controls raw/canonical mode switching).
    on_tty: bool,
    // Timestamp taken when the VM was created.
    creation_ts: std::time::Instant,
}
impl<'a> Vm<'a> {
/// Creates a VM from the given config: allocates and registers guest
/// memory with KVM, picks the irqchip model, patches CPUID, builds the
/// system allocator and instantiates the device manager.
pub fn new(kvm: &Kvm, config: VmConfig<'a>) -> Result<Self> {
    let kernel = File::open(&config.kernel.path).map_err(Error::KernelFile)?;
    let fd = kvm.create_vm().map_err(Error::VmCreate)?;
    let fd = Arc::new(fd);
    let creation_ts = std::time::Instant::now();
    // Init guest memory
    let arch_mem_regions = arch::arch_memory_regions(config.memory.size);
    // Split the arch-provided regions into RAM and reserved sub-regions.
    let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
        .iter()
        .filter(|r| r.2 == RegionType::Ram)
        .map(|r| (r.0, r.1))
        .collect();
    let sub_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
        .iter()
        .filter(|r| r.2 == RegionType::SubRegion)
        .map(|r| (r.0, r.1))
        .collect();
    // Check the number of reserved regions, and only take the first one
    // that's actually a 32-bit hole.
    let mut mem_hole = (GuestAddress(0), 0);
    for region in sub_regions.iter() {
        if region.0.unchecked_add(region.1 as u64).raw_value() <= 0x1_0000_0000 {
            mem_hole = (region.0, region.1);
            break;
        }
    }
    let guest_memory = match config.memory.file {
        // File-backed guest memory: a regular file is used directly; a
        // directory gets an unlinked temporary file created inside it.
        Some(file) => {
            let mut mem_regions = Vec::<(GuestAddress, usize, Option<FileOffset>)>::new();
            for region in ram_regions.iter() {
                if file.is_file() {
                    let file = OpenOptions::new()
                        .read(true)
                        .write(true)
                        .open(file)
                        .map_err(Error::SharedFileCreate)?;
                    file.set_len(region.1 as u64)
                        .map_err(Error::SharedFileSetLen)?;
                    mem_regions.push((region.0, region.1, Some(FileOffset::new(file, 0))));
                } else if file.is_dir() {
                    // Create a temp file in the directory, then unlink it
                    // immediately so it disappears once the fd is closed.
                    let fs_str = format!("{}{}", file.display(), "/tmpfile_XXXXXX");
                    let fs = std::ffi::CString::new(fs_str).unwrap();
                    let mut path = fs.as_bytes_with_nul().to_owned();
                    let path_ptr = path.as_mut_ptr() as *mut _;
                    let fd = unsafe { libc::mkstemp(path_ptr) };
                    unsafe { libc::unlink(path_ptr) };
                    // NOTE(review): mkstemp's return value is not checked;
                    // a -1 here would be turned into an invalid File.
                    let f = unsafe { File::from_raw_fd(fd) };
                    f.set_len(region.1 as u64)
                        .map_err(Error::SharedFileSetLen)?;
                    mem_regions.push((region.0, region.1, Some(FileOffset::new(f, 0))));
                }
            }
            GuestMemoryMmap::with_files(&mem_regions).map_err(Error::GuestMemory)?
        }
        None => GuestMemoryMmap::new(&ram_regions).map_err(Error::GuestMemory)?,
    };
    // Register every guest memory region with KVM, one slot per region.
    guest_memory
        .with_regions(|index, region| {
            let mem_region = kvm_userspace_memory_region {
                slot: index as u32,
                guest_phys_addr: region.start_addr().raw_value(),
                memory_size: region.len() as u64,
                userspace_addr: region.as_ptr() as u64,
                flags: 0,
            };
            // Safe because the guest regions are guaranteed not to overlap.
            unsafe { fd.set_user_memory_region(mem_region) }
        })
        .map_err(|_| Error::GuestMemory(MmapError::NoMemoryRegion))?;
    // Set TSS
    fd.set_tss_address(arch::x86_64::layout::KVM_TSS_ADDRESS.raw_value() as usize)
        .map_err(Error::VmSetup)?;
    // Supported CPUID
    let mut cpuid = kvm
        .get_supported_cpuid(MAX_KVM_CPUID_ENTRIES)
        .map_err(Error::VmSetup)?;
    let msi_capable = kvm.check_extension(Cap::SignalMsi);
    let mut cpuid_patches = Vec::new();
    let mut userspace_ioapic = false;
    if kvm.check_extension(Cap::TscDeadlineTimer) {
        if kvm.check_extension(Cap::SplitIrqchip) && msi_capable {
            // Create split irqchip
            // Only the local APIC is emulated in kernel, both PICs and IOAPIC
            // are not.
            let mut cap: kvm_enable_cap = Default::default();
            cap.cap = KVM_CAP_SPLIT_IRQCHIP;
            cap.args[0] = ioapic::NUM_IOAPIC_PINS as u64;
            fd.enable_cap(&cap).map_err(Error::VmSetup)?;
            // Because of the split irqchip, we need a userspace IOAPIC.
            userspace_ioapic = true;
        } else {
            // Create irqchip
            // A local APIC, 2 PICs and an IOAPIC are emulated in kernel.
            fd.create_irq_chip().map_err(Error::VmSetup)?;
        }
        // Patch tsc deadline timer bit
        cpuid_patches.push(CpuidPatch {
            function: 1,
            index: 0,
            flags_bit: None,
            eax_bit: None,
            ebx_bit: None,
            ecx_bit: Some(TSC_DEADLINE_TIMER_ECX_BIT),
            edx_bit: None,
        });
    } else {
        // Create irqchip
        // A local APIC, 2 PICs and an IOAPIC are emulated in kernel.
        fd.create_irq_chip().map_err(Error::VmSetup)?;
        // Creates an in-kernel device model for the PIT.
        let mut pit_config = kvm_pit_config::default();
        // We need to enable the emulation of a dummy speaker port stub so that writing to port 0x61
        // (i.e. KVM_SPEAKER_BASE_ADDRESS) does not trigger an exit to user space.
        pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
        fd.create_pit2(pit_config).map_err(Error::VmSetup)?;
    }
    // Patch hypervisor bit
    cpuid_patches.push(CpuidPatch {
        function: 1,
        index: 0,
        flags_bit: None,
        eax_bit: None,
        ebx_bit: None,
        ecx_bit: Some(HYPERVISOR_ECX_BIT),
        edx_bit: None,
    });
    CpuidPatch::patch_cpuid(&mut cpuid, cpuid_patches);
    let ioapic = GsiApic::new(
        X86_64_IRQ_BASE,
        ioapic::NUM_IOAPIC_PINS as u32 - X86_64_IRQ_BASE,
    );
    // Let's allocate 64 GiB of addressable MMIO space, starting at 0.
    let mut allocator = SystemAllocator::new(
        GuestAddress(0),
        1 << 16 as GuestUsize,
        GuestAddress(0),
        1 << 36 as GuestUsize,
        mem_hole.0,
        mem_hole.1 as GuestUsize,
        vec![ioapic],
    )
    .ok_or(Error::CreateSystemAllocator)?;
    // Allocate RAM and Reserved address ranges.
    for region in arch_mem_regions.iter() {
        allocator
            .allocate_mmio_addresses(Some(region.0), region.1 as GuestUsize, None)
            .ok_or(Error::MemoryRangeAllocation)?;
    }
    // Convert the guest memory into an Arc. The point being able to use it
    // anywhere in the code, no matter which thread might use it.
    // Add the RwLock aspect to guest memory as we might want to perform
    // additions to the memory during runtime.
    let guest_memory = Arc::new(RwLock::new(guest_memory));
    let vm_info = VmInfo {
        memory: &guest_memory,
        vm_fd: &fd,
        vm_cfg: &config,
    };
    let device_manager = DeviceManager::new(
        &vm_info,
        &mut allocator,
        msi_capable,
        userspace_ioapic,
        ram_regions.len() as u32,
    )
    .map_err(Error::DeviceManager)?;
    // Let's add our STDIN fd.
    let mut epoll = EpollContext::new().map_err(Error::EpollError)?;
    // Only register stdin with epoll when it is actually a terminal.
    let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0;
    if on_tty {
        epoll.add_stdin().map_err(Error::EpollError)?;
    }
    // Let's add an exit event.
    epoll
        .add_event(&device_manager.exit_evt, EpollDispatch::Exit)
        .map_err(Error::EpollError)?;
    let vcpus = Vec::with_capacity(u8::from(&config.cpus) as usize);
    Ok(Vm {
        fd,
        kernel,
        memory: guest_memory,
        vcpus,
        devices: device_manager,
        cpuid,
        config,
        epoll,
        on_tty,
        creation_ts,
    })
}
pub fn load_kernel(&mut self) -> Result<GuestAddress> {
let cmdline_cstring =
CString::new(self.config.cmdline.args.clone()).map_err(|_| Error::CmdLine)?;
let mem = self.memory.read().unwrap();
let entry_addr = match linux_loader::loader::Elf::load(
mem.deref(),
None,
&mut self.kernel,
Some(arch::HIMEM_START),
) {
Ok(entry_addr) => entry_addr,
Err(linux_loader::loader::Error::InvalidElfMagicNumber) => {
linux_loader::loader::BzImage::load(
mem.deref(),
None,
&mut self.kernel,
Some(arch::HIMEM_START),
)
.map_err(Error::KernelLoad)?
}
_ => panic!("Invalid elf file"),
};
linux_loader::loader::load_cmdline(
mem.deref(),
self.config.cmdline.offset,
&cmdline_cstring,
)
.map_err(|_| Error::CmdLine)?;
let vcpu_count = u8::from(&self.config.cpus);
match entry_addr.setup_header {
Some(hdr) => {
arch::configure_system(
&mem,
self.config.cmdline.offset,
cmdline_cstring.to_bytes().len() + 1,
vcpu_count,
Some(hdr),
)
.map_err(|_| Error::CmdLine)?;
let load_addr = entry_addr
.kernel_load
.raw_value()
.checked_add(KERNEL_64BIT_ENTRY_OFFSET)
.ok_or(Error::MemOverflow)?;
Ok(GuestAddress(load_addr))
}
None => {
arch::configure_system(
&mem,
self.config.cmdline.offset,
cmdline_cstring.to_bytes().len() + 1,
vcpu_count,
None,
)
.map_err(|_| Error::CmdLine)?;
Ok(entry_addr.kernel_load)
}
}
}
/// Runs the main VMM event loop: forwards stdin to the serial and/or
/// virtio console devices and returns once the exit event fires.
pub fn control_loop(&mut self) -> Result<()> {
    // Let's start the STDIN polling thread.
    const EPOLL_EVENTS_LEN: usize = 100;
    let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN];
    let epoll_fd = self.epoll.as_raw_fd();
    // Switch the terminal to raw mode while a device consumes stdin.
    if (self.devices.serial.is_some() || self.devices.console_input.is_some()) && self.on_tty {
        io::stdin()
            .lock()
            .set_raw_mode()
            .map_err(Error::SetTerminalRaw)?;
    }
    'outer: loop {
        let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) {
            Ok(res) => res,
            Err(e) => {
                if e.kind() == io::ErrorKind::Interrupted {
                    // It's well defined from the epoll_wait() syscall
                    // documentation that the epoll loop can be interrupted
                    // before any of the requested events occurred or the
                    // timeout expired. In both those cases, epoll_wait()
                    // returns an error of type EINTR, but this should not
                    // be considered as a regular error. Instead it is more
                    // appropriate to retry, by calling into epoll_wait().
                    continue;
                }
                return Err(Error::EpollError(e));
            }
        };
        for event in events.iter().take(num_events) {
            // The event's data token indexes the dispatch table set up in
            // EpollContext; unknown tokens are ignored.
            let dispatch_idx = event.data as usize;
            if let Some(dispatch_type) = self.epoll.dispatch_table[dispatch_idx] {
                match dispatch_type {
                    EpollDispatch::Exit => {
                        // Consume the event.
                        self.devices.exit_evt.read().map_err(Error::EventFd)?;
                        break 'outer;
                    }
                    EpollDispatch::Stdin => {
                        let mut out = [0u8; 64];
                        let count = io::stdin()
                            .lock()
                            .read_raw(&mut out)
                            .map_err(Error::Serial)?;
                        // Input is mirrored to both the serial device and
                        // the virtio console when present and enabled.
                        if self.devices.serial.is_some()
                            && self.config.serial.mode.input_enabled()
                        {
                            self.devices
                                .serial
                                .as_ref()
                                .unwrap()
                                .lock()
                                .expect("Failed to process stdin event due to poisoned lock")
                                .queue_input_bytes(&out[..count])
                                .map_err(Error::Serial)?;
                        }
                        if self.devices.console_input.is_some()
                            && self.config.console.mode.input_enabled()
                        {
                            self.devices
                                .console_input
                                .as_ref()
                                .unwrap()
                                .queue_input_bytes(&out[..count]);
                        }
                    }
                }
            }
        }
    }
    if self.on_tty {
        // Don't forget to set the terminal in canonical mode
        // before to exit.
        io::stdin()
            .lock()
            .set_canon_mode()
            .map_err(Error::SetTerminalCanon)?;
    }
    Ok(())
}
/// Blocks on the signal iterator and, on every SIGWINCH, pushes the
/// current terminal dimensions to the virtio console device.
fn os_signal_handler(signals: Signals, console_input_clone: Arc<vm_virtio::ConsoleInput>) {
    for _ in signals.forever().filter(|signal| *signal == SIGWINCH) {
        let (col, row) = get_win_size();
        console_input_clone.update_console_size(col, row);
    }
}
/// Boots the VM: registers the devices, spawns one thread per vCPU
/// starting at `entry_addr`, wires up the SIGWINCH console-resize handler
/// and then runs the control loop until exit.
pub fn start(&mut self, entry_addr: GuestAddress) -> Result<()> {
    self.devices.register_devices()?;
    let vcpu_count = u8::from(&self.config.cpus);
    // Sized for every vCPU thread plus this thread, so all vCPUs start
    // running at the same time once everything is set up.
    let vcpu_thread_barrier = Arc::new(Barrier::new((vcpu_count + 1) as usize));
    for cpu_id in 0..vcpu_count {
        let io_bus = self.devices.io_bus.clone();
        let mmio_bus = self.devices.mmio_bus.clone();
        // Cloning the Option<Arc<_>> just bumps the refcount when present;
        // this replaces the previous if-let/Some(clone) dance.
        let ioapic = self.devices.ioapic.clone();
        let mut vcpu = Vcpu::new(cpu_id, &self, io_bus, mmio_bus, ioapic)?;
        vcpu.configure(entry_addr, &self)?;
        let vcpu_thread_barrier = vcpu_thread_barrier.clone();
        self.vcpus.push(
            thread::Builder::new()
                .name(format!("cloud-hypervisor_vcpu{}", vcpu.id))
                .spawn(move || {
                    unsafe {
                        extern "C" fn handle_signal(_: i32, _: *mut siginfo_t, _: *mut c_void) {
                        }
                        // This uses an async signal safe handler to kill the vcpu handles.
                        register_signal_handler(
                            VCPU_RTSIG_OFFSET,
                            vmm_sys_util::signal::SignalHandler::Siginfo(handle_signal),
                            true,
                            0,
                        )
                        .expect("Failed to register vcpu signal handler");
                    }
                    // Block until all CPUs are ready.
                    vcpu_thread_barrier.wait();
                    while vcpu.run().is_ok() {}
                })
                .map_err(Error::VcpuSpawn)?,
        );
    }
    // Unblock all CPU threads.
    vcpu_thread_barrier.wait();
    if let Some(console_input) = &self.devices.console_input {
        let console_input_clone = console_input.clone();
        let signals = Signals::new(&[SIGWINCH]);
        match signals {
            Ok(sig) => {
                thread::spawn(move || Vm::os_signal_handler(sig, console_input_clone));
            }
            Err(e) => error!("Signal not found {}", e),
        }
    }
    self.control_loop()?;
    Ok(())
}
/// Gets an Arc to the guest memory owned by this VM.
pub fn get_memory(&self) -> Arc<RwLock<GuestMemoryMmap>> {
    // Refcount bump only; the guest memory itself is not copied.
    Arc::clone(&self.memory)
}
}
#[allow(unused)]
/// Minimal KVM smoke test: runs a tiny real-mode program that adds
/// rax+rbx, writes the resulting ASCII digit plus a newline to port
/// 0x3f8, then halts.
pub fn test_vm() {
    // This example based on https://lwn.net/Articles/658511/
    let code = [
        0xba, 0xf8, 0x03, /* mov $0x3f8, %dx */
        0x00, 0xd8, /* add %bl, %al */
        0x04, b'0', /* add $'0', %al */
        0xee, /* out %al, (%dx) */
        0xb0, b'\n', /* mov $'\n', %al */
        0xee, /* out %al, (%dx) */
        0xf4, /* hlt */
    ];
    let mem_size = 0x1000;
    let load_addr = GuestAddress(0x1000);
    let mem = GuestMemoryMmap::new(&[(load_addr, mem_size)]).unwrap();
    let kvm = Kvm::new().expect("new KVM instance creation failed");
    let vm_fd = kvm.create_vm().expect("new VM fd creation failed");
    // Hand the backing memory to KVM, one slot per region.
    mem.with_regions(|index, region| {
        let mem_region = kvm_userspace_memory_region {
            slot: index as u32,
            guest_phys_addr: region.start_addr().raw_value(),
            memory_size: region.len() as u64,
            userspace_addr: region.as_ptr() as u64,
            flags: 0,
        };
        // Safe because the guest regions are guaranteed not to overlap.
        unsafe { vm_fd.set_user_memory_region(mem_region) }
    })
    .expect("Cannot configure guest memory");
    mem.write_slice(&code, load_addr)
        .expect("Writing code to memory failed");
    let vcpu_fd = vm_fd.create_vcpu(0).expect("new VcpuFd failed");
    // Flat zero-based code segment so rip maps directly to the load address.
    let mut vcpu_sregs = vcpu_fd.get_sregs().expect("get sregs failed");
    vcpu_sregs.cs.base = 0;
    vcpu_sregs.cs.selector = 0;
    vcpu_fd.set_sregs(&vcpu_sregs).expect("set sregs failed");
    // rax=2, rbx=3 => the guest prints '5' (2+3 plus ASCII '0').
    let mut vcpu_regs = vcpu_fd.get_regs().expect("get regs failed");
    vcpu_regs.rip = 0x1000;
    vcpu_regs.rax = 2;
    vcpu_regs.rbx = 3;
    vcpu_regs.rflags = 2;
    vcpu_fd.set_regs(&vcpu_regs).expect("set regs failed");
    // Run forever, logging I/O exits; every other exit reason is ignored.
    loop {
        match vcpu_fd.run().expect("run failed") {
            VcpuExit::IoIn(addr, data) => {
                println!(
                    "IO in -- addr: {:#x} data [{:?}]",
                    addr,
                    str::from_utf8(&data).unwrap()
                );
            }
            VcpuExit::IoOut(addr, data) => {
                println!(
                    "IO out -- addr: {:#x} data [{:?}]",
                    addr,
                    str::from_utf8(&data).unwrap()
                );
            }
            VcpuExit::MmioRead(_addr, _data) => {}
            VcpuExit::MmioWrite(_addr, _data) => {}
            VcpuExit::Unknown => {}
            VcpuExit::Exception => {}
            VcpuExit::Hypercall => {}
            VcpuExit::Debug => {}
            VcpuExit::Hlt => {
                println!("HLT");
            }
            VcpuExit::IrqWindowOpen => {}
            VcpuExit::Shutdown => {}
            VcpuExit::FailEntry => {}
            VcpuExit::Intr => {}
            VcpuExit::SetTpr => {}
            VcpuExit::TprAccess => {}
            VcpuExit::S390Sieic => {}
            VcpuExit::S390Reset => {}
            VcpuExit::Dcr => {}
            VcpuExit::Nmi => {}
            VcpuExit::InternalError => {}
            VcpuExit::Osi => {}
            VcpuExit::PaprHcall => {}
            VcpuExit::S390Ucontrol => {}
            VcpuExit::Watchdog => {}
            VcpuExit::S390Tsch => {}
            VcpuExit::Epr => {}
            VcpuExit::SystemEvent => {}
            VcpuExit::S390Stsi => {}
            VcpuExit::IoapicEoi(_vector) => {}
            VcpuExit::Hyperv => {}
        }
        // r => panic!("unexpected exit reason: {:?}", r),
    }
}
| 33.53878 | 104 | 0.514569 |
62c549c9612f14ed889498b054dbc7b59c384b78 | 303 | fn next_birthday(name: &str, current_age: u8) {
    // Announce the age this person will turn on their next birthday.
    let upcoming_age = current_age + 1;
    println!("Hi {}, on your next birthday, you'll be {}!", name, upcoming_age);
}
/// Returns `num` squared.
fn square(num: i32) -> i32 {
    num.pow(2)
}
fn main() {
    // Demo both helpers: a greeting and a squared value.
    next_birthday("Attila", 25);
    let answer = square(3);
    println!("The answer is {}", answer);
}
| 20.2 | 76 | 0.590759 |
eb0c899ca8d68dd4c17cfd553109d474d6572f6c | 301 | #[macro_export]
/// Builds a `Vec` from a `;`-separated list of expressions, pushing each
/// one in order: `my_vec![1; 2; 3]` produces `vec![1, 2, 3]`.
macro_rules! my_vec {
    ( $( $x:expr );* ) => {
        {
            let mut temp_vec = Vec::new();
            $(
                temp_vec.push($x);
            )*
            temp_vec
        }
    }
}
fn main() {
    // Note: `my_vec!` uses `;` (not `,`) as its element separator.
    let v = my_vec![1; 2; 3];
    println!("{:?}", v);
}
| 16.722222 | 42 | 0.33887 |
b932d46e9a9ba780eb3315606109b18db5c8bd87 | 11,341 | pub mod broker;
mod generated;
mod inv;
pub use crate::generated::ctliface::*;
use actix_rt::time::delay_for;
use futures::stream::StreamExt;
use futures::TryStreamExt;
pub use inv::{Invocation, InvocationResponse};
use inv::WasccEntity;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Duration;
use wascap::prelude::KeyPair;
// Convenience alias: every client operation returns a boxed, thread-safe error.
type Result<T> = ::std::result::Result<T, Box<dyn ::std::error::Error + Send + Sync>>;
/// Lattice control interface client speaking over a NATS connection.
pub struct Client {
    // Underlying async NATS connection used for all requests.
    nc: nats::asynk::Connection,
    // Optional namespace prefix applied to every broker subject.
    nsprefix: Option<String>,
    // Per-request timeout for the request/reply operations.
    timeout: Duration,
    // Key pair identifying this client; passed to `Invocation::new`
    // when performing actor RPC calls.
    key: KeyPair,
}
impl Client {
/// Creates a control interface client over the given NATS connection,
/// generating a fresh server key pair for it.
pub fn new(nc: nats::asynk::Connection, nsprefix: Option<String>, timeout: Duration) -> Self {
    let key = KeyPair::new_server();
    Client {
        nc,
        nsprefix,
        timeout,
        key,
    }
}
/// Broadcasts a host probe and collects `Host` responses until `timeout`
/// elapses. Fails on the first response that cannot be deserialized.
pub async fn get_hosts(&self, timeout: Duration) -> Result<Vec<Host>> {
    let subject = broker::queries::hosts(&self.nsprefix);
    self.nc
        .request_multi(&subject, vec![])
        .await?
        // Deserialize each reply as it streams in...
        .map(|m| deserialize::<Host>(&m.data))
        // ...stopping once the timeout future completes.
        .take_until(delay_for(timeout))
        .try_collect()
        .await
}
/// Broadcasts an actor auction request (actor reference plus constraints)
/// and collects acknowledgements until `timeout` elapses.
pub async fn perform_actor_auction(
    &self,
    actor_ref: &str,
    constraints: HashMap<String, String>,
    timeout: Duration,
) -> Result<Vec<ActorAuctionAck>> {
    let subject = broker::actor_auction_subject(&self.nsprefix);
    let bytes = serialize(ActorAuctionRequest {
        actor_ref: actor_ref.to_string(),
        constraints,
    })?;
    // Stream replies from the multi-response request until the timeout
    // fires; the first undeserializable payload aborts the collection.
    self.nc
        .request_multi(&subject, bytes)
        .await?
        .map(|m| deserialize::<ActorAuctionAck>(&m.data))
        .take_until(delay_for(timeout))
        .try_collect()
        .await
}
/// Broadcasts a provider auction request (provider reference, link name
/// and constraints) and collects acknowledgements until `timeout` elapses.
pub async fn perform_provider_auction(
    &self,
    provider_ref: &str,
    link_name: &str,
    constraints: HashMap<String, String>,
    timeout: Duration,
) -> Result<Vec<ProviderAuctionAck>> {
    let subject = broker::provider_auction_subject(&self.nsprefix);
    let bytes = serialize(ProviderAuctionRequest {
        provider_ref: provider_ref.to_string(),
        link_name: link_name.to_string(),
        constraints,
    })?;
    // Stream replies from the multi-response request until the timeout
    // fires; the first undeserializable payload aborts the collection.
    self.nc
        .request_multi(&subject, bytes)
        .await?
        .map(|m| deserialize::<ProviderAuctionAck>(&m.data))
        .take_until(delay_for(timeout))
        .try_collect()
        .await
}
/// Queries a specific host for its inventory, failing if no reply arrives
/// within the client timeout.
pub async fn get_host_inventory(&self, host_id: &str) -> Result<HostInventory> {
    let subject = broker::queries::host_inventory(&self.nsprefix, host_id);
    let msg = actix_rt::time::timeout(self.timeout, self.nc.request(&subject, vec![]))
        .await?
        .map_err(|e| format!("Did not receive host inventory from target host: {}", e))?;
    Ok(deserialize(&msg.data)?)
}
/// Instructs the given host to start the actor identified by `actor_ref`,
/// returning the host's acknowledgement.
pub async fn start_actor(&self, host_id: &str, actor_ref: &str) -> Result<StartActorAck> {
    let subject = broker::commands::start_actor(&self.nsprefix, host_id);
    let payload = serialize(StartActorCommand {
        actor_ref: actor_ref.to_string(),
        host_id: host_id.to_string(),
    })?;
    let msg = actix_rt::time::timeout(self.timeout, self.nc.request(&subject, &payload))
        .await?
        .map_err(|e| format!("Did not receive start actor acknowledgement: {}", e))?;
    Ok(deserialize(&msg.data)?)
}
/// Performs a remote procedure call over the lattice, targeting the given actor. This call will appear
/// to originate from the "system" actor and from a unique host ID that was generated by the control
/// interface client when it was instantiated. If there are multiple actors with the same public key
/// actively running in the lattice, then the message broker is responsible for choosing the appropriate
/// target. Under current NATS implementations, that means an actor is chosen pseudo-randomly among the
/// known queue subscribers, and will **not** be invoked in round-robin fashion
pub async fn call_actor(
    &self,
    target_id: &str,
    operation: &str,
    data: &[u8],
) -> Result<InvocationResponse> {
    let subject = broker::rpc::call_actor(&self.nsprefix, target_id);
    // Wrap the payload in an Invocation built with this client's key pair,
    // originating from the "system" actor.
    let bytes = crate::generated::ctliface::serialize(Invocation::new(
        &self.key,
        WasccEntity::Actor("system".to_string()),
        WasccEntity::Actor(target_id.to_string()),
        operation,
        data.to_vec(),
    ))?;
    match actix_rt::time::timeout(self.timeout, self.nc.request(&subject, &bytes)).await? {
        Ok(msg) => {
            let resp: InvocationResponse = crate::generated::ctliface::deserialize(&msg.data)?;
            Ok(resp)
        }
        Err(e) => Err(format!("Actor RPC call did not succeed: {}", e).into()),
    }
}
/// Publishes the link advertisement message to the lattice that is published when code invokes the `set_link`
/// function on a `Host` struct instance. No confirmation or acknowledgement is available for this operation
/// because it is publish-only.
pub async fn advertise_link(
    &self,
    actor_id: &str,
    provider_id: &str,
    contract_id: &str,
    link_name: &str,
    values: HashMap<String, String>,
) -> Result<()> {
    let subject = broker::rpc::advertise_links(&self.nsprefix);
    // Fire-and-forget: serialize the link definition and publish it.
    let payload = crate::generated::ctliface::serialize(&LinkDefinition {
        actor_id: actor_id.to_string(),
        provider_id: provider_id.to_string(),
        contract_id: contract_id.to_string(),
        link_name: link_name.to_string(),
        values,
    })?;
    self.nc.publish(&subject, &payload).await?;
    Ok(())
}
/// Issue a command to a host instructing that it replace an existing actor (indicated by its
/// public key) with a new actor indicated by an OCI image reference. The host will acknowledge
/// this request as soon as it verifies that the target actor is running. This acknowledgement
/// occurs **before** the new bytes are downloaded. Live-updating an actor can take a long
/// time and control clients cannot block waiting for a reply that could come several seconds
/// later. If you need to verify that the actor has been updated, you will want to set up a
/// listener for the appropriate **ControlEvent** which will be published on the control events
/// channel in JSON
pub async fn update_actor(
    &self,
    host_id: &str,
    existing_actor_id: &str,
    new_actor_ref: &str,
) -> Result<UpdateActorAck> {
    let subject = broker::commands::update_actor(&self.nsprefix, host_id);
    let payload = serialize(UpdateActorCommand {
        host_id: host_id.to_string(),
        actor_id: existing_actor_id.to_string(),
        new_actor_ref: new_actor_ref.to_string(),
    })?;
    let msg = actix_rt::time::timeout(self.timeout, self.nc.request(&subject, &payload))
        .await?
        .map_err(|e| format!("Did not receive update actor acknowledgement: {}", e))?;
    Ok(deserialize(&msg.data)?)
}
/// Instructs the given host to start the provider at `provider_ref`,
/// bound to `link_name` ("default" when none is given). Returns the
/// host's acknowledgement.
pub async fn start_provider(
    &self,
    host_id: &str,
    provider_ref: &str,
    link_name: Option<String>,
) -> Result<StartProviderAck> {
    let subject = broker::commands::start_provider(&self.nsprefix, host_id);
    let bytes = serialize(StartProviderCommand {
        host_id: host_id.to_string(),
        provider_ref: provider_ref.to_string(),
        // unwrap_or_else avoids allocating the default string when a link
        // name was supplied (unwrap_or's argument is built eagerly).
        link_name: link_name.unwrap_or_else(|| "default".to_string()),
    })?;
    match actix_rt::time::timeout(self.timeout, self.nc.request(&subject, &bytes)).await? {
        Ok(msg) => {
            let ack: StartProviderAck = deserialize(&msg.data)?;
            Ok(ack)
        }
        Err(e) => Err(format!("Did not receive start provider acknowledgement: {}", e).into()),
    }
}
pub async fn stop_provider(
&self,
host_id: &str,
provider_ref: &str,
link_name: &str,
contract_id: &str,
) -> Result<StopProviderAck> {
let subject = broker::commands::stop_provider(&self.nsprefix, host_id);
let bytes = serialize(StopProviderCommand {
host_id: host_id.to_string(),
provider_ref: provider_ref.to_string(),
link_name: link_name.to_string(),
contract_id: contract_id.to_string(),
})?;
match actix_rt::time::timeout(self.timeout, self.nc.request(&subject, &bytes)).await? {
Ok(msg) => {
let ack: StopProviderAck = deserialize(&msg.data)?;
Ok(ack)
}
Err(e) => Err(format!("Did not receive stop provider acknowledgement: {}", e).into()),
}
}
pub async fn stop_actor(&self, host_id: &str, actor_ref: &str) -> Result<StopActorAck> {
let subject = broker::commands::stop_actor(&self.nsprefix, host_id);
let bytes = serialize(StopActorCommand {
host_id: host_id.to_string(),
actor_ref: actor_ref.to_string(),
})?;
match actix_rt::time::timeout(self.timeout, self.nc.request(&subject, &bytes)).await? {
Ok(msg) => {
let ack: StopActorAck = deserialize(&msg.data)?;
Ok(ack)
}
Err(e) => Err(format!("Did not receive stop actor acknowledgement: {}", e).into()),
}
}
pub async fn get_claims(&self) -> Result<ClaimsList> {
let subject = broker::queries::claims(&self.nsprefix);
match actix_rt::time::timeout(self.timeout, self.nc.request(&subject, vec![])).await? {
Ok(msg) => {
let list: ClaimsList = deserialize(&msg.data)?;
Ok(list)
}
Err(e) => Err(format!("Did not receive claims from lattice: {}", e).into()),
}
}
}
/// The standard function for serializing codec structs into a format that can be
/// used for message exchange between actor and host. Use of any other function to
/// serialize could result in breaking incompatibilities.
///
/// # Errors
/// Returns an error wrapping the underlying JSON serialization failure.
pub fn serialize<T>(
    item: T,
) -> ::std::result::Result<Vec<u8>, Box<dyn std::error::Error + Send + Sync>>
where
    T: Serialize,
{
    // Preserve the underlying serde error so callers can see *why*
    // serialization failed, instead of a fixed, detail-free message.
    serde_json::to_vec(&item).map_err(|e| format!("JSON serialization failure: {}", e).into())
}
/// The standard function for de-serializing codec structs from a format suitable
/// for message exchange between actor and host. Use of any other function to
/// deserialize could result in breaking incompatibilities.
///
/// # Errors
/// Returns an error wrapping the underlying JSON deserialization failure.
pub fn deserialize<'de, T: Deserialize<'de>>(
    buf: &'de [u8],
) -> ::std::result::Result<T, Box<dyn std::error::Error + Send + Sync>> {
    // Preserve the underlying serde error (position, expected type, etc.)
    // rather than discarding it behind a fixed message.
    serde_json::from_slice(buf).map_err(|e| format!("JSON deserialization failure: {}", e).into())
}
| 39.242215 | 114 | 0.597214 |
18e30c9664e1ef2d112e449bb41a46493e975c19 | 492 | use crate::host_exit_code::HostExitCode;
use std::io;
quick_error! {
    /// An error struct encompassing all possible errors of this crate.
    #[derive(Debug)]
    pub enum Error {
        // Failure while dynamically loading the hostfxr library.
        DlOpen(err: dlopen::Error) {
            from()
            display("dlopen error: {}", err)
            source(err)
        }
        // Wrapped standard I/O error (e.g. while locating the library file).
        IO(err: io::Error) {
            from()
            display("io error: {}", err)
            source(err)
        }
        // Non-success exit code reported by the hostfxr runtime.
        Hostfxr(error_code: HostExitCode)
    }
}
| 23.428571 | 71 | 0.51626 |
7112566eb39e6aff3c64fa7d61e66c18748fc375 | 242 |
#[allow(unused_imports)]
#[macro_use] extern crate log;
#[allow(unused_imports)]
extern crate pretty_env_logger;
extern crate rg_lib ;
#[macro_use] extern crate rg_core ;
#[macro_use] extern crate shells ;
pub mod unix ;
pub mod service ;
| 20.166667 | 35 | 0.752066 |
1435354313b9291e7be3f724f8764fab0d214830 | 5,853 | //! Common primitives for the Ethereum network interaction.
// Built-in deps
use std::{convert::TryFrom, fmt, str::FromStr};
// External uses
use ethabi::{decode, ParamType};
use serde::{Deserialize, Serialize};
// Local uses
use crate::{Action, Operation};
use zksync_basic_types::{Log, H256, U256};
/// Numerical identifier of the Ethereum operation.
pub type EthOpId = i64;
/// Type of the transactions sent to the Ethereum network.
// NOTE: variants serialize to/from the strings "commit" / "verify" /
// "withdraw" — keep the `Display` and `FromStr` impls below in sync.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum OperationType {
    /// Commit action (`commitBlock` method of the smart contract).
    Commit,
    /// Verify action (`verifyBlock` method of the smart contract).
    Verify,
    /// Withdraw action (`completeWithdrawals` method of the smart contract).
    Withdraw,
}
impl fmt::Display for OperationType {
    /// Renders the operation as its lowercase name (the inverse of `FromStr`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            Self::Commit => "commit",
            Self::Verify => "verify",
            Self::Withdraw => "withdraw",
        };
        write!(f, "{}", name)
    }
}
impl FromStr for OperationType {
    type Err = anyhow::Error;
    /// Parses the lowercase names produced by the `Display` impl above.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "commit" => Ok(Self::Commit),
            "verify" => Ok(Self::Verify),
            "withdraw" => Ok(Self::Withdraw),
            other => anyhow::bail!("Unknown type of operation: {}", other),
        }
    }
}
/// Stored Ethereum operation.
#[derive(Debug, Clone)]
pub struct ETHOperation {
    /// Numeric ID of the operation.
    pub id: i64,
    /// Type of the operation.
    pub op_type: OperationType,
    /// Optional ZKSync operation associated with Ethereum operation.
    pub op: Option<Operation>,
    /// Used nonce (fixed for all the sent transactions).
    pub nonce: U256,
    /// Deadline block of the last sent transaction.
    pub last_deadline_block: u64,
    /// Gas price used in the last sent transaction.
    pub last_used_gas_price: U256,
    /// Hashes of all the sent transactions.
    pub used_tx_hashes: Vec<H256>,
    /// Tx payload (not signed).
    pub encoded_tx_data: Vec<u8>,
    /// Flag showing if the operation was completed and
    /// confirmed on the Ethereum blockchain.
    pub confirmed: bool,
    /// Hash of the accepted Ethereum transaction (if operation
    /// is confirmed).
    pub final_hash: Option<H256>,
}
impl ETHOperation {
    /// Returns `true` when the operation is considered "stuck": none of its
    /// transactions were included in a block before the deadline, so it must
    /// be re-sent with an increased gas amount.
    pub fn is_stuck(&self, current_block: u64) -> bool {
        current_block >= self.last_deadline_block
    }

    /// Returns `true` when this object relates to the `Verify` zkSync
    /// operation (both the attached zkSync action and the Ethereum operation
    /// type must agree).
    pub fn is_verify(&self) -> bool {
        match &self.op {
            Some(op) => {
                matches!(op.action, Action::Verify { .. })
                    && matches!(self.op_type, OperationType::Verify)
            }
            None => false,
        }
    }

    /// Fills in the fields assigned by the database (ID and nonce) once the
    /// operation has been persisted.
    pub fn complete(&mut self, inserted_data: InsertedOperationResponse) {
        self.id = inserted_data.id;
        self.nonce = inserted_data.nonce;
    }
}
impl PartialEq for ETHOperation {
    fn eq(&self, other: &Self) -> bool {
        // Identifiers are assumed unique, but volatile fields (sent tx
        // hashes, confirmation status, etc.) may differ between snapshots of
        // the same operation, so they participate in the comparison as well.
        self.id == other.id
            && self.last_deadline_block == other.last_deadline_block
            && self.last_used_gas_price == other.last_used_gas_price
            && self.used_tx_hashes == other.used_tx_hashes
            && self.confirmed == other.confirmed
            && self.final_hash == other.final_hash
    }
}
/// Structure representing the result of the insertion of the Ethereum
/// operation into the database.
/// Contains the assigned nonce and ID for the operation.
// Consumed by `ETHOperation::complete` to finish initializing an operation.
pub struct InsertedOperationResponse {
    /// Unique numeric identifier of the Ethereum operation.
    pub id: i64,
    /// Nonce assigned for the Ethereum operation. Meant to be used for all the
    /// transactions sent within one particular Ethereum operation.
    pub nonce: U256,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CompleteWithdrawalsTx {
    /// Hash of the Ethereum transaction the event log came from.
    pub tx_hash: H256,
    /// Start index of the processed range in the pending withdrawals queue
    /// (decoded from the event's first `uint32`).
    pub pending_withdrawals_queue_start_index: u32,
    /// End index of the processed range in the pending withdrawals queue
    /// (decoded from the event's second `uint32`).
    // NOTE(review): inclusivity of the two bounds is not established here —
    // confirm against the smart contract's event definition.
    pub pending_withdrawals_queue_end_index: u32,
}
impl TryFrom<Log> for CompleteWithdrawalsTx {
    type Error = anyhow::Error;
    /// Decodes a `CompleteWithdrawalsTx` from an Ethereum event log whose
    /// data is two ABI-encoded `uint32` words: the queue start index
    /// followed by the queue end index.
    ///
    /// Returns an error if the event data cannot be ABI-decoded; panics if
    /// the log lacks a transaction hash or a decoded value is not a uint.
    fn try_from(event: Log) -> Result<CompleteWithdrawalsTx, anyhow::Error> {
        let mut decoded_event = decode(
            &[
                ParamType::Uint(32), // queueStartIndex
                ParamType::Uint(32), // queueEndIndex
            ],
            &event.data.0,
        )
        .map_err(|e| anyhow::format_err!("Event data decode: {:?}", e))?;
        Ok(CompleteWithdrawalsTx {
            tx_hash: event
                .transaction_hash
                .expect("complete withdrawals transaction should have hash"),
            // The two `remove(0)` calls below rely on the decode order above:
            // the first removal yields the start index, and after the shift
            // the second yields the end index.
            pending_withdrawals_queue_start_index: decoded_event
                .remove(0)
                .to_uint()
                .as_ref()
                .map(U256::as_u32)
                .expect("pending_withdrawals_queue_start_index value conversion failed"),
            pending_withdrawals_queue_end_index: decoded_event
                .remove(0)
                .to_uint()
                .as_ref()
                .map(U256::as_u32)
                .expect("pending_withdrawals_queue_end_index value conversion failed"),
        })
    }
}
| 34.839286 | 89 | 0.627712 |
22c0befeb01bbf180b8908a919810d5a69aa5b5a | 1,333 | use glam::DAffine2;
use glam::DVec2;
use kurbo::Point;
use crate::intersection::intersect_quad_bez_path;
use crate::LayerId;
use super::style;
use super::LayerData;
use serde::{Deserialize, Serialize};
use std::fmt::Write;
/// A rectangle layer. The struct carries no state of its own: position and
/// size come entirely from the transform supplied at path-building time.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct Rect {}
impl Rect {
	/// Creates a new rectangle layer.
	pub fn new() -> Rect {
		Rect {}
	}
}

/// `Default` mirrors [`Rect::new`] (clippy::new_without_default).
impl Default for Rect {
	fn default() -> Self {
		Self::new()
	}
}
impl LayerData for Rect {
	fn to_kurbo_path(&self, transform: glam::DAffine2, _style: style::PathStyle) -> kurbo::BezPath {
		// Converts a glam point into the kurbo point type.
		fn new_point(a: DVec2) -> Point {
			Point::new(a.x, a.y)
		}
		// The path is a unit square — corners (0,0), (1,0), (1,1), (0,1) —
		// mapped through `transform`; the first corner is the translation.
		let mut path = kurbo::BezPath::new();
		path.move_to(new_point(transform.translation));
		// TODO: Use into_iter when new impls get added in rust 2021
		[(1., 0.), (1., 1.), (0., 1.)].iter().for_each(|v| path.line_to(new_point(transform.transform_point2((*v).into()))));
		path.close_path();
		path
	}
	fn render(&mut self, svg: &mut String, transform: glam::DAffine2, style: style::PathStyle) {
		// Appends the rectangle as an SVG <path> element with the layer style.
		let _ = write!(svg, r#"<path d="{}" {} />"#, self.to_kurbo_path(transform, style).to_svg(), style.render());
	}
	fn intersects_quad(&self, quad: [DVec2; 4], path: &mut Vec<LayerId>, intersections: &mut Vec<Vec<LayerId>>, style: style::PathStyle) {
		// Tests the untransformed (identity) rectangle path against the query
		// quad; on a hit, records this layer's id path.
		if intersect_quad_bez_path(quad, &self.to_kurbo_path(DAffine2::IDENTITY, style), true) {
			intersections.push(path.clone());
		}
	}
}
| 28.978261 | 135 | 0.676669 |
e43793312321b0e3c85d0baf768b1023118805c1 | 1,431 | //! Error types
use num_derive::FromPrimitive;
use thiserror::Error;
use solana_program::{decode_error::DecodeError, program_error::ProgramError};
// NOTE: `ProgramError::Custom` exposes each variant's discriminant as the
// numeric error code (see the `From` impl below), so append new variants at
// the end to keep existing codes stable.
#[derive(Clone, Debug, Eq, Error, FromPrimitive, PartialEq)]
pub enum FaucetError {
    /// Invalid instruction
    #[error("Invalid Instruction")]
    InvalidInstruction,
    /// Incorrect initialization data
    #[error("Incorrect Initialization Data")]
    IncorrectInitializationData,
    /// Not Rent Excempt
    #[error("Account Not Rent Exempt")]
    AccountNotRentExempt,
    /// Account Already In Use
    #[error("Account Already In Use")]
    AccountAlreadyInUse,
    /// Requesting Too Many Tokens
    #[error("Requesting Too Many Tokens")]
    RequestingTooManyTokens,
    /// Non Admin Closure Attempt
    #[error("Non Admin Closure Attempt")]
    NonAdminClosureAttempt,
    /// Non Closable Faucet Closure Attempt
    #[error("Non Closable Faucet Closure Attempt")]
    NonClosableFaucetClosureAttempt,
    /// Overflow
    #[error("Overflow")]
    Overflow,
    /// Invalid Mint
    #[error("Invalid Mint")]
    InvalidMint,
    /// Incorrect Mint Authority
    #[error("Incorrect Mint Authority")]
    IncorrectMintAuthority,
}
impl From<FaucetError> for ProgramError {
    fn from(e: FaucetError) -> Self {
        // Expose the enum discriminant as the custom program error code.
        let code = e as u32;
        ProgramError::Custom(code)
    }
}
impl<T> DecodeError<T> for FaucetError {
    /// Human-readable type name used in `decode_error` diagnostics.
    fn type_of() -> &'static str {
        "FaucetError"
    }
}
| 27 | 77 | 0.67645 |
fc8662203fea97534fc9863524782367f9a09136 | 16,924 | //! Provides a strongly typed way to build emails
//!
//! ### Creating messages
//!
//! This section explains how to create emails.
//!
//! ## Usage
//!
//! ### Format email messages
//!
//! #### With string body
//!
//! The easiest way how we can create email message with simple string.
//!
//! ```rust
//! use lettre::message::Message;
//!
//! let m = Message::builder()
//! .from("NoBody <[email protected]>".parse().unwrap())
//! .reply_to("Yuin <[email protected]>".parse().unwrap())
//! .to("Hei <[email protected]>".parse().unwrap())
//! .subject("Happy new year")
//! .body("Be happy!")
//! .unwrap();
//! ```
//!
//! Will produce:
//!
//! ```sh
//! From: NoBody <[email protected]>
//! Reply-To: Yuin <[email protected]>
//! To: Hei <[email protected]>
//! Subject: Happy new year
//!
//! Be happy!
//! ```
//!
//! The unicode header data will be encoded using _UTF8-Base64_ encoding.
//!
//! ### With MIME body
//!
//! ##### Single part
//!
//! The more complex way is using MIME contents.
//!
//! ```rust
//! use lettre::message::{header, Message, SinglePart, Part};
//!
//! let m = Message::builder()
//! .from("NoBody <[email protected]>".parse().unwrap())
//! .reply_to("Yuin <[email protected]>".parse().unwrap())
//! .to("Hei <[email protected]>".parse().unwrap())
//! .subject("Happy new year")
//! .singlepart(
//! SinglePart::builder()
//! .header(header::ContentType(
//! "text/plain; charset=utf8".parse().unwrap(),
//! )).header(header::ContentTransferEncoding::QuotedPrintable)
//! .body("Привет, мир!"),
//! )
//! .unwrap();
//! ```
//!
//! The body will be encoded using selected `Content-Transfer-Encoding`.
//!
//! ```sh
//! From: NoBody <[email protected]>
//! Reply-To: Yuin <[email protected]>
//! To: Hei <[email protected]>
//! Subject: Happy new year
//! MIME-Version: 1.0
//! Content-Type: text/plain; charset=utf8
//! Content-Transfer-Encoding: quoted-printable
//!
//! =D0=9F=D1=80=D0=B8=D0=B2=D0=B5=D1=82, =D0=BC=D0=B8=D1=80!
//!
//! ```
//!
//! ##### Multiple parts
//!
//! And more advanced way of building message by using multipart MIME contents.
//!
//! ```rust
//! use lettre::message::{header, Message, MultiPart, SinglePart, Part};
//!
//! let m = Message::builder()
//! .from("NoBody <[email protected]>".parse().unwrap())
//! .reply_to("Yuin <[email protected]>".parse().unwrap())
//! .to("Hei <[email protected]>".parse().unwrap())
//! .subject("Happy new year")
//! .multipart(
//! MultiPart::mixed()
//! .multipart(
//! MultiPart::alternative()
//! .singlepart(
//! SinglePart::quoted_printable()
//! .header(header::ContentType("text/plain; charset=utf8".parse().unwrap()))
//! .body("Привет, мир!")
//! )
//! .multipart(
//! MultiPart::related()
//! .singlepart(
//! SinglePart::eight_bit()
//! .header(header::ContentType("text/html; charset=utf8".parse().unwrap()))
//! .body("<p><b>Hello</b>, <i>world</i>! <img src=smile.png></p>")
//! )
//! .singlepart(
//! SinglePart::base64()
//! .header(header::ContentType("image/png".parse().unwrap()))
//! .header(header::ContentDisposition {
//! disposition: header::DispositionType::Inline,
//! parameters: vec![],
//! })
//! .body("<smile-raw-image-data>")
//! )
//! )
//! )
//! .singlepart(
//! SinglePart::seven_bit()
//! .header(header::ContentType("text/plain; charset=utf8".parse().unwrap()))
//! .header(header::ContentDisposition {
//! disposition: header::DispositionType::Attachment,
//! parameters: vec![
//! header::DispositionParam::Filename(
//! header::Charset::Ext("utf-8".into()),
//! None, "example.c".as_bytes().into()
//! )
//! ]
//! })
//! .body("int main() { return 0; }")
//! )
//! ).unwrap();
//! ```
//!
//! ```sh
//! From: NoBody <[email protected]>
//! Reply-To: Yuin <[email protected]>
//! To: Hei <[email protected]>
//! Subject: Happy new year
//! MIME-Version: 1.0
//! Content-Type: multipart/mixed; boundary="RTxPCn9p31oAAAAAeQxtr1FbXr/i5vW1hFlH9oJqZRMWxRMK1QLjQ4OPqFk9R+0xUb/m"
//!
//! --RTxPCn9p31oAAAAAeQxtr1FbXr/i5vW1hFlH9oJqZRMWxRMK1QLjQ4OPqFk9R+0xUb/m
//! Content-Type: multipart/alternative; boundary="qW9QCn9p31oAAAAAodFBg1L1Qrraa5hEl0bDJ6kfJMUcRT2LLSWEoeyhSEbUBIqbjWqy"
//!
//! --qW9QCn9p31oAAAAAodFBg1L1Qrraa5hEl0bDJ6kfJMUcRT2LLSWEoeyhSEbUBIqbjWqy
//! Content-Transfer-Encoding: quoted-printable
//! Content-Type: text/plain; charset=utf8
//!
//! =D0=9F=D1=80=D0=B8=D0=B2=D0=B5=D1=82, =D0=BC=D0=B8=D1=80!
//! --qW9QCn9p31oAAAAAodFBg1L1Qrraa5hEl0bDJ6kfJMUcRT2LLSWEoeyhSEbUBIqbjWqy
//! Content-Type: multipart/related; boundary="BV5RCn9p31oAAAAAUt42E9bYMDEAGCOWlxEz89Bv0qFA5Xsy6rOC3zRahMQ39IFZNnp8"
//!
//! --BV5RCn9p31oAAAAAUt42E9bYMDEAGCOWlxEz89Bv0qFA5Xsy6rOC3zRahMQ39IFZNnp8
//! Content-Transfer-Encoding: 8bit
//! Content-Type: text/html; charset=utf8
//!
//! <p><b>Hello</b>, <i>world</i>! <img src=smile.png></p>
//! --BV5RCn9p31oAAAAAUt42E9bYMDEAGCOWlxEz89Bv0qFA5Xsy6rOC3zRahMQ39IFZNnp8
//! Content-Transfer-Encoding: base64
//! Content-Type: image/png
//! Content-Disposition: inline
//!
//! PHNtaWxlLXJhdy1pbWFnZS1kYXRhPg==
//! --BV5RCn9p31oAAAAAUt42E9bYMDEAGCOWlxEz89Bv0qFA5Xsy6rOC3zRahMQ39IFZNnp8--
//! --qW9QCn9p31oAAAAAodFBg1L1Qrraa5hEl0bDJ6kfJMUcRT2LLSWEoeyhSEbUBIqbjWqy--
//! --RTxPCn9p31oAAAAAeQxtr1FbXr/i5vW1hFlH9oJqZRMWxRMK1QLjQ4OPqFk9R+0xUb/m
//! Content-Transfer-Encoding: 7bit
//! Content-Type: text/plain; charset=utf8
//! Content-Disposition: attachment; filename="example.c"
//!
//! int main() { return 0; }
//! --RTxPCn9p31oAAAAAeQxtr1FbXr/i5vW1hFlH9oJqZRMWxRMK1QLjQ4OPqFk9R+0xUb/m--
//!
//! ```
pub use encoder::*;
pub use mailbox::*;
pub use mimebody::*;
pub use mime;
mod encoder;
pub mod header;
mod mailbox;
mod mimebody;
mod utf8_b;
use crate::{
message::header::{EmailDate, Header, Headers, MailboxesHeader},
Envelope, Error as EmailError,
};
use std::{convert::TryFrom, time::SystemTime};
use uuid::Uuid;
const DEFAULT_MESSAGE_ID_DOMAIN: &str = "localhost";
/// Types that can serialize themselves into the on-the-wire email format.
pub trait EmailFormat {
    // Use a writer?
    /// Appends the wire representation of `self` to `out`.
    fn format(&self, out: &mut Vec<u8>);
}
/// A builder for messages
#[derive(Debug, Clone)]
pub struct MessageBuilder {
    // Headers accumulated so far; becomes the message header block on build.
    headers: Headers,
    // Explicit envelope override; derived from the headers when `None`.
    envelope: Option<Envelope>,
}
impl MessageBuilder {
    /// Creates a new default message builder
    pub fn new() -> Self {
        Self {
            headers: Headers::new(),
            envelope: None,
        }
    }
    /// Set custom header to message
    pub fn header<H: Header>(mut self, header: H) -> Self {
        self.headers.set(header);
        self
    }
    /// Add mailbox to header
    ///
    /// If a header of this kind is already present, the new mailboxes are
    /// merged into it instead of replacing it.
    pub fn mailbox<H: Header + MailboxesHeader>(mut self, header: H) -> Self {
        if self.headers.has::<H>() {
            self.headers.get_mut::<H>().unwrap().join_mailboxes(header);
            self
        } else {
            self.header(header)
        }
    }
    /// Add `Date` header to message
    ///
    /// Shortcut for `self.header(header::Date(date))`.
    pub fn date(self, date: EmailDate) -> Self {
        self.header(header::Date(date))
    }
    /// Set `Date` header using current date/time
    ///
    /// Shortcut for `self.date(SystemTime::now())`.
    pub fn date_now(self) -> Self {
        self.date(SystemTime::now().into())
    }
    /// Set `Subject` header to message
    ///
    /// Shortcut for `self.header(header::Subject(subject.into()))`.
    pub fn subject<S: Into<String>>(self, subject: S) -> Self {
        self.header(header::Subject(subject.into()))
    }
    /// Set `Mime-Version` header to 1.0
    ///
    /// Shortcut for `self.header(header::MIME_VERSION_1_0)`.
    ///
    /// Not exposed as it is set by body methods
    fn mime_1_0(self) -> Self {
        self.header(header::MIME_VERSION_1_0)
    }
    /// Set `Sender` header. Should be used when providing several `From` mailboxes.
    ///
    /// https://tools.ietf.org/html/rfc5322#section-3.6.2
    ///
    /// Shortcut for `self.header(header::Sender(mbox))`.
    pub fn sender(self, mbox: Mailbox) -> Self {
        self.header(header::Sender(mbox))
    }
    /// Set or add mailbox to `From` header
    ///
    /// https://tools.ietf.org/html/rfc5322#section-3.6.2
    ///
    /// Shortcut for `self.mailbox(header::From(mbox))`.
    pub fn from(self, mbox: Mailbox) -> Self {
        self.mailbox(header::From(mbox.into()))
    }
    /// Set or add mailbox to `ReplyTo` header
    ///
    /// https://tools.ietf.org/html/rfc5322#section-3.6.2
    ///
    /// Shortcut for `self.mailbox(header::ReplyTo(mbox))`.
    pub fn reply_to(self, mbox: Mailbox) -> Self {
        self.mailbox(header::ReplyTo(mbox.into()))
    }
    /// Set or add mailbox to `To` header
    ///
    /// Shortcut for `self.mailbox(header::To(mbox))`.
    pub fn to(self, mbox: Mailbox) -> Self {
        self.mailbox(header::To(mbox.into()))
    }
    /// Set or add mailbox to `Cc` header
    ///
    /// Shortcut for `self.mailbox(header::Cc(mbox))`.
    pub fn cc(self, mbox: Mailbox) -> Self {
        self.mailbox(header::Cc(mbox.into()))
    }
    /// Set or add mailbox to `Bcc` header
    ///
    /// Shortcut for `self.mailbox(header::Bcc(mbox))`.
    pub fn bcc(self, mbox: Mailbox) -> Self {
        self.mailbox(header::Bcc(mbox.into()))
    }
    /// Set or add message id to [`In-Reply-To`
    /// header](https://tools.ietf.org/html/rfc5322#section-3.6.4)
    pub fn in_reply_to(self, id: String) -> Self {
        self.header(header::InReplyTo(id))
    }
    /// Set or add message id to [`References`
    /// header](https://tools.ietf.org/html/rfc5322#section-3.6.4)
    pub fn references(self, id: String) -> Self {
        self.header(header::References(id))
    }
    /// Set [Message-Id
    /// header](https://tools.ietf.org/html/rfc5322#section-3.6.4)
    ///
    /// Should generally be inserted by the mail relay.
    ///
    /// If `None` is provided, an id will be generated in the
    /// `<UUID@HOSTNAME>`.
    pub fn message_id(self, id: Option<String>) -> Self {
        match id {
            Some(i) => self.header(header::MessageId(i)),
            None => {
                // With the "hostname" feature, use the machine's hostname as
                // the id domain; otherwise fall back to "localhost".
                #[cfg(feature = "hostname")]
                let hostname = hostname::get()
                    .map_err(|_| ())
                    .and_then(|s| s.into_string().map_err(|_| ()))
                    .unwrap_or_else(|_| DEFAULT_MESSAGE_ID_DOMAIN.to_string());
                #[cfg(not(feature = "hostname"))]
                let hostname = DEFAULT_MESSAGE_ID_DOMAIN.to_string();
                self.header(header::MessageId(
                    // https://tools.ietf.org/html/rfc5322#section-3.6.4
                    format!("<{}@{}>", Uuid::new_v4(), hostname),
                ))
            }
        }
    }
    /// Set [User-Agent
    /// header](https://tools.ietf.org/html/draft-melnikov-email-user-agent-004)
    pub fn user_agent(self, id: String) -> Self {
        self.header(header::UserAgent(id))
    }
    /// Force specific envelope (by default it is derived from headers)
    pub fn envelope(mut self, envelope: Envelope) -> Self {
        self.envelope = Some(envelope);
        self
    }
    // TODO: High-level methods for attachments and embedded files
    /// Create message from body
    ///
    /// Validates the required RFC 5322 headers (inserting `Date` when
    /// missing), derives the envelope if none was forced, and assembles the
    /// final `Message`.
    fn build(self, body: Body) -> Result<Message, EmailError> {
        // Check for missing required headers
        // https://tools.ietf.org/html/rfc5322#section-3.6
        // Insert Date if missing
        let res = if self.headers.get::<header::Date>().is_none() {
            self.date_now()
        } else {
            self
        };
        // Fail if missing correct originator (Sender or From)
        match res.headers.get::<header::From>() {
            Some(header::From(f)) => {
                let from: Vec<Mailbox> = f.clone().into();
                // Multiple From mailboxes require an explicit Sender
                // (RFC 5322 §3.6.2).
                if from.len() > 1 && res.headers.get::<header::Sender>().is_none() {
                    return Err(EmailError::TooManyFrom);
                }
            }
            None => {
                return Err(EmailError::MissingFrom);
            }
        }
        let envelope = match res.envelope {
            Some(e) => e,
            None => Envelope::try_from(&res.headers)?,
        };
        Ok(Message {
            headers: res.headers,
            body,
            envelope,
        })
    }
    // In theory having a body is optional
    /// Plain ASCII body
    ///
    /// *WARNING*: Generally not what you want
    ///
    /// Fails with `EmailError::NonAsciiChars` if the body contains any
    /// non-ASCII character; use the MIME body builders for other content.
    pub fn body<T: Into<String>>(self, body: T) -> Result<Message, EmailError> {
        // 998 chars by line
        // CR and LF MUST only occur together as CRLF; they MUST NOT appear
        // independently in the body.
        let body = body.into();
        if !&body.is_ascii() {
            return Err(EmailError::NonAsciiChars);
        }
        self.build(Body::Raw(body))
    }
    /// Create message using mime body ([`MultiPart`][self::MultiPart])
    pub fn multipart(self, part: MultiPart) -> Result<Message, EmailError> {
        self.mime_1_0().build(Body::Mime(Part::Multi(part)))
    }
    /// Create message using mime body ([`SinglePart`][self::SinglePart])
    pub fn singlepart(self, part: SinglePart) -> Result<Message, EmailError> {
        self.mime_1_0().build(Body::Mime(Part::Single(part)))
    }
}
/// Email message which can be formatted
#[derive(Clone, Debug)]
pub struct Message {
    // Complete, validated header block.
    headers: Headers,
    // Either a raw ASCII body or a MIME part tree.
    body: Body,
    // Routing envelope (sender/recipients) used by transports.
    envelope: Envelope,
}
#[derive(Clone, Debug)]
enum Body {
    /// MIME-structured body (single- or multi-part).
    Mime(Part),
    /// Raw ASCII text body, written verbatim after the header block.
    Raw(String),
}
impl Message {
    /// Creates a fresh, header-less [`MessageBuilder`].
    pub fn builder() -> MessageBuilder {
        MessageBuilder::new()
    }

    /// Returns a reference to the message's header block.
    pub fn headers(&self) -> &Headers {
        &self.headers
    }

    /// Returns a reference to the message's routing envelope.
    pub fn envelope(&self) -> &Envelope {
        &self.envelope
    }

    /// Serializes the message into the byte form sent over SMTP.
    pub fn formatted(&self) -> Vec<u8> {
        let mut buffer = Vec::new();
        self.format(&mut buffer);
        buffer
    }
}
impl EmailFormat for Message {
    fn format(&self, out: &mut Vec<u8>) {
        // The header block always comes first.
        out.extend_from_slice(self.headers.to_string().as_bytes());
        match &self.body {
            // A blank line separates the headers from a raw body.
            Body::Raw(text) => {
                out.extend_from_slice(b"\r\n");
                out.extend_from_slice(text.as_bytes())
            }
            // MIME parts know how to emit their own boundaries and headers.
            Body::Mime(part) => part.format(out),
        }
    }
}
impl Default for MessageBuilder {
fn default() -> Self {
MessageBuilder::new()
}
}
#[cfg(test)]
mod test {
    use crate::message::{header, mailbox::Mailbox, Message};

    /// Building without any originator (`From`) header must fail.
    #[test]
    fn email_missing_originator() {
        assert!(Message::builder().body("Happy new year!").is_err());
    }

    /// `From` + `To` + an ASCII body is the minimal valid message.
    /// (Test name typo fixed: was `email_miminal_message`.)
    #[test]
    fn email_minimal_message() {
        assert!(Message::builder()
            .from("NoBody <[email protected]>".parse().unwrap())
            .to("NoBody <[email protected]>".parse().unwrap())
            .body("Happy new year!")
            .is_ok());
    }

    /// Several `From` mailboxes without an explicit `Sender` must fail.
    #[test]
    fn email_missing_sender() {
        assert!(Message::builder()
            .from("NoBody <[email protected]>".parse().unwrap())
            .from("AnyBody <[email protected]>".parse().unwrap())
            .body("Happy new year!")
            .is_err());
    }

    /// Full formatting round-trip, including UTF-8 header encoding.
    #[test]
    fn email_message() {
        let date = "Tue, 15 Nov 1994 08:12:31 GMT".parse().unwrap();
        let email = Message::builder()
            .date(date)
            .header(header::From(
                vec![Mailbox::new(
                    Some("Каи".into()),
                    "[email protected]".parse().unwrap(),
                )]
                .into(),
            ))
            .header(header::To(
                vec!["Pony O.P. <[email protected]>".parse().unwrap()].into(),
            ))
            .header(header::Subject("яңа ел белән!".into()))
            .body("Happy new year!")
            .unwrap();
        assert_eq!(
            String::from_utf8(email.formatted()).unwrap(),
            concat!(
                "Date: Tue, 15 Nov 1994 08:12:31 GMT\r\n",
                "From: =?utf-8?b?0JrQsNC4?= <[email protected]>\r\n",
                "To: Pony O.P. <[email protected]>\r\n",
                "Subject: =?utf-8?b?0Y/So9CwINC10Lsg0LHQtdC705nQvSE=?=\r\n",
                "\r\n",
                "Happy new year!"
            )
        );
    }
}
| 30.883212 | 120 | 0.557788 |
5094fb58d26f9e4331f961d6bc822780a6b6a18a | 1,308 | use serde_derive::{Serialize, Deserialize};
use lambda_runtime::{lambda, Context, error::HandlerError};
use rand::Rng;
use rand::distributions::{Bernoulli, Normal, Uniform};
use std::error::Error;
use std::ops::Range;
/// Incoming Lambda event selecting a distribution; deserialized from JSON of
/// the form `{"distribution": "...", "parameters": {...}}`.
#[derive(Deserialize)]
#[serde(tag = "distribution", content = "parameters", rename_all = "lowercase")]
enum RngRequest {
    /// Uniform integer distribution over the given range.
    Uniform {
        // `start`/`end` of the range are flattened directly into `parameters`.
        #[serde(flatten)]
        range: Range<i32>,
    },
    /// Normal (Gaussian) distribution with the given mean and std deviation.
    Normal {
        mean: f64,
        std_dev: f64,
    },
    /// Bernoulli trial with success probability `p`.
    Bernoulli {
        p: f64,
    },
}
/// Lambda response: the sampled value, always serialized as a float.
#[derive(Serialize)]
struct RngResponse {
    value: f64,
}
/// Lambda entry point: installs debug-level logging, then hands control to
/// the runtime with `rng_handler` as the event handler.
fn main() -> Result<(), Box<dyn Error>> {
    simple_logger::init_with_level(log::Level::Debug).unwrap();
    lambda!(rng_handler);
    Ok(())
}
/// Samples a single value from the distribution described by `event` and
/// wraps it in an [`RngResponse`].
fn rng_handler(event: RngRequest, _ctx: Context) -> Result<RngResponse, HandlerError> {
    let mut rng = rand::thread_rng();
    let value = match event {
        RngRequest::Uniform { range } => rng.sample(Uniform::from(range)) as f64,
        RngRequest::Normal { mean, std_dev } => rng.sample(Normal::new(mean, std_dev)),
        RngRequest::Bernoulli { p } => {
            // Map the boolean outcome to 1.0 / 0.0, exactly as the
            // `bool as i8 as f64` conversion does.
            if rng.sample(Bernoulli::new(p)) {
                1.0
            } else {
                0.0
            }
        }
    };
    Ok(RngResponse { value })
}
| 25.153846 | 87 | 0.564985 |
bf8d2f544018e8418549abb010672a07e6c6a043 | 144 | //! Use this module to interact with Kraken exchange.
//! See examples for more information.
pub mod api;
pub mod generic_api;
pub mod utils;
| 20.571429 | 53 | 0.75 |
2fd0bb7f61fd804ddfb6d64c5bf26f3deb56b355 | 44,476 | #[cfg(all(feature = "alloc", not(feature = "std")))]
use alloc::vec::Vec;
#[cfg(feature = "arbitrary")]
use crate::base::storage::Owned;
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};
use num::{Bounded, One, Zero};
#[cfg(feature = "rand-no-std")]
use rand::{
distributions::{Distribution, Standard},
Rng,
};
use std::iter;
use std::mem;
use typenum::{self, Cmp, Greater};
use simba::scalar::{ClosedAdd, ClosedMul};
use crate::base::allocator::Allocator;
use crate::base::dimension::{Dim, DimName, Dynamic, U1, U2, U3, U4, U5, U6};
use crate::base::storage::Storage;
use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vector, VectorN};
/// When "no_unsound_assume_init" is enabled, expands to `unimplemented!()` instead of `new_uninitialized_generic().assume_init()`.
/// Intended as a placeholder, each callsite should be refactored to use uninitialized memory soundly
#[macro_export]
macro_rules! unimplemented_or_uninitialized_generic {
    ($nrows:expr, $ncols:expr) => {{
        #[cfg(feature="no_unsound_assume_init")] {
            // Some of the call sites need the number of rows and columns from this to infer a type, so
            // uninitialized memory is used to infer the type, as `N: Zero` isn't available at all callsites.
            // This may technically still be UB even though the assume_init is dead code, but all callsites should be fixed before #556 is closed.
            let typeinference_helper = crate::base::Matrix::new_uninitialized_generic($nrows, $ncols);
            unimplemented!();
            typeinference_helper.assume_init()
        }
        // Without the feature: the caller must fully initialize the matrix
        // before reading it (tracking issue #556).
        #[cfg(not(feature="no_unsound_assume_init"))] { crate::base::Matrix::new_uninitialized_generic($nrows, $ncols).assume_init() }
    }}
}
/// # Generic constructors
/// This set of matrix and vector construction functions are all generic
/// with-regard to the matrix dimensions. They all expect to be given
/// the dimension as inputs.
///
/// These functions should only be used when working on dimension-generic code.
impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
where
DefaultAllocator: Allocator<N, R, C>,
{
    /// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics
    /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`.
    ///
    /// # Safety
    /// The returned matrix's components are uninitialized; the caller must
    /// write every component before reading it or calling `assume_init`.
    #[inline]
    pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> mem::MaybeUninit<Self> {
        Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols))
    }
/// Creates a matrix with all its elements set to `elem`.
#[inline]
pub fn from_element_generic(nrows: R, ncols: C, elem: N) -> Self {
let len = nrows.value() * ncols.value();
Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len))
}
/// Creates a matrix with all its elements set to `elem`.
///
/// Same as `from_element_generic`.
#[inline]
pub fn repeat_generic(nrows: R, ncols: C, elem: N) -> Self {
let len = nrows.value() * ncols.value();
Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len))
}
/// Creates a matrix with all its elements set to 0.
#[inline]
pub fn zeros_generic(nrows: R, ncols: C) -> Self
where
N: Zero,
{
Self::from_element_generic(nrows, ncols, N::zero())
}
/// Creates a matrix with all its elements filled by an iterator.
#[inline]
pub fn from_iterator_generic<I>(nrows: R, ncols: C, iter: I) -> Self
where
I: IntoIterator<Item = N>,
{
Self::from_data(DefaultAllocator::allocate_from_iterator(nrows, ncols, iter))
}
    /// Creates a matrix with its elements filled with the components provided by a slice in
    /// row-major order.
    ///
    /// The order of elements in the slice must follow the usual mathematic writing, i.e.,
    /// row-by-row.
    ///
    /// Panics if `slice.len() != nrows * ncols`.
    #[inline]
    pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[N]) -> Self {
        assert!(
            slice.len() == nrows.value() * ncols.value(),
            "Matrix init. error: the slice did not contain the right number of elements."
        );
        // The storage is column-major but the input is row-major, so the
        // elements are scattered cell-by-cell instead of copied contiguously.
        let mut res = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
        let mut iter = slice.iter();
        for i in 0..nrows.value() {
            for j in 0..ncols.value() {
                // In-bounds (i < nrows, j < ncols); every cell is written
                // exactly once before the matrix is returned.
                unsafe { *res.get_unchecked_mut((i, j)) = iter.next().unwrap().inlined_clone() }
            }
        }
        res
    }
/// Creates a matrix with its elements filled with the components provided by a slice. The
/// components must have the same layout as the matrix data storage (i.e. column-major).
#[inline]
pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[N]) -> Self {
Self::from_iterator_generic(nrows, ncols, slice.iter().cloned())
}
    /// Creates a matrix filled with the results of a function applied to each of its component
    /// coordinates.
    #[inline]
    pub fn from_fn_generic<F>(nrows: R, ncols: C, mut f: F) -> Self
    where
        F: FnMut(usize, usize) -> N,
    {
        // Start from uninitialized storage; the column-major double loop
        // below writes every component exactly once with `f(row, col)`.
        let mut res: Self = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
        for j in 0..ncols.value() {
            for i in 0..nrows.value() {
                // In-bounds: i < nrows and j < ncols.
                unsafe { *res.get_unchecked_mut((i, j)) = f(i, j) }
            }
        }
        res
    }
    /// Creates a new identity matrix.
    ///
    /// If the matrix is not square, the largest square submatrix starting at index `(0, 0)` is set
    /// to the identity matrix. All other entries are set to zero.
    #[inline]
    pub fn identity_generic(nrows: R, ncols: C) -> Self
    where
        N: Zero + One,
    {
        // The identity is the diagonal matrix whose diagonal element is 1.
        Self::from_diagonal_element_generic(nrows, ncols, N::one())
    }
/// Creates a new matrix with its diagonal filled with copies of `elt`.
///
/// If the matrix is not square, only the diagonal of the largest square submatrix starting
/// at index `(0, 0)` is filled with `elt`. All other entries are set to zero.
#[inline]
pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: N) -> Self
where
N: Zero + One,
{
let mut res = Self::zeros_generic(nrows, ncols);
// The diagonal length is the smaller of the two dimensions.
for i in 0..crate::min(nrows.value(), ncols.value()) {
unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() }
}
res
}
/// Creates a new matrix that may be rectangular. The first `elts.len()` diagonal elements are
/// filled with the content of `elts`. Others are set to 0.
///
/// Panics if `elts.len()` is larger than the minimum among `nrows` and `ncols`.
#[inline]
pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[N]) -> Self
where
    N: Zero,
{
    // Validate the input before allocating so an invalid call fails fast
    // instead of first zero-initializing the whole matrix.
    assert!(
        elts.len() <= crate::min(nrows.value(), ncols.value()),
        "Too many diagonal elements provided."
    );
    let mut res = Self::zeros_generic(nrows, ncols);
    for (i, elt) in elts.iter().enumerate() {
        // In bounds: i < elts.len() <= min(nrows, ncols), checked above.
        unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() }
    }
    res
}
/// Builds a new matrix from its rows.
///
/// Panics if not enough rows are provided (for statically-sized matrices), or if all rows do
/// not have the same dimensions.
///
/// # Example
/// ```
/// # use nalgebra::{RowVector3, Matrix3};
/// # use std::iter;
///
/// let m = Matrix3::from_rows(&[ RowVector3::new(1.0, 2.0, 3.0), RowVector3::new(4.0, 5.0, 6.0), RowVector3::new(7.0, 8.0, 9.0) ]);
///
/// assert!(m.m11 == 1.0 && m.m12 == 2.0 && m.m13 == 3.0 &&
///         m.m21 == 4.0 && m.m22 == 5.0 && m.m23 == 6.0 &&
///         m.m31 == 7.0 && m.m32 == 8.0 && m.m33 == 9.0);
/// ```
#[inline]
pub fn from_rows<SB>(rows: &[Matrix<N, U1, C, SB>]) -> Self
where
    SB: Storage<N, U1, C>,
{
    assert!(!rows.is_empty(), "At least one row must be given.");
    // For dynamically-sized matrices the row count is inferred from the input.
    // `rows.len()` is trivially cheap, so the lazy closure form was unnecessary
    // (clippy::unnecessary_lazy_evaluations).
    let nrows = R::try_to_usize().unwrap_or(rows.len());
    let ncols = rows[0].len();
    assert!(
        rows.len() == nrows,
        "Invalid number of rows provided to build this matrix."
    );
    // A statically-sized column count already forces every row to the same
    // length at the type level; only check explicitly in the dynamic case.
    if C::try_to_usize().is_none() {
        assert!(
            rows.iter().all(|r| r.len() == ncols),
            "The provided rows must all have the same dimension."
        );
    }
    // TODO: optimize that.
    Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| {
        rows[i][(0, j)].inlined_clone()
    })
}
/// Builds a new matrix from its columns.
///
/// Panics if not enough columns are provided (for statically-sized matrices), or if all
/// columns do not have the same dimensions.
///
/// # Example
/// ```
/// # use nalgebra::{Vector3, Matrix3};
/// # use std::iter;
///
/// let m = Matrix3::from_columns(&[ Vector3::new(1.0, 2.0, 3.0), Vector3::new(4.0, 5.0, 6.0), Vector3::new(7.0, 8.0, 9.0) ]);
///
/// assert!(m.m11 == 1.0 && m.m12 == 4.0 && m.m13 == 7.0 &&
///         m.m21 == 2.0 && m.m22 == 5.0 && m.m23 == 8.0 &&
///         m.m31 == 3.0 && m.m32 == 6.0 && m.m33 == 9.0);
/// ```
#[inline]
pub fn from_columns<SB>(columns: &[Vector<N, R, SB>]) -> Self
where
    SB: Storage<N, R>,
{
    assert!(!columns.is_empty(), "At least one column must be given.");
    // `columns.len()` is trivially cheap, so the lazy closure form was
    // unnecessary (clippy::unnecessary_lazy_evaluations).
    let ncols = C::try_to_usize().unwrap_or(columns.len());
    let nrows = columns[0].len();
    assert!(
        columns.len() == ncols,
        "Invalid number of columns provided to build this matrix."
    );
    // A statically-sized row count already forces every column to the same
    // length at the type level; only check explicitly in the dynamic case.
    if R::try_to_usize().is_none() {
        assert!(
            // The closure argument is a column, so name it accordingly.
            columns.iter().all(|c| c.len() == nrows),
            "The columns provided must all have the same dimension."
        );
    }
    // TODO: optimize that.
    Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| {
        columns[j][i].inlined_clone()
    })
}
/// Creates a matrix filled with random values.
///
/// Uses the thread-local RNG and the `Standard` distribution for `N`.
#[inline]
#[cfg(feature = "rand")]
pub fn new_random_generic(nrows: R, ncols: C) -> Self
where
Standard: Distribution<N>,
{
let mut rng = rand::thread_rng();
Self::from_fn_generic(nrows, ncols, |_, _| rng.gen())
}
/// Creates a matrix filled with random values from the given distribution.
///
/// One independent sample is drawn from `distribution` per component, using the
/// caller-provided RNG (so the result is reproducible with a seeded generator).
#[inline]
#[cfg(feature = "rand-no-std")]
pub fn from_distribution_generic<Distr: Distribution<N> + ?Sized, G: Rng + ?Sized>(
nrows: R,
ncols: C,
distribution: &Distr,
rng: &mut G,
) -> Self {
Self::from_fn_generic(nrows, ncols, |_, _| distribution.sample(rng))
}
/// Creates a matrix backed by a given `Vec`.
///
/// The output matrix is filled column-by-column.
///
/// As the example below demonstrates, the matrix takes ownership of the `Vec`'s
/// buffer without copying when the storage is a `Vec` of the right size.
///
/// # Example
/// ```
/// # use nalgebra::{Dynamic, DMatrix, Matrix, U1};
///
/// let vec = vec![0, 1, 2, 3, 4, 5];
/// let vec_ptr = vec.as_ptr();
///
/// let matrix = Matrix::from_vec_generic(Dynamic::new(vec.len()), U1, vec);
/// let matrix_storage_ptr = matrix.data.as_vec().as_ptr();
///
/// // `matrix` is backed by exactly the same `Vec` as it was constructed from.
/// assert_eq!(matrix_storage_ptr, vec_ptr);
/// ```
#[inline]
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn from_vec_generic(nrows: R, ncols: C, data: Vec<N>) -> Self {
Self::from_iterator_generic(nrows, ncols, data)
}
}
impl<N, D: Dim> MatrixN<N, D>
where
    N: Scalar,
    DefaultAllocator: Allocator<N, D, D>,
{
    /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Vector3, DVector, Matrix3, DMatrix};
    /// # use std::iter;
    ///
    /// let m = Matrix3::from_diagonal(&Vector3::new(1.0, 2.0, 3.0));
    /// // The two additional arguments represent the matrix dimensions.
    /// let dm = DMatrix::from_diagonal(&DVector::from_row_slice(&[1.0, 2.0, 3.0]));
    ///
    /// assert!(m.m11 == 1.0 && m.m12 == 0.0 && m.m13 == 0.0 &&
    ///         m.m21 == 0.0 && m.m22 == 2.0 && m.m23 == 0.0 &&
    ///         m.m31 == 0.0 && m.m32 == 0.0 && m.m33 == 3.0);
    /// assert!(dm[(0, 0)] == 1.0 && dm[(0, 1)] == 0.0 && dm[(0, 2)] == 0.0 &&
    ///         dm[(1, 0)] == 0.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 0.0 &&
    ///         dm[(2, 0)] == 0.0 && dm[(2, 1)] == 0.0 && dm[(2, 2)] == 3.0);
    /// ```
    #[inline]
    pub fn from_diagonal<SB: Storage<N, D>>(diag: &Vector<N, D, SB>) -> Self
    where
        N: Zero,
    {
        // The square matrix dimension is taken from the diagonal vector length.
        let (dim, _) = diag.data.shape();
        let mut out = Self::zeros_generic(dim, dim);
        for i in 0..diag.len() {
            // In bounds: `i` is below both the vector length and the matrix dimension.
            unsafe { *out.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone() };
        }
        out
    }
}
/*
*
* Generate constructors with varying number of arguments, depending on the object type.
*
*/
// Generates the public, non-`_generic` constructor API for one matrix shape.
//
// Macro arguments (separated by `;`):
// * `$Dims`                 — the `R, C` types of the target `MatrixMN` alias;
// * `=> $DimIdent: $DimBound` — generic dimension parameters (with bounds) still
//                             needed by the enclosing `impl` block;
// * `$gargs`                — dimension expressions forwarded to the `_generic`
//                             constructors;
// * `$args`                 — the runtime `usize` dimension arguments exposed to
//                             the caller (empty when both dims are static).
// Every generated method simply forwards to its `_generic` counterpart.
macro_rules! impl_constructors(
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
/// Creates a new uninitialized matrix or vector.
#[inline]
pub unsafe fn new_uninitialized($($args: usize),*) -> mem::MaybeUninit<Self> {
Self::new_uninitialized_generic($($gargs),*)
}
/// Creates a matrix or vector with all its elements set to `elem`.
///
/// # Example
/// ```
/// # use nalgebra::{Matrix2x3, Vector3, DVector, DMatrix};
///
/// let v = Vector3::from_element(2.0);
/// // The additional argument represents the vector dimension.
/// let dv = DVector::from_element(3, 2.0);
/// let m = Matrix2x3::from_element(2.0);
/// // The two additional arguments represent the matrix dimensions.
/// let dm = DMatrix::from_element(2, 3, 2.0);
///
/// assert!(v.x == 2.0 && v.y == 2.0 && v.z == 2.0);
/// assert!(dv[0] == 2.0 && dv[1] == 2.0 && dv[2] == 2.0);
/// assert!(m.m11 == 2.0 && m.m12 == 2.0 && m.m13 == 2.0 &&
/// m.m21 == 2.0 && m.m22 == 2.0 && m.m23 == 2.0);
/// assert!(dm[(0, 0)] == 2.0 && dm[(0, 1)] == 2.0 && dm[(0, 2)] == 2.0 &&
/// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0);
/// ```
#[inline]
pub fn from_element($($args: usize,)* elem: N) -> Self {
Self::from_element_generic($($gargs, )* elem)
}
/// Creates a matrix or vector with all its elements set to `elem`.
///
/// Same as `.from_element`.
///
/// # Example
/// ```
/// # use nalgebra::{Matrix2x3, Vector3, DVector, DMatrix};
///
/// let v = Vector3::repeat(2.0);
/// // The additional argument represents the vector dimension.
/// let dv = DVector::repeat(3, 2.0);
/// let m = Matrix2x3::repeat(2.0);
/// // The two additional arguments represent the matrix dimensions.
/// let dm = DMatrix::repeat(2, 3, 2.0);
///
/// assert!(v.x == 2.0 && v.y == 2.0 && v.z == 2.0);
/// assert!(dv[0] == 2.0 && dv[1] == 2.0 && dv[2] == 2.0);
/// assert!(m.m11 == 2.0 && m.m12 == 2.0 && m.m13 == 2.0 &&
/// m.m21 == 2.0 && m.m22 == 2.0 && m.m23 == 2.0);
/// assert!(dm[(0, 0)] == 2.0 && dm[(0, 1)] == 2.0 && dm[(0, 2)] == 2.0 &&
/// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0);
/// ```
#[inline]
pub fn repeat($($args: usize,)* elem: N) -> Self {
Self::repeat_generic($($gargs, )* elem)
}
/// Creates a matrix or vector with all its elements set to `0`.
///
/// # Example
/// ```
/// # use nalgebra::{Matrix2x3, Vector3, DVector, DMatrix};
///
/// let v = Vector3::<f32>::zeros();
/// // The argument represents the vector dimension.
/// let dv = DVector::<f32>::zeros(3);
/// let m = Matrix2x3::<f32>::zeros();
/// // The two arguments represent the matrix dimensions.
/// let dm = DMatrix::<f32>::zeros(2, 3);
///
/// assert!(v.x == 0.0 && v.y == 0.0 && v.z == 0.0);
/// assert!(dv[0] == 0.0 && dv[1] == 0.0 && dv[2] == 0.0);
/// assert!(m.m11 == 0.0 && m.m12 == 0.0 && m.m13 == 0.0 &&
/// m.m21 == 0.0 && m.m22 == 0.0 && m.m23 == 0.0);
/// assert!(dm[(0, 0)] == 0.0 && dm[(0, 1)] == 0.0 && dm[(0, 2)] == 0.0 &&
/// dm[(1, 0)] == 0.0 && dm[(1, 1)] == 0.0 && dm[(1, 2)] == 0.0);
/// ```
#[inline]
pub fn zeros($($args: usize),*) -> Self
where N: Zero {
Self::zeros_generic($($gargs),*)
}
/// Creates a matrix or vector with all its elements filled by an iterator.
///
/// The output matrix is filled column-by-column.
///
/// # Example
/// ```
/// # use nalgebra::{Matrix2x3, Vector3, DVector, DMatrix};
/// # use std::iter;
///
/// let v = Vector3::from_iterator((0..3).into_iter());
/// // The additional argument represents the vector dimension.
/// let dv = DVector::from_iterator(3, (0..3).into_iter());
/// let m = Matrix2x3::from_iterator((0..6).into_iter());
/// // The two additional arguments represent the matrix dimensions.
/// let dm = DMatrix::from_iterator(2, 3, (0..6).into_iter());
///
/// assert!(v.x == 0 && v.y == 1 && v.z == 2);
/// assert!(dv[0] == 0 && dv[1] == 1 && dv[2] == 2);
/// assert!(m.m11 == 0 && m.m12 == 2 && m.m13 == 4 &&
/// m.m21 == 1 && m.m22 == 3 && m.m23 == 5);
/// assert!(dm[(0, 0)] == 0 && dm[(0, 1)] == 2 && dm[(0, 2)] == 4 &&
/// dm[(1, 0)] == 1 && dm[(1, 1)] == 3 && dm[(1, 2)] == 5);
/// ```
#[inline]
pub fn from_iterator<I>($($args: usize,)* iter: I) -> Self
where I: IntoIterator<Item = N> {
Self::from_iterator_generic($($gargs, )* iter)
}
/// Creates a matrix or vector filled with the results of a function applied to each of its
/// component coordinates.
///
/// # Example
/// ```
/// # use nalgebra::{Matrix2x3, Vector3, DVector, DMatrix};
/// # use std::iter;
///
/// let v = Vector3::from_fn(|i, _| i);
/// // The additional argument represents the vector dimension.
/// let dv = DVector::from_fn(3, |i, _| i);
/// let m = Matrix2x3::from_fn(|i, j| i * 3 + j);
/// // The two additional arguments represent the matrix dimensions.
/// let dm = DMatrix::from_fn(2, 3, |i, j| i * 3 + j);
///
/// assert!(v.x == 0 && v.y == 1 && v.z == 2);
/// assert!(dv[0] == 0 && dv[1] == 1 && dv[2] == 2);
/// assert!(m.m11 == 0 && m.m12 == 1 && m.m13 == 2 &&
/// m.m21 == 3 && m.m22 == 4 && m.m23 == 5);
/// assert!(dm[(0, 0)] == 0 && dm[(0, 1)] == 1 && dm[(0, 2)] == 2 &&
/// dm[(1, 0)] == 3 && dm[(1, 1)] == 4 && dm[(1, 2)] == 5);
/// ```
#[inline]
pub fn from_fn<F>($($args: usize,)* f: F) -> Self
where F: FnMut(usize, usize) -> N {
Self::from_fn_generic($($gargs, )* f)
}
/// Creates an identity matrix. If the matrix is not square, the largest square
/// submatrix (starting at the first row and column) is set to the identity while all
/// other entries are set to zero.
///
/// # Example
/// ```
/// # use nalgebra::{Matrix2x3, DMatrix};
/// # use std::iter;
///
/// let m = Matrix2x3::<f32>::identity();
/// // The two additional arguments represent the matrix dimensions.
/// let dm = DMatrix::<f32>::identity(2, 3);
///
/// assert!(m.m11 == 1.0 && m.m12 == 0.0 && m.m13 == 0.0 &&
/// m.m21 == 0.0 && m.m22 == 1.0 && m.m23 == 0.0);
/// assert!(dm[(0, 0)] == 1.0 && dm[(0, 1)] == 0.0 && dm[(0, 2)] == 0.0 &&
/// dm[(1, 0)] == 0.0 && dm[(1, 1)] == 1.0 && dm[(1, 2)] == 0.0);
/// ```
#[inline]
pub fn identity($($args: usize,)*) -> Self
where N: Zero + One {
Self::identity_generic($($gargs),* )
}
/// Creates a matrix filled with its diagonal filled with `elt` and all other
/// components set to zero.
///
/// # Example
/// ```
/// # use nalgebra::{Matrix2x3, DMatrix};
/// # use std::iter;
///
/// let m = Matrix2x3::from_diagonal_element(5.0);
/// // The two additional arguments represent the matrix dimensions.
/// let dm = DMatrix::from_diagonal_element(2, 3, 5.0);
///
/// assert!(m.m11 == 5.0 && m.m12 == 0.0 && m.m13 == 0.0 &&
/// m.m21 == 0.0 && m.m22 == 5.0 && m.m23 == 0.0);
/// assert!(dm[(0, 0)] == 5.0 && dm[(0, 1)] == 0.0 && dm[(0, 2)] == 0.0 &&
/// dm[(1, 0)] == 0.0 && dm[(1, 1)] == 5.0 && dm[(1, 2)] == 0.0);
/// ```
#[inline]
pub fn from_diagonal_element($($args: usize,)* elt: N) -> Self
where N: Zero + One {
Self::from_diagonal_element_generic($($gargs, )* elt)
}
/// Creates a new matrix that may be rectangular. The first `elts.len()` diagonal
/// elements are filled with the content of `elts`. Others are set to 0.
///
/// Panics if `elts.len()` is larger than the minimum among `nrows` and `ncols`.
///
/// # Example
/// ```
/// # use nalgebra::{Matrix3, DMatrix};
/// # use std::iter;
///
/// let m = Matrix3::from_partial_diagonal(&[1.0, 2.0]);
/// // The two additional arguments represent the matrix dimensions.
/// let dm = DMatrix::from_partial_diagonal(3, 3, &[1.0, 2.0]);
///
/// assert!(m.m11 == 1.0 && m.m12 == 0.0 && m.m13 == 0.0 &&
/// m.m21 == 0.0 && m.m22 == 2.0 && m.m23 == 0.0 &&
/// m.m31 == 0.0 && m.m32 == 0.0 && m.m33 == 0.0);
/// assert!(dm[(0, 0)] == 1.0 && dm[(0, 1)] == 0.0 && dm[(0, 2)] == 0.0 &&
/// dm[(1, 0)] == 0.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 0.0 &&
/// dm[(2, 0)] == 0.0 && dm[(2, 1)] == 0.0 && dm[(2, 2)] == 0.0);
/// ```
#[inline]
pub fn from_partial_diagonal($($args: usize,)* elts: &[N]) -> Self
where N: Zero {
Self::from_partial_diagonal_generic($($gargs, )* elts)
}
/// Creates a matrix or vector filled with random values from the given distribution.
#[inline]
#[cfg(feature = "rand-no-std")]
pub fn from_distribution<Distr: Distribution<N> + ?Sized, G: Rng + ?Sized>(
$($args: usize,)*
distribution: &Distr,
rng: &mut G,
) -> Self {
Self::from_distribution_generic($($gargs, )* distribution, rng)
}
/// Creates a matrix filled with random values.
#[inline]
#[cfg(feature = "rand")]
pub fn new_random($($args: usize),*) -> Self
where Standard: Distribution<N> {
Self::new_random_generic($($gargs),*)
}
}
);
/// # Constructors of statically-sized vectors or statically-sized matrices
impl<N: Scalar, R: DimName, C: DimName> MatrixMN<N, R, C>
where
DefaultAllocator: Allocator<N, R, C>,
{
// Both dimensions are known at compile time, so the generated constructors
// take no runtime size arguments.
// TODO: this is not very pretty. We could find a better call syntax.
impl_constructors!(R, C; // Arguments for Matrix<N, ..., S>
=> R: DimName, => C: DimName; // Type parameters for impl<N, ..., S>
R::name(), C::name(); // Arguments for `_generic` constructors.
); // Arguments for non-generic constructors.
}
/// # Constructors of matrices with a dynamic number of columns
impl<N: Scalar, R: DimName> MatrixMN<N, R, Dynamic>
where
DefaultAllocator: Allocator<N, R, Dynamic>,
{
// Only the column count is dynamic, so the constructors take a single
// runtime `ncols` argument.
impl_constructors!(R, Dynamic;
=> R: DimName;
R::name(), Dynamic::new(ncols);
ncols);
}
/// # Constructors of dynamic vectors and matrices with a dynamic number of rows
impl<N: Scalar, C: DimName> MatrixMN<N, Dynamic, C>
where
DefaultAllocator: Allocator<N, Dynamic, C>,
{
// Only the row count is dynamic, so the constructors take a single
// runtime `nrows` argument.
impl_constructors!(Dynamic, C;
=> C: DimName;
Dynamic::new(nrows), C::name();
nrows);
}
/// # Constructors of fully dynamic matrices
impl<N: Scalar> MatrixMN<N, Dynamic, Dynamic>
where
DefaultAllocator: Allocator<N, Dynamic, Dynamic>,
{
// Both dimensions are dynamic, so the constructors take both `nrows` and
// `ncols` at runtime.
impl_constructors!(Dynamic, Dynamic;
;
Dynamic::new(nrows), Dynamic::new(ncols);
nrows, ncols);
}
/*
*
* Constructors that don't necessarily require all dimensions
* to be specified when one dimension is already known.
*
*/
// Generates the slice/Vec-based constructors (`from_row_slice`, `from_column_slice`,
// `from_vec`) for one matrix shape.
//
// Unlike `impl_constructors!`, this macro emits the whole `impl` block itself, and
// `$gargs` may reference `$data` (e.g. to infer a dynamic dimension from the input
// length). `$args` are the runtime `usize` dimension arguments, empty when every
// dimension can be deduced from `$data` or the types.
macro_rules! impl_constructors_from_data(
($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
impl<N: Scalar, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
where DefaultAllocator: Allocator<N $(, $Dims)*> {
/// Creates a matrix with its elements filled with the components provided by a slice
/// in row-major order.
///
/// The order of elements in the slice must follow the usual mathematic writing, i.e.,
/// row-by-row.
///
/// # Example
/// ```
/// # use nalgebra::{Matrix2x3, Vector3, DVector, DMatrix};
/// # use std::iter;
///
/// let v = Vector3::from_row_slice(&[0, 1, 2]);
/// // The additional argument represents the vector dimension.
/// let dv = DVector::from_row_slice(&[0, 1, 2]);
/// let m = Matrix2x3::from_row_slice(&[0, 1, 2, 3, 4, 5]);
/// // The two additional arguments represent the matrix dimensions.
/// let dm = DMatrix::from_row_slice(2, 3, &[0, 1, 2, 3, 4, 5]);
///
/// assert!(v.x == 0 && v.y == 1 && v.z == 2);
/// assert!(dv[0] == 0 && dv[1] == 1 && dv[2] == 2);
/// assert!(m.m11 == 0 && m.m12 == 1 && m.m13 == 2 &&
/// m.m21 == 3 && m.m22 == 4 && m.m23 == 5);
/// assert!(dm[(0, 0)] == 0 && dm[(0, 1)] == 1 && dm[(0, 2)] == 2 &&
/// dm[(1, 0)] == 3 && dm[(1, 1)] == 4 && dm[(1, 2)] == 5);
/// ```
#[inline]
pub fn from_row_slice($($args: usize,)* $data: &[N]) -> Self {
Self::from_row_slice_generic($($gargs, )* $data)
}
/// Creates a matrix with its elements filled with the components provided by a slice
/// in column-major order.
///
/// # Example
/// ```
/// # use nalgebra::{Matrix2x3, Vector3, DVector, DMatrix};
/// # use std::iter;
///
/// let v = Vector3::from_column_slice(&[0, 1, 2]);
/// // The additional argument represents the vector dimension.
/// let dv = DVector::from_column_slice(&[0, 1, 2]);
/// let m = Matrix2x3::from_column_slice(&[0, 1, 2, 3, 4, 5]);
/// // The two additional arguments represent the matrix dimensions.
/// let dm = DMatrix::from_column_slice(2, 3, &[0, 1, 2, 3, 4, 5]);
///
/// assert!(v.x == 0 && v.y == 1 && v.z == 2);
/// assert!(dv[0] == 0 && dv[1] == 1 && dv[2] == 2);
/// assert!(m.m11 == 0 && m.m12 == 2 && m.m13 == 4 &&
/// m.m21 == 1 && m.m22 == 3 && m.m23 == 5);
/// assert!(dm[(0, 0)] == 0 && dm[(0, 1)] == 2 && dm[(0, 2)] == 4 &&
/// dm[(1, 0)] == 1 && dm[(1, 1)] == 3 && dm[(1, 2)] == 5);
/// ```
#[inline]
pub fn from_column_slice($($args: usize,)* $data: &[N]) -> Self {
Self::from_column_slice_generic($($gargs, )* $data)
}
/// Creates a matrix backed by a given `Vec`.
///
/// The output matrix is filled column-by-column.
///
/// # Example
/// ```
/// # use nalgebra::{DMatrix, Matrix2x3};
///
/// let m = Matrix2x3::from_vec(vec![0, 1, 2, 3, 4, 5]);
///
/// assert!(m.m11 == 0 && m.m12 == 2 && m.m13 == 4 &&
/// m.m21 == 1 && m.m22 == 3 && m.m23 == 5);
///
///
/// // The two additional arguments represent the matrix dimensions.
/// let dm = DMatrix::from_vec(2, 3, vec![0, 1, 2, 3, 4, 5]);
///
/// assert!(dm[(0, 0)] == 0 && dm[(0, 1)] == 2 && dm[(0, 2)] == 4 &&
/// dm[(1, 0)] == 1 && dm[(1, 1)] == 3 && dm[(1, 2)] == 5);
/// ```
#[inline]
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn from_vec($($args: usize,)* $data: Vec<N>) -> Self {
Self::from_vec_generic($($gargs, )* $data)
}
}
}
);
// TODO: this is not very pretty. We could find a better call syntax.
// Fully static shape: both dimensions come from the type parameters.
impl_constructors_from_data!(data; R, C; // Arguments for Matrix<N, ..., S>
=> R: DimName, => C: DimName; // Type parameters for impl<N, ..., S>
R::name(), C::name(); // Arguments for `_generic` constructors.
); // Arguments for non-generic constructors.
// Static row count: the column count is inferred from the data length.
impl_constructors_from_data!(data; R, Dynamic;
=> R: DimName;
R::name(), Dynamic::new(data.len() / R::dim());
);
// Static column count: the row count is inferred from the data length.
impl_constructors_from_data!(data; Dynamic, C;
=> C: DimName;
Dynamic::new(data.len() / C::dim()), C::name();
);
// Fully dynamic shape: both dimensions are explicit runtime arguments.
impl_constructors_from_data!(data; Dynamic, Dynamic;
;
Dynamic::new(nrows), Dynamic::new(ncols);
nrows, ncols);
/*
*
* Zero, One, Rand traits.
*
*/
impl<N, R: DimName, C: DimName> Zero for MatrixMN<N, R, C>
where
N: Scalar + Zero + ClosedAdd,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn zero() -> Self {
Self::from_element(N::zero())
}
#[inline]
fn is_zero(&self) -> bool {
self.iter().all(|e| e.is_zero())
}
}
// The multiplicative identity of a square matrix is the identity matrix,
// hence the `ClosedMul + ClosedAdd` bounds required by matrix multiplication.
impl<N, D: DimName> One for MatrixN<N, D>
where
N: Scalar + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
fn one() -> Self {
Self::identity()
}
}
impl<N, R: DimName, C: DimName> Bounded for MatrixMN<N, R, C>
where
N: Scalar + Bounded,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn max_value() -> Self {
Self::from_element(N::max_value())
}
#[inline]
fn min_value() -> Self {
Self::from_element(N::min_value())
}
}
#[cfg(feature = "rand-no-std")]
impl<N: Scalar, R: Dim, C: Dim> Distribution<MatrixMN<N, R, C>> for Standard
where
DefaultAllocator: Allocator<N, R, C>,
Standard: Distribution<N>,
{
/// Samples a matrix with components drawn from the `Standard` distribution.
///
/// Any dynamic dimension is itself sampled uniformly from `0..10`.
#[inline]
fn sample<'a, G: Rng + ?Sized>(&self, rng: &'a mut G) -> MatrixMN<N, R, C> {
let nrows = R::try_to_usize().unwrap_or_else(|| rng.gen_range(0..10));
let ncols = C::try_to_usize().unwrap_or_else(|| rng.gen_range(0..10));
MatrixMN::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| rng.gen())
}
}
#[cfg(feature = "arbitrary")]
impl<N, R, C> Arbitrary for MatrixMN<N, R, C>
where
R: Dim,
C: Dim,
N: Scalar + Arbitrary + Send,
DefaultAllocator: Allocator<N, R, C>,
Owned<N, R, C>: Clone + Send,
{
/// Generates an arbitrary matrix for quickcheck-style property testing.
///
/// Any dynamic dimension is chosen arbitrarily in `0..10`; every component is
/// generated via `N::arbitrary`.
#[inline]
fn arbitrary(g: &mut Gen) -> Self {
let nrows = R::try_to_usize().unwrap_or(usize::arbitrary(g) % 10);
let ncols = C::try_to_usize().unwrap_or(usize::arbitrary(g) % 10);
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| {
N::arbitrary(g)
})
}
}
// TODO(specialization): faster impls possible for D≤4 (see rand_distr::{UnitCircle, UnitSphere})
#[cfg(feature = "rand")]
impl<N: crate::RealField, D: DimName> Distribution<Unit<VectorN<N, D>>> for Standard
where
DefaultAllocator: Allocator<N, D>,
rand_distr::StandardNormal: Distribution<N>,
{
/// Generate a uniformly distributed random unit vector.
///
/// Normalizing a vector of i.i.d. standard-normal components yields a uniform
/// sample on the unit sphere.
#[inline]
fn sample<'a, G: Rng + ?Sized>(&self, rng: &'a mut G) -> Unit<VectorN<N, D>> {
Unit::new_normalize(VectorN::from_distribution_generic(
D::name(),
U1,
&rand_distr::StandardNormal,
rng,
))
}
}
/*
*
* Constructors for small matrices and vectors.
*
*/
// Generates `MatrixMN::new(...)` constructors taking one scalar per component.
//
// Each `$args:($irow,$icol)` pair names a constructor parameter and the
// (row, column) cell it initializes; every cell of the `$R x $C` matrix must
// appear exactly once so the uninitialized storage is fully written.
macro_rules! componentwise_constructors_impl(
($($R: ty, $C: ty, $($args: ident:($irow: expr,$icol: expr)),*);* $(;)*) => {$(
impl<N> MatrixMN<N, $R, $C>
where N: Scalar,
DefaultAllocator: Allocator<N, $R, $C> {
/// Initializes this matrix from its components.
#[inline]
pub fn new($($args: N),*) -> Self {
unsafe {
// With `no_unsound_assume_init` the unsound `assume_init` shortcut is
// disabled and this path panics via `unimplemented!()` instead.
#[cfg(feature="no_unsound_assume_init")]
let mut res: Self = unimplemented!();
#[cfg(not(feature="no_unsound_assume_init"))]
let mut res = Self::new_uninitialized().assume_init();
$( *res.get_unchecked_mut(($irow, $icol)) = $args; )*
res
}
}
}
)*}
);
// Instantiates the component-wise `new` constructors for every small matrix and
// vector shape up to 6x6. Parameter `mIJ` maps to cell (I-1, J-1); vectors use
// the conventional `x, y, z, w, a, b` names.
componentwise_constructors_impl!(
/*
* Square matrices 1 .. 6.
*/
U2, U2, m11:(0,0), m12:(0,1),
m21:(1,0), m22:(1,1);
U3, U3, m11:(0,0), m12:(0,1), m13:(0,2),
m21:(1,0), m22:(1,1), m23:(1,2),
m31:(2,0), m32:(2,1), m33:(2,2);
U4, U4, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3),
m31:(2,0), m32:(2,1), m33:(2,2), m34:(2,3),
m41:(3,0), m42:(3,1), m43:(3,2), m44:(3,3);
U5, U5, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3), m15:(0,4),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3), m25:(1,4),
m31:(2,0), m32:(2,1), m33:(2,2), m34:(2,3), m35:(2,4),
m41:(3,0), m42:(3,1), m43:(3,2), m44:(3,3), m45:(3,4),
m51:(4,0), m52:(4,1), m53:(4,2), m54:(4,3), m55:(4,4);
U6, U6, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3), m15:(0,4), m16:(0,5),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3), m25:(1,4), m26:(1,5),
m31:(2,0), m32:(2,1), m33:(2,2), m34:(2,3), m35:(2,4), m36:(2,5),
m41:(3,0), m42:(3,1), m43:(3,2), m44:(3,3), m45:(3,4), m46:(3,5),
m51:(4,0), m52:(4,1), m53:(4,2), m54:(4,3), m55:(4,4), m56:(4,5),
m61:(5,0), m62:(5,1), m63:(5,2), m64:(5,3), m65:(5,4), m66:(5,5);
/*
* Rectangular matrices with 2 rows.
*/
U2, U3, m11:(0,0), m12:(0,1), m13:(0,2),
m21:(1,0), m22:(1,1), m23:(1,2);
U2, U4, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3);
U2, U5, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3), m15:(0,4),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3), m25:(1,4);
U2, U6, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3), m15:(0,4), m16:(0,5),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3), m25:(1,4), m26:(1,5);
/*
* Rectangular matrices with 3 rows.
*/
U3, U2, m11:(0,0), m12:(0,1),
m21:(1,0), m22:(1,1),
m31:(2,0), m32:(2,1);
U3, U4, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3),
m31:(2,0), m32:(2,1), m33:(2,2), m34:(2,3);
U3, U5, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3), m15:(0,4),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3), m25:(1,4),
m31:(2,0), m32:(2,1), m33:(2,2), m34:(2,3), m35:(2,4);
U3, U6, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3), m15:(0,4), m16:(0,5),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3), m25:(1,4), m26:(1,5),
m31:(2,0), m32:(2,1), m33:(2,2), m34:(2,3), m35:(2,4), m36:(2,5);
/*
* Rectangular matrices with 4 rows.
*/
U4, U2, m11:(0,0), m12:(0,1),
m21:(1,0), m22:(1,1),
m31:(2,0), m32:(2,1),
m41:(3,0), m42:(3,1);
U4, U3, m11:(0,0), m12:(0,1), m13:(0,2),
m21:(1,0), m22:(1,1), m23:(1,2),
m31:(2,0), m32:(2,1), m33:(2,2),
m41:(3,0), m42:(3,1), m43:(3,2);
U4, U5, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3), m15:(0,4),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3), m25:(1,4),
m31:(2,0), m32:(2,1), m33:(2,2), m34:(2,3), m35:(2,4),
m41:(3,0), m42:(3,1), m43:(3,2), m44:(3,3), m45:(3,4);
U4, U6, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3), m15:(0,4), m16:(0,5),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3), m25:(1,4), m26:(1,5),
m31:(2,0), m32:(2,1), m33:(2,2), m34:(2,3), m35:(2,4), m36:(2,5),
m41:(3,0), m42:(3,1), m43:(3,2), m44:(3,3), m45:(3,4), m46:(3,5);
/*
* Rectangular matrices with 5 rows.
*/
U5, U2, m11:(0,0), m12:(0,1),
m21:(1,0), m22:(1,1),
m31:(2,0), m32:(2,1),
m41:(3,0), m42:(3,1),
m51:(4,0), m52:(4,1);
U5, U3, m11:(0,0), m12:(0,1), m13:(0,2),
m21:(1,0), m22:(1,1), m23:(1,2),
m31:(2,0), m32:(2,1), m33:(2,2),
m41:(3,0), m42:(3,1), m43:(3,2),
m51:(4,0), m52:(4,1), m53:(4,2);
U5, U4, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3),
m31:(2,0), m32:(2,1), m33:(2,2), m34:(2,3),
m41:(3,0), m42:(3,1), m43:(3,2), m44:(3,3),
m51:(4,0), m52:(4,1), m53:(4,2), m54:(4,3);
U5, U6, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3), m15:(0,4), m16:(0,5),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3), m25:(1,4), m26:(1,5),
m31:(2,0), m32:(2,1), m33:(2,2), m34:(2,3), m35:(2,4), m36:(2,5),
m41:(3,0), m42:(3,1), m43:(3,2), m44:(3,3), m45:(3,4), m46:(3,5),
m51:(4,0), m52:(4,1), m53:(4,2), m54:(4,3), m55:(4,4), m56:(4,5);
/*
* Rectangular matrices with 6 rows.
*/
U6, U2, m11:(0,0), m12:(0,1),
m21:(1,0), m22:(1,1),
m31:(2,0), m32:(2,1),
m41:(3,0), m42:(3,1),
m51:(4,0), m52:(4,1),
m61:(5,0), m62:(5,1);
U6, U3, m11:(0,0), m12:(0,1), m13:(0,2),
m21:(1,0), m22:(1,1), m23:(1,2),
m31:(2,0), m32:(2,1), m33:(2,2),
m41:(3,0), m42:(3,1), m43:(3,2),
m51:(4,0), m52:(4,1), m53:(4,2),
m61:(5,0), m62:(5,1), m63:(5,2);
U6, U4, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3),
m31:(2,0), m32:(2,1), m33:(2,2), m34:(2,3),
m41:(3,0), m42:(3,1), m43:(3,2), m44:(3,3),
m51:(4,0), m52:(4,1), m53:(4,2), m54:(4,3),
m61:(5,0), m62:(5,1), m63:(5,2), m64:(5,3);
U6, U5, m11:(0,0), m12:(0,1), m13:(0,2), m14:(0,3), m15:(0,4),
m21:(1,0), m22:(1,1), m23:(1,2), m24:(1,3), m25:(1,4),
m31:(2,0), m32:(2,1), m33:(2,2), m34:(2,3), m35:(2,4),
m41:(3,0), m42:(3,1), m43:(3,2), m44:(3,3), m45:(3,4),
m51:(4,0), m52:(4,1), m53:(4,2), m54:(4,3), m55:(4,4),
m61:(5,0), m62:(5,1), m63:(5,2), m64:(5,3), m65:(5,4);
/*
* Row vectors 1 .. 6.
*/
U1, U1, x:(0,0);
U1, U2, x:(0,0), y:(0,1);
U1, U3, x:(0,0), y:(0,1), z:(0,2);
U1, U4, x:(0,0), y:(0,1), z:(0,2), w:(0,3);
U1, U5, x:(0,0), y:(0,1), z:(0,2), w:(0,3), a:(0,4);
U1, U6, x:(0,0), y:(0,1), z:(0,2), w:(0,3), a:(0,4), b:(0,5);
/*
* Column vectors 1 .. 6.
*/
U2, U1, x:(0,0), y:(1,0);
U3, U1, x:(0,0), y:(1,0), z:(2,0);
U4, U1, x:(0,0), y:(1,0), z:(2,0), w:(3,0);
U5, U1, x:(0,0), y:(1,0), z:(2,0), w:(3,0), a:(4,0);
U6, U1, x:(0,0), y:(1,0), z:(2,0), w:(3,0), a:(4,0), b:(5,0);
);
/*
*
* Axis constructors.
*
*/
impl<N, R: DimName> VectorN<N, R>
where
    N: Scalar + Zero + One,
    DefaultAllocator: Allocator<N, R>,
{
    /// The column vector with `val` as its i-th component.
    #[inline]
    pub fn ith(i: usize, val: N) -> Self {
        let mut res = Self::zeros();
        res[i] = val;
        res
    }

    /// The column unit vector with `N::one()` as its i-th component.
    #[inline]
    pub fn ith_axis(i: usize) -> Unit<Self> {
        Unit::new_unchecked(Self::ith(i, N::one()))
    }

    /// The column vector with a 1 as its first component, and zero elsewhere.
    #[inline]
    pub fn x() -> Self
    where
        R::Value: Cmp<typenum::U0, Output = Greater>,
    {
        // The typenum bound guarantees at least 1 row, so index 0 is valid.
        Self::ith(0, N::one())
    }

    /// The column vector with a 1 as its second component, and zero elsewhere.
    #[inline]
    pub fn y() -> Self
    where
        R::Value: Cmp<typenum::U1, Output = Greater>,
    {
        // The typenum bound guarantees at least 2 rows, so index 1 is valid.
        Self::ith(1, N::one())
    }

    /// The column vector with a 1 as its third component, and zero elsewhere.
    #[inline]
    pub fn z() -> Self
    where
        R::Value: Cmp<typenum::U2, Output = Greater>,
    {
        // The typenum bound guarantees at least 3 rows, so index 2 is valid.
        Self::ith(2, N::one())
    }

    /// The column vector with a 1 as its fourth component, and zero elsewhere.
    #[inline]
    pub fn w() -> Self
    where
        R::Value: Cmp<typenum::U3, Output = Greater>,
    {
        // The typenum bound guarantees at least 4 rows, so index 3 is valid.
        Self::ith(3, N::one())
    }

    /// The column vector with a 1 as its fifth component, and zero elsewhere.
    #[inline]
    pub fn a() -> Self
    where
        R::Value: Cmp<typenum::U4, Output = Greater>,
    {
        // The typenum bound guarantees at least 5 rows, so index 4 is valid.
        Self::ith(4, N::one())
    }

    /// The column vector with a 1 as its sixth component, and zero elsewhere.
    #[inline]
    pub fn b() -> Self
    where
        R::Value: Cmp<typenum::U5, Output = Greater>,
    {
        // The typenum bound guarantees at least 6 rows, so index 5 is valid.
        Self::ith(5, N::one())
    }

    /// The unit column vector with a 1 as its first component, and zero elsewhere.
    #[inline]
    pub fn x_axis() -> Unit<Self>
    where
        R::Value: Cmp<typenum::U0, Output = Greater>,
    {
        Unit::new_unchecked(Self::x())
    }

    /// The unit column vector with a 1 as its second component, and zero elsewhere.
    #[inline]
    pub fn y_axis() -> Unit<Self>
    where
        R::Value: Cmp<typenum::U1, Output = Greater>,
    {
        Unit::new_unchecked(Self::y())
    }

    /// The unit column vector with a 1 as its third component, and zero elsewhere.
    #[inline]
    pub fn z_axis() -> Unit<Self>
    where
        R::Value: Cmp<typenum::U2, Output = Greater>,
    {
        Unit::new_unchecked(Self::z())
    }

    /// The unit column vector with a 1 as its fourth component, and zero elsewhere.
    #[inline]
    pub fn w_axis() -> Unit<Self>
    where
        R::Value: Cmp<typenum::U3, Output = Greater>,
    {
        Unit::new_unchecked(Self::w())
    }

    /// The unit column vector with a 1 as its fifth component, and zero elsewhere.
    #[inline]
    pub fn a_axis() -> Unit<Self>
    where
        R::Value: Cmp<typenum::U4, Output = Greater>,
    {
        Unit::new_unchecked(Self::a())
    }

    /// The unit column vector with a 1 as its sixth component, and zero elsewhere.
    #[inline]
    pub fn b_axis() -> Unit<Self>
    where
        R::Value: Cmp<typenum::U5, Output = Greater>,
    {
        Unit::new_unchecked(Self::b())
    }
}
| 36.787428 | 146 | 0.490737 |
9c37ef24554db4c6e0c72a4fe8ee1d70f16b92b2 | 4,082 | #![allow(dead_code)]
use std::net::Ipv4Addr;
use ipnetwork::Ipv4Network;
/// Decides whether `client`'s address should be propagated to `server`.
///
/// Public (global) client addresses are always propagated; private or loopback
/// client addresses are propagated only when client and server fall within the
/// same network (as computed by `are_addresses_in_same_network`).
///
/// # Panics
/// Panics if either argument is not a valid IPv4 address.
pub fn should_client_be_propagated(client: &str, server: &str) -> bool {
    let client_ip: Ipv4Addr = client.parse().unwrap();
    let server_ip: Ipv4Addr = server.parse().unwrap();
    // A plain conditional replaces the `match` on a bool
    // (clippy::match_bool anti-pattern); behavior is unchanged.
    if client_ip.is_private() || client_ip.is_loopback() {
        are_addresses_in_same_network(client_ip, server_ip)
    } else {
        true
    }
}
pub fn get_subnet_mask(ip: Ipv4Addr) -> u8 {
let mut i = 0;
let reserved_masks_list: &[u8] = &[8, 12, 16, 32];
let reserved_ip_ranges: &[Ipv4Network] = &[
Ipv4Network::new(Ipv4Addr::new(10, 0, 0, 0), reserved_masks_list[0]).unwrap(),
Ipv4Network::new(Ipv4Addr::new(172, 16, 0, 0), reserved_masks_list[1]).unwrap(),
Ipv4Network::new(Ipv4Addr::new(192, 168, 0, 0), reserved_masks_list[2]).unwrap()
];
for range in reserved_ip_ranges {
if range.contains(ip) {
return reserved_masks_list[i];
}
i = i + 1;
}
reserved_masks_list[i]
}
/// Returns `true` when `left` and `right` belong to the same assumed network.
///
/// Each address is widened to the network implied by [`get_subnet_mask`];
/// the peers match when `left`'s network contains the base address of
/// `right`'s network.
pub fn are_addresses_in_same_network(left: Ipv4Addr, right: Ipv4Addr) -> bool {
    let left_network = Ipv4Network::new(left, get_subnet_mask(left)).unwrap();
    let right_network = Ipv4Network::new(right, get_subnet_mask(right)).unwrap();
    left_network.contains(right_network.network())
}
#[cfg(test)]
/// Pairwise tests for `should_client_be_propagated`, covering global,
/// RFC 1918 private (10/8, 172.16/12, 192.168/16), and loopback peers.
mod test {
    use super::*;
    #[test]
    fn should_return_true_when_client_and_server_are_global() {
        let client = "8.8.8.8";
        let server = "42.65.97.120";
        assert!(should_client_be_propagated(client, server));
    }
    #[test]
    fn should_return_true_when_same_network() {
        let client = "10.0.1.3";
        let server = "10.45.65.99";
        assert!(should_client_be_propagated(client, server));
    }
    #[test]
    fn should_return_true_when_client_is_global_and_server_is_local_network() {
        let client = "8.8.8.8";
        let server = "192.168.1.1";
        assert!(should_client_be_propagated(client, server));
    }
    #[test]
    fn should_return_false_when_client_is_local_network_and_server_is_global() {
        let client = "192.168.0.1";
        let server = "8.8.8.8";
        assert!(!should_client_be_propagated(client, server));
    }
    #[test]
    fn should_return_true_when_client_is_global_and_server_is_localhost() {
        let client = "8.8.8.8";
        let server = "127.0.0.1";
        assert!(should_client_be_propagated(client, server));
    }
    #[test]
    fn should_return_false_when_client_is_localhost_and_server_is_global() {
        let client = "127.0.0.1";
        let server = "8.8.8.8";
        assert!(!should_client_be_propagated(client, server));
    }
    #[test]
    fn should_return_false_when_client_is_local_network_and_server_is_localhost() {
        let client = "192.168.1.1";
        let server = "127.0.0.1";
        assert!(!should_client_be_propagated(client, server));
    }
    #[test]
    fn should_return_false_when_client_is_localhost_and_server_is_local_network() {
        let client = "127.0.0.1";
        let server = "192.168.1.1";
        assert!(!should_client_be_propagated(client, server));
    }
    #[test]
    fn should_return_true_when_same_local_network() {
        let client = "192.168.2.2";
        let server = "192.168.1.1";
        assert!(should_client_be_propagated(client, server));
    }
    #[test]
    fn should_return_false_when_different_local_network() {
        let client = "10.0.0.1";
        let server = "192.168.1.1";
        assert!(!should_client_be_propagated(client, server));
    }
    #[test]
    fn should_return_true_when_same_172_network() {
        let client = "172.16.0.1";
        let server = "172.31.254.254";
        assert!(should_client_be_propagated(client, server));
    }
    #[test]
    fn should_return_false_when_different_172_network() {
        let client = "172.16.0.1";
        let server = "172.32.0.1";
        assert!(!should_client_be_propagated(client, server));
    }
}
3a02071ff22741025dcfd9bc1909af294bd42f19 | 711 | //! Efficient wasm-implementation of an n-dimensional Array for Javascript
//!
//! Wasm restrictions on Rust (at least at wasm boundary):
//! - No generics
//! - No polymorphism (no traits)
//! - No lifetimes
//!
//! Solutions:
//! - no generics => enum NdarrayUnion for different type parameters for Ndarray
//! - no polymorphism => enum Subview for different behavior of ndarrays
//! - no lifetimes => Using reference counting (std::rc::Rc), unsafe
//!
//! Using enums requires minimally more memory (2 enums = 2 Byte)
mod iter;
mod js_interop;
pub mod ndarray;
mod utils;
use wasm_bindgen::prelude::*;
pub use ndarray::*;
// #[wasm_bindgen(start)]
// pub fn main() {
// utils::set_panic_hook();
// }
| 25.392857 | 80 | 0.686357 |
b96e097b9810e12b13dda54d35ed191146e61788 | 2,279 | // Copyright (c) Microsoft. All rights reserved.
use std::fmt;
use failure::Fail;
use futures::Future;
/// Authentication scheme associated with a module identity.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub enum AuthType {
    /// No credentials.
    None,
    /// SAS-token-based authentication.
    Sas,
    /// X.509 certificate authentication.
    X509,
}
impl fmt::Display for AuthType {
    /// Writes the variant name exactly as it appears in source
    /// ("None" / "Sas" / "X509").
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match self {
            AuthType::None => "None",
            AuthType::Sas => "Sas",
            AuthType::X509 => "X509",
        })
    }
}
/// Read-only view of a module identity as known to the identity store.
pub trait Identity {
    /// Identifier of the module this identity belongs to.
    fn module_id(&self) -> &str;
    /// Name of the component that owns/manages this identity.
    fn managed_by(&self) -> &str;
    /// Generation id distinguishing re-created identities with the same name.
    fn generation_id(&self) -> &str;
    /// Authentication scheme configured for this identity.
    fn auth_type(&self) -> AuthType;
}
/// Parameters identifying a module identity for create/update/get/delete calls.
pub struct IdentitySpec {
    module_id: String,
    generation_id: Option<String>,
    managed_by: Option<String>,
}

impl IdentitySpec {
    /// Creates a spec for `module_id` with no generation id or owner set.
    pub fn new(module_id: &str) -> IdentitySpec {
        IdentitySpec {
            module_id: module_id.to_owned(),
            generation_id: None,
            managed_by: None,
        }
    }

    /// The module identifier this spec refers to.
    pub fn module_id(&self) -> &str {
        self.module_id.as_str()
    }

    /// Generation id, when one was supplied via [`IdentitySpec::with_generation_id`].
    pub fn generation_id(&self) -> Option<&String> {
        self.generation_id.as_ref()
    }

    /// Builder-style setter for the generation id.
    pub fn with_generation_id(self, generation_id: String) -> Self {
        IdentitySpec {
            generation_id: Some(generation_id),
            ..self
        }
    }

    /// Owner recorded for the identity, when one was supplied.
    pub fn managed_by(&self) -> Option<&String> {
        self.managed_by.as_ref()
    }

    /// Builder-style setter for the owner.
    pub fn with_managed_by(self, managed_by: String) -> Self {
        IdentitySpec {
            managed_by: Some(managed_by),
            ..self
        }
    }
}
/// CRUD interface over a store of module identities.
///
/// All operations are future-based; each associated future type pairs the
/// operation's result with the implementation-specific error type.
pub trait IdentityManager {
    /// Concrete identity type returned by this manager.
    type Identity: Identity;
    /// Error type produced by failed operations.
    type Error: Fail;
    type CreateFuture: Future<Item = Self::Identity, Error = Self::Error>;
    type UpdateFuture: Future<Item = Self::Identity, Error = Self::Error>;
    type ListFuture: Future<Item = Vec<Self::Identity>, Error = Self::Error>;
    /// Lookup resolves to `None` when no identity matches the spec.
    type GetFuture: Future<Item = Option<Self::Identity>, Error = Self::Error>;
    type DeleteFuture: Future<Item = (), Error = Self::Error>;
    fn create(&mut self, id: IdentitySpec) -> Self::CreateFuture;
    fn update(&mut self, id: IdentitySpec) -> Self::UpdateFuture;
    fn list(&self) -> Self::ListFuture;
    fn get(&self, id: IdentitySpec) -> Self::GetFuture;
    fn delete(&mut self, id: IdentitySpec) -> Self::DeleteFuture;
}
| 26.5 | 79 | 0.607723 |
effd07de0840ac71f31114959b9926804eaef681 | 1,886 | #[doc = "Register `OUTLINK_DSCR_BF1` reader"]
pub struct R(crate::R<OUTLINK_DSCR_BF1_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<OUTLINK_DSCR_BF1_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<OUTLINK_DSCR_BF1_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<OUTLINK_DSCR_BF1_SPEC>) -> Self {
R(reader)
}
}
// Field reader for DMA_OUTLINK_DSCR_BF1. The field occupies bits 0:31 — the
// whole register — so no mask or shift is applied when reading it.
#[doc = "Field `DMA_OUTLINK_DSCR_BF1` reader - The content of current out descriptor data buffer pointer."]
pub struct DMA_OUTLINK_DSCR_BF1_R(crate::FieldReader<u32, u32>);
impl DMA_OUTLINK_DSCR_BF1_R {
    #[inline(always)]
    pub(crate) fn new(bits: u32) -> Self {
        DMA_OUTLINK_DSCR_BF1_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for DMA_OUTLINK_DSCR_BF1_R {
    type Target = crate::FieldReader<u32, u32>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl R {
    #[doc = "Bits 0:31 - The content of current out descriptor data buffer pointer."]
    #[inline(always)]
    pub fn dma_outlink_dscr_bf1(&self) -> DMA_OUTLINK_DSCR_BF1_R {
        // `self.bits` is already u32; the cast is a generator artifact (no-op).
        DMA_OUTLINK_DSCR_BF1_R::new(self.bits as u32)
    }
}
// Register specification: 32-bit, readable only (no `Writable` impl is
// generated), with a reset value of 0.
#[doc = "Current SPI DMA TX buffer pointer\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [outlink_dscr_bf1](index.html) module"]
pub struct OUTLINK_DSCR_BF1_SPEC;
impl crate::RegisterSpec for OUTLINK_DSCR_BF1_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [outlink_dscr_bf1::R](R) reader structure"]
impl crate::Readable for OUTLINK_DSCR_BF1_SPEC {
    type Reader = R;
}
#[doc = "`reset()` method sets OUTLINK_DSCR_BF1 to value 0"]
impl crate::Resettable for OUTLINK_DSCR_BF1_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        0
    }
}
6406139a97c1a02820abfd8fef7b2e51852d1c9c | 1,735 | use crate::features::imgui::extract::ImGuiExtractJobImpl;
use crate::imgui_support::ImGuiDrawData;
use crate::render_contexts::{
RenderJobExtractContext, RenderJobPrepareContext, RenderJobWriteContext,
};
use ash::vk::Extent2D;
use atelier_assets::loader::handle::Handle;
use rafx::assets::MaterialAsset;
use rafx::nodes::ExtractJob;
use rafx::nodes::RenderFeature;
use rafx::nodes::RenderFeatureIndex;
use rafx::resources::{ImageViewResource, ResourceArc, VertexDataLayout, VertexDataSetLayout};
use std::convert::TryInto;
mod extract;
mod prepare;
mod write;
/// Builds the boxed extract job that pulls ImGui draw data into the render
/// job pipeline (extract → prepare → write contexts).
pub fn create_imgui_extract_job() -> Box<dyn ExtractJob<RenderJobExtractContext, RenderJobPrepareContext, RenderJobWriteContext>> {
    Box::new(ImGuiExtractJobImpl::new())
}
/// Per-pass "global" data: the uniform-buffer layout generated from the
/// `imgui.vert` shader.
pub type ImGuiUniformBufferObject = shaders::imgui_vert::ArgsUniform;
lazy_static::lazy_static! {
    // Vertex layout mirroring `imgui::DrawVert`: position and UV as pairs of
    // f32 (R32G32_SFLOAT), color as packed RGBA8 (R8G8B8A8_UNORM). Built from
    // a zeroed DrawVert so member offsets are taken from the real struct.
    pub static ref IMGUI_VERTEX_LAYOUT : VertexDataSetLayout = {
        use rafx::resources::vk_description::Format;

        let vertex = imgui::DrawVert {
            pos: Default::default(),
            col: Default::default(),
            uv: Default::default()
        };

        VertexDataLayout::build_vertex_layout(&vertex, |builder, vertex| {
            builder.add_member(&vertex.pos, "POSITION", Format::R32G32_SFLOAT);
            builder.add_member(&vertex.uv, "TEXCOORD", Format::R32G32_SFLOAT);
            builder.add_member(&vertex.col, "COLOR", Format::R8G8B8A8_UNORM);
        }).into_set()
    };
}
// NOTE(review): this declares ImGuiRenderFeature but passes a constant named
// DEBUG_3D_FEATURE_INDEX — looks copy-pasted from the debug-3d feature.
// Verify the index value is actually unique to the ImGui feature.
rafx::declare_render_feature!(ImGuiRenderFeature, DEBUG_3D_FEATURE_INDEX);
/// Draw data captured from the ImGui context during the extract phase.
pub(self) struct ExtractedImGuiData {
    // `None` presumably means no UI draw data was produced this frame —
    // confirm against the extract job.
    imgui_draw_data: Option<ImGuiDrawData>,
}
#[derive(Debug)]
/// A contiguous range of elements to draw from the ImGui buffers.
struct ImGuiDrawCall {
    // Offset of the first element of this call.
    first_element: u32,
    // Number of elements in this call.
    count: u32,
}
| 31.545455 | 131 | 0.721037 |
1dd9552a3b5319adf3e84b971427c8c1ea533cd9 | 83 | enum wrapper<T> { wrapped(T), }
fn main() { let w = wrapped(~[1, 2, 3, 4, 5]); }
| 16.6 | 48 | 0.506024 |
e6673ec38b8587b6d0f5b38570389d14efbb590e | 323 | use crate::generated::CssSyntaxFactory;
use rome_css_syntax::CssLanguage;
use rome_rowan::TreeBuilder;
mod generated;
// Re-exported for tests
#[doc(hidden)]
pub use rome_css_syntax as syntax;
pub type CssSyntaxTreeBuilder = TreeBuilder<'static, CssLanguage, CssSyntaxFactory>;
pub use generated::node_factory as make;
| 23.071429 | 84 | 0.798762 |
75e9f4e6cea298295b6681b56feea16083bb5b2a | 9,938 | use super::Transform;
use crate::{
config::{DataType, TransformConfig, TransformContext},
event::{Event, Value},
};
use serde::{Deserialize, Serialize};
use string_cache::DefaultAtom as Atom;
use std::str::FromStr;
use tracing::field;
/// User-facing configuration for the `geoip` transform.
#[derive(Deserialize, Serialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct GeoipConfig {
    /// Event field containing the IP address to look up.
    pub source: Atom,
    /// Path to the MaxMind database file opened at build time.
    pub database: String,
    /// Field prefix under which geo attributes are written; defaults to "geoip".
    #[serde(default = "default_geoip_target_field")]
    pub target: String,
}
/// Runtime state of the `geoip` transform: an open MaxMind reader plus the
/// source field and target prefix taken from [`GeoipConfig`].
pub struct Geoip {
    pub dbreader: maxminddb::Reader<Vec<u8>>,
    pub source: Atom,
    pub target: String,
}
/// Default value of the `target` config field when the user omits it.
fn default_geoip_target_field() -> String {
    String::from("geoip")
}
#[typetag::serde(name = "geoip")]
impl TransformConfig for GeoipConfig {
    /// Opens the MaxMind database file and builds the runtime transform.
    ///
    /// Fails (propagating the error) when the database cannot be opened or
    /// parsed.
    fn build(&self, _cx: TransformContext) -> Result<Box<dyn Transform>, crate::Error> {
        let reader = maxminddb::Reader::open_readfile(self.database.clone())?;
        Ok(Box::new(Geoip::new(
            reader,
            self.source.clone(),
            self.target.clone(),
        )))
    }
    /// Consumes log events.
    fn input_type(&self) -> DataType {
        DataType::Log
    }
    /// Produces log events.
    fn output_type(&self) -> DataType {
        DataType::Log
    }
    /// Type name used to identify this transform in configuration.
    fn transform_type(&self) -> &'static str {
        "geoip"
    }
}
impl Geoip {
    /// Builds the transform from an already-opened MaxMind reader, the event
    /// field holding the IP address, and the output field prefix.
    pub fn new(dbreader: maxminddb::Reader<Vec<u8>>, source: Atom, target: String) -> Self {
        Self { dbreader, source, target }
    }
}
impl Transform for Geoip {
    /// Enriches `event` with GeoIP attributes and always returns it.
    ///
    /// Reads the configured `source` field, parses it as an IP address, and
    /// looks it up in the MaxMind City database. Resolved attributes are
    /// written as `<target>.city_name`, `.continent_code`, `.country_code`,
    /// `.timezone`, `.latitude`, `.longitude` and `.postal_code`. Any of
    /// those fields still missing afterwards (parse failure, missing source
    /// field, or partial lookup) is set to the empty string so downstream
    /// consumers can tell the transform ran.
    fn transform(&mut self, mut event: Event) -> Option<Event> {
        let target_field = self.target.clone();
        let ipaddress = event
            .as_log()
            .get(&self.source)
            .map(|s| s.to_string_lossy());
        if let Some(ipaddress) = &ipaddress {
            if let Ok(ip) = FromStr::from_str(ipaddress) {
                if let Ok(data) = self.dbreader.lookup::<maxminddb::geoip2::City>(ip) {
                    // Only the English ("en") localization of the city name is used.
                    if let Some(city_names) = data.city.and_then(|c| c.names) {
                        if let Some(city_name_en) = city_names.get("en") {
                            event.as_mut_log().insert(
                                Atom::from(format!("{}.city_name", target_field)),
                                Value::from(city_name_en.to_string()),
                            );
                        }
                    }
                    let continent_code = data.continent.and_then(|c| c.code);
                    if let Some(continent_code) = continent_code {
                        event.as_mut_log().insert(
                            Atom::from(format!("{}.continent_code", target_field)),
                            Value::from(continent_code),
                        );
                    }
                    // ISO country code (e.g. "GB") — see the tests below.
                    let iso_code = data.country.and_then(|cy| cy.iso_code);
                    if let Some(iso_code) = iso_code {
                        event.as_mut_log().insert(
                            Atom::from(format!("{}.country_code", target_field)),
                            Value::from(iso_code),
                        );
                    }
                    let time_zone = data.location.clone().and_then(|loc| loc.time_zone);
                    if let Some(time_zone) = time_zone {
                        event.as_mut_log().insert(
                            Atom::from(format!("{}.timezone", target_field)),
                            Value::from(time_zone),
                        );
                    }
                    // Latitude/longitude are numeric in the database but are
                    // stringified before being inserted into the event.
                    let latitude = data.location.clone().and_then(|loc| loc.latitude);
                    if let Some(latitude) = latitude {
                        event.as_mut_log().insert(
                            Atom::from(format!("{}.latitude", target_field)),
                            Value::from(latitude.to_string()),
                        );
                    }
                    let longitude = data.location.clone().and_then(|loc| loc.longitude);
                    if let Some(longitude) = longitude {
                        event.as_mut_log().insert(
                            Atom::from(format!("{}.longitude", target_field)),
                            Value::from(longitude.to_string()),
                        );
                    }
                    let postal_code = data.postal.clone().and_then(|p| p.code);
                    if let Some(postal_code) = postal_code {
                        event.as_mut_log().insert(
                            Atom::from(format!("{}.postal_code", target_field)),
                            Value::from(postal_code),
                        );
                    }
                }
            } else {
                // Lookup is skipped entirely on parse failure; the fill-in
                // loop below still stamps empty fields.
                debug!(
                    message = "IP Address not parsed correctly.",
                    ipaddr = &field::display(&ipaddress),
                );
            }
        } else {
            debug!(
                message = "Field does not exist.",
                field = self.source.as_ref(),
            );
        };
        // If we have any of the geoip fields missing, we insert
        // empty values so that we know that the transform was executed
        // but the lookup didn't find the result
        let geoip_fields = [
            format!("{}.city_name", target_field),
            format!("{}.country_code", target_field),
            format!("{}.continent_code", target_field),
            format!("{}.timezone", target_field),
            format!("{}.latitude", target_field),
            format!("{}.longitude", target_field),
            format!("{}.postal_code", target_field),
        ];
        for field in geoip_fields.iter() {
            let e = event.as_mut_log();
            if e.get(&Atom::from(field.to_string())).is_none() {
                e.insert(Atom::from(field.to_string()), Value::from(""));
            }
        }
        Some(event)
    }
}
#[cfg(feature = "transforms-json_parser")]
#[cfg(test)]
/// Tests against the bundled MaxMind test database
/// (`tests/data/GeoIP2-City-Test.mmdb`): full hit, partial hit, and miss.
mod tests {
    use super::Geoip;
    use crate::{
        event::Event,
        transforms::json_parser::{JsonParser, JsonParserConfig},
        transforms::Transform,
    };
    use std::collections::HashMap;
    use string_cache::DefaultAtom as Atom;
    #[test]
    fn geoip_lookup_success() {
        let mut parser = JsonParser::from(JsonParserConfig::default());
        let event = Event::from(r#"{"remote_addr": "2.125.160.216", "request_path": "foo/bar"}"#);
        let event = parser.transform(event).unwrap();
        let reader = maxminddb::Reader::open_readfile("tests/data/GeoIP2-City-Test.mmdb").unwrap();
        let mut augment = Geoip::new(reader, Atom::from("remote_addr"), "geo".to_string());
        let new_event = augment.transform(event).unwrap();
        let mut exp_geoip_attr = HashMap::new();
        exp_geoip_attr.insert("city_name", "Boxford");
        exp_geoip_attr.insert("country_code", "GB");
        exp_geoip_attr.insert("continent_code", "EU");
        exp_geoip_attr.insert("timezone", "Europe/London");
        exp_geoip_attr.insert("latitude", "51.75");
        exp_geoip_attr.insert("longitude", "-1.25");
        exp_geoip_attr.insert("postal_code", "OX1");
        for field in exp_geoip_attr.keys() {
            let k = Atom::from(format!("geo.{}", field).to_string());
            let geodata = new_event.as_log().get(&k).unwrap().to_string_lossy();
            assert_eq!(&geodata, exp_geoip_attr.get(field).expect("field exists"));
        }
    }
    #[test]
    fn geoip_lookup_partial_results() {
        // This address resolves country/continent data but no city or postal
        // code; those fields must come back as empty strings.
        let mut parser = JsonParser::from(JsonParserConfig::default());
        let event = Event::from(r#"{"remote_addr": "67.43.156.9", "request_path": "foo/bar"}"#);
        let event = parser.transform(event).unwrap();
        let reader = maxminddb::Reader::open_readfile("tests/data/GeoIP2-City-Test.mmdb").unwrap();
        let mut augment = Geoip::new(reader, Atom::from("remote_addr"), "geo".to_string());
        let new_event = augment.transform(event).unwrap();
        let mut exp_geoip_attr = HashMap::new();
        exp_geoip_attr.insert("city_name", "");
        exp_geoip_attr.insert("country_code", "BT");
        exp_geoip_attr.insert("continent_code", "AS");
        exp_geoip_attr.insert("timezone", "Asia/Thimphu");
        exp_geoip_attr.insert("latitude", "27.5");
        exp_geoip_attr.insert("longitude", "90.5");
        exp_geoip_attr.insert("postal_code", "");
        for field in exp_geoip_attr.keys() {
            let k = Atom::from(format!("geo.{}", field).to_string());
            let geodata = new_event.as_log().get(&k).unwrap().to_string_lossy();
            assert_eq!(&geodata, exp_geoip_attr.get(field).expect("field exists"));
        }
    }
    #[test]
    fn geoip_lookup_no_results() {
        // Private address: not in the database, so every field is stamped "".
        let mut parser = JsonParser::from(JsonParserConfig::default());
        let event = Event::from(r#"{"remote_addr": "10.1.12.1", "request_path": "foo/bar"}"#);
        let event = parser.transform(event).unwrap();
        let reader = maxminddb::Reader::open_readfile("tests/data/GeoIP2-City-Test.mmdb").unwrap();
        let mut augment = Geoip::new(reader, Atom::from("remote_addr"), "geo".to_string());
        let new_event = augment.transform(event).unwrap();
        let mut exp_geoip_attr = HashMap::new();
        exp_geoip_attr.insert("city_name", "");
        exp_geoip_attr.insert("country_code", "");
        exp_geoip_attr.insert("continent_code", "");
        exp_geoip_attr.insert("timezone", "");
        exp_geoip_attr.insert("latitude", "");
        exp_geoip_attr.insert("longitude", "");
        exp_geoip_attr.insert("postal_code", "");
        for field in exp_geoip_attr.keys() {
            let k = Atom::from(format!("geo.{}", field).to_string());
            println!("Looking for {:?}", k);
            let geodata = new_event.as_log().get(&k).unwrap().to_string_lossy();
            assert_eq!(&geodata, exp_geoip_attr.get(field).expect("fields exists"));
        }
    }
}
fee014ea7ef7d09bd0849f8bd60a1aae2983026c | 30,384 | //! Provides the `RustIrDatabase` implementation for `chalk-solve`
//!
//! The purpose of the `chalk_solve::RustIrDatabase` is to get data about
//! specific types, such as bounds, where clauses, or fields. This file contains
//! the minimal logic to assemble the types for `chalk-solve` by calling out to
//! either the `TyCtxt` (for information about types) or
//! `crate::chalk::lowering` (to lower rustc types into Chalk types).
use rustc_middle::traits::ChalkRustInterner as RustInterner;
use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef};
use rustc_middle::ty::{self, AssocItemContainer, AssocKind, TyCtxt, TypeFoldable};
use rustc_ast::ast;
use rustc_attr as attr;
use rustc_hir::def_id::DefId;
use rustc_span::symbol::sym;
use std::fmt;
use std::sync::Arc;
use crate::chalk::lowering::LowerInto;
/// Implementation of `chalk_solve::RustIrDatabase`, answering Chalk's
/// queries by reading rustc's `TyCtxt` (reached via the interner) and
/// lowering the results with `crate::chalk::lowering`.
pub struct RustIrDatabase<'tcx> {
    // Interner wrapping the `TyCtxt`; every query below goes through it.
    pub(crate) interner: RustInterner<'tcx>,
}
impl fmt::Debug for RustIrDatabase<'_> {
    /// Prints only the type name; the contained interner is not shown.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("RustIrDatabase")
    }
}
impl<'tcx> RustIrDatabase<'tcx> {
    /// Collects the where-clauses declared on `def_id`, substitutes the given
    /// bound variables, and lowers them to Chalk form. Clauses that the
    /// lowering cannot represent (it returns `None`) are silently dropped.
    fn where_clauses_for(
        &self,
        def_id: DefId,
        bound_vars: SubstsRef<'tcx>,
    ) -> Vec<chalk_ir::QuantifiedWhereClause<RustInterner<'tcx>>> {
        let predicates = self.interner.tcx.predicates_defined_on(def_id).predicates;
        predicates
            .iter()
            .map(|(wc, _)| wc.subst(self.interner.tcx, bound_vars))
            .filter_map(|wc| LowerInto::<
                            Option<chalk_ir::QuantifiedWhereClause<RustInterner<'tcx>>>
                                >::lower_into(wc, self.interner)).collect()
    }

    /// Lowers the explicit item bounds of `def_id` (used for associated and
    /// opaque types) after substituting `bound_vars`; non-representable
    /// bounds are dropped, mirroring `where_clauses_for`.
    fn bounds_for<T>(&self, def_id: DefId, bound_vars: SubstsRef<'tcx>) -> Vec<T>
    where
        ty::Predicate<'tcx>: LowerInto<'tcx, std::option::Option<T>>,
    {
        self.interner
            .tcx
            .explicit_item_bounds(def_id)
            .iter()
            .map(|(bound, _)| bound.subst(self.interner.tcx, &bound_vars))
            .filter_map(|bound| LowerInto::<Option<_>>::lower_into(bound, self.interner))
            .collect()
    }
}
impl<'tcx> chalk_solve::RustIrDatabase<RustInterner<'tcx>> for RustIrDatabase<'tcx> {
    /// Hands Chalk the interner used to allocate and read its IR.
    fn interner(&self) -> RustInterner<'tcx> {
        self.interner
    }

    /// Lowers a trait's associated-type declaration into Chalk's
    /// `AssociatedTyDatum` (binders, where-clauses, and item bounds).
    fn associated_ty_data(
        &self,
        assoc_type_id: chalk_ir::AssocTypeId<RustInterner<'tcx>>,
    ) -> Arc<chalk_solve::rust_ir::AssociatedTyDatum<RustInterner<'tcx>>> {
        let def_id = assoc_type_id.0;
        let assoc_item = self.interner.tcx.associated_item(def_id);
        // Chalk only asks about associated types declared on traits; anything
        // else is treated as unreachable.
        let AssocItemContainer::TraitContainer(trait_def_id) = assoc_item.container else {
            unimplemented!("Not possible??");
        };
        match assoc_item.kind {
            AssocKind::Type => {}
            _ => unimplemented!("Not possible??"),
        }
        let bound_vars = bound_vars_for_item(self.interner.tcx, def_id);
        let binders = binders_for(self.interner, bound_vars);
        let where_clauses = self.where_clauses_for(def_id, bound_vars);
        let bounds = self.bounds_for(def_id, bound_vars);
        Arc::new(chalk_solve::rust_ir::AssociatedTyDatum {
            trait_id: chalk_ir::TraitId(trait_def_id),
            id: assoc_type_id,
            name: (),
            binders: chalk_ir::Binders::new(
                binders,
                chalk_solve::rust_ir::AssociatedTyDatumBound { bounds, where_clauses },
            ),
        })
    }

    /// Lowers a trait definition, translating rustc lang items into Chalk's
    /// built-in `WellKnownTrait`s where one applies.
    fn trait_datum(
        &self,
        trait_id: chalk_ir::TraitId<RustInterner<'tcx>>,
    ) -> Arc<chalk_solve::rust_ir::TraitDatum<RustInterner<'tcx>>> {
        let def_id = trait_id.0;
        let trait_def = self.interner.tcx.trait_def(def_id);
        let bound_vars = bound_vars_for_item(self.interner.tcx, def_id);
        let binders = binders_for(self.interner, bound_vars);
        let where_clauses = self.where_clauses_for(def_id, bound_vars);
        let associated_ty_ids: Vec<_> = self
            .interner
            .tcx
            .associated_items(def_id)
            .in_definition_order()
            .filter(|i| i.kind == AssocKind::Type)
            .map(|i| chalk_ir::AssocTypeId(i.def_id))
            .collect();
        let lang_items = self.interner.tcx.lang_items();
        // Identify traits that Chalk treats specially (Sized, Copy, Fn*, ...).
        let well_known = if lang_items.sized_trait() == Some(def_id) {
            Some(chalk_solve::rust_ir::WellKnownTrait::Sized)
        } else if lang_items.copy_trait() == Some(def_id) {
            Some(chalk_solve::rust_ir::WellKnownTrait::Copy)
        } else if lang_items.clone_trait() == Some(def_id) {
            Some(chalk_solve::rust_ir::WellKnownTrait::Clone)
        } else if lang_items.drop_trait() == Some(def_id) {
            Some(chalk_solve::rust_ir::WellKnownTrait::Drop)
        } else if lang_items.fn_trait() == Some(def_id) {
            Some(chalk_solve::rust_ir::WellKnownTrait::Fn)
        } else if lang_items.fn_once_trait() == Some(def_id) {
            Some(chalk_solve::rust_ir::WellKnownTrait::FnOnce)
        } else if lang_items.fn_mut_trait() == Some(def_id) {
            Some(chalk_solve::rust_ir::WellKnownTrait::FnMut)
        } else if lang_items.unsize_trait() == Some(def_id) {
            Some(chalk_solve::rust_ir::WellKnownTrait::Unsize)
        } else if lang_items.unpin_trait() == Some(def_id) {
            Some(chalk_solve::rust_ir::WellKnownTrait::Unpin)
        } else if lang_items.coerce_unsized_trait() == Some(def_id) {
            Some(chalk_solve::rust_ir::WellKnownTrait::CoerceUnsized)
        } else {
            None
        };
        Arc::new(chalk_solve::rust_ir::TraitDatum {
            id: trait_id,
            binders: chalk_ir::Binders::new(
                binders,
                chalk_solve::rust_ir::TraitDatumBound { where_clauses },
            ),
            flags: chalk_solve::rust_ir::TraitFlags {
                auto: trait_def.has_auto_impl,
                marker: trait_def.is_marker,
                upstream: !def_id.is_local(),
                fundamental: self.interner.tcx.has_attr(def_id, sym::fundamental),
                non_enumerable: true,
                coinductive: false,
            },
            associated_ty_ids,
            well_known,
        })
    }
    /// Lowers a struct/union/enum definition: per-variant field types,
    /// where-clauses, and the upstream/fundamental/phantom-data flags.
    fn adt_datum(
        &self,
        adt_id: chalk_ir::AdtId<RustInterner<'tcx>>,
    ) -> Arc<chalk_solve::rust_ir::AdtDatum<RustInterner<'tcx>>> {
        let adt_def = adt_id.0;
        let bound_vars = bound_vars_for_item(self.interner.tcx, adt_def.did);
        let binders = binders_for(self.interner, bound_vars);
        let where_clauses = self.where_clauses_for(adt_def.did, bound_vars);
        let variants: Vec<_> = adt_def
            .variants
            .iter()
            .map(|variant| chalk_solve::rust_ir::AdtVariantDatum {
                fields: variant
                    .fields
                    .iter()
                    .map(|field| field.ty(self.interner.tcx, bound_vars).lower_into(self.interner))
                    .collect(),
            })
            .collect();
        Arc::new(chalk_solve::rust_ir::AdtDatum {
            id: adt_id,
            binders: chalk_ir::Binders::new(
                binders,
                chalk_solve::rust_ir::AdtDatumBound { variants, where_clauses },
            ),
            flags: chalk_solve::rust_ir::AdtFlags {
                upstream: !adt_def.did.is_local(),
                fundamental: adt_def.is_fundamental(),
                phantom_data: adt_def.is_phantom_data(),
            },
            kind: match adt_def.adt_kind() {
                ty::AdtKind::Struct => chalk_solve::rust_ir::AdtKind::Struct,
                ty::AdtKind::Union => chalk_solve::rust_ir::AdtKind::Union,
                ty::AdtKind::Enum => chalk_solve::rust_ir::AdtKind::Enum,
            },
        })
    }

    /// Translates the ADT's `#[repr(...)]` information: C layout, packing,
    /// and an explicit discriminant integer type mapped to a Chalk scalar.
    fn adt_repr(
        &self,
        adt_id: chalk_ir::AdtId<RustInterner<'tcx>>,
    ) -> Arc<chalk_solve::rust_ir::AdtRepr<RustInterner<'tcx>>> {
        let adt_def = adt_id.0;
        let int = |i| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Int(i)).intern(self.interner);
        let uint = |i| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Uint(i)).intern(self.interner);
        Arc::new(chalk_solve::rust_ir::AdtRepr {
            c: adt_def.repr.c(),
            packed: adt_def.repr.packed(),
            int: adt_def.repr.int.map(|i| match i {
                attr::IntType::SignedInt(ty) => match ty {
                    ast::IntTy::Isize => int(chalk_ir::IntTy::Isize),
                    ast::IntTy::I8 => int(chalk_ir::IntTy::I8),
                    ast::IntTy::I16 => int(chalk_ir::IntTy::I16),
                    ast::IntTy::I32 => int(chalk_ir::IntTy::I32),
                    ast::IntTy::I64 => int(chalk_ir::IntTy::I64),
                    ast::IntTy::I128 => int(chalk_ir::IntTy::I128),
                },
                attr::IntType::UnsignedInt(ty) => match ty {
                    ast::UintTy::Usize => uint(chalk_ir::UintTy::Usize),
                    ast::UintTy::U8 => uint(chalk_ir::UintTy::U8),
                    ast::UintTy::U16 => uint(chalk_ir::UintTy::U16),
                    ast::UintTy::U32 => uint(chalk_ir::UintTy::U32),
                    ast::UintTy::U64 => uint(chalk_ir::UintTy::U64),
                    ast::UintTy::U128 => uint(chalk_ir::UintTy::U128),
                },
            }),
        })
    }
    /// Lowers a function definition: its signature's inputs/output under
    /// their own binder, plus the function's where-clauses.
    fn fn_def_datum(
        &self,
        fn_def_id: chalk_ir::FnDefId<RustInterner<'tcx>>,
    ) -> Arc<chalk_solve::rust_ir::FnDefDatum<RustInterner<'tcx>>> {
        let def_id = fn_def_id.0;
        let bound_vars = bound_vars_for_item(self.interner.tcx, def_id);
        let binders = binders_for(self.interner, bound_vars);
        let where_clauses = self.where_clauses_for(def_id, bound_vars);
        let sig = self.interner.tcx.fn_sig(def_id);
        let (inputs_and_output, iobinders, _) = crate::chalk::lowering::collect_bound_vars(
            self.interner,
            self.interner.tcx,
            sig.inputs_and_output().subst(self.interner.tcx, bound_vars),
        );
        // By convention the last element of `inputs_and_output` is the return
        // type; everything before it is an argument.
        let argument_types = inputs_and_output[..inputs_and_output.len() - 1]
            .iter()
            .map(|t| t.subst(self.interner.tcx, &bound_vars).lower_into(self.interner))
            .collect();
        let return_type = inputs_and_output[inputs_and_output.len() - 1]
            .subst(self.interner.tcx, &bound_vars)
            .lower_into(self.interner);
        let bound = chalk_solve::rust_ir::FnDefDatumBound {
            inputs_and_output: chalk_ir::Binders::new(
                iobinders,
                chalk_solve::rust_ir::FnDefInputsAndOutputDatum { argument_types, return_type },
            ),
            where_clauses,
        };
        Arc::new(chalk_solve::rust_ir::FnDefDatum {
            id: fn_def_id,
            sig: sig.lower_into(self.interner),
            binders: chalk_ir::Binders::new(binders, bound),
        })
    }

    /// Lowers an impl block: its trait ref, where-clauses, polarity, and the
    /// ids of its associated-type values.
    fn impl_datum(
        &self,
        impl_id: chalk_ir::ImplId<RustInterner<'tcx>>,
    ) -> Arc<chalk_solve::rust_ir::ImplDatum<RustInterner<'tcx>>> {
        let def_id = impl_id.0;
        let bound_vars = bound_vars_for_item(self.interner.tcx, def_id);
        let binders = binders_for(self.interner, bound_vars);
        let trait_ref = self.interner.tcx.impl_trait_ref(def_id).expect("not an impl");
        let trait_ref = trait_ref.subst(self.interner.tcx, bound_vars);
        let where_clauses = self.where_clauses_for(def_id, bound_vars);
        let value = chalk_solve::rust_ir::ImplDatumBound {
            trait_ref: trait_ref.lower_into(self.interner),
            where_clauses,
        };
        let associated_ty_value_ids: Vec<_> = self
            .interner
            .tcx
            .associated_items(def_id)
            .in_definition_order()
            .filter(|i| i.kind == AssocKind::Type)
            .map(|i| chalk_solve::rust_ir::AssociatedTyValueId(i.def_id))
            .collect();
        Arc::new(chalk_solve::rust_ir::ImplDatum {
            polarity: self.interner.tcx.impl_polarity(def_id).lower_into(self.interner),
            binders: chalk_ir::Binders::new(binders, value),
            // NOTE(review): every impl is reported as `Local`, including impls
            // from upstream crates (`!def_id.is_local()`) — verify this is
            // intentional.
            impl_type: chalk_solve::rust_ir::ImplType::Local,
            associated_ty_value_ids,
        })
    }
    /// Returns the impls of `trait_id` whose self type could possibly match
    /// the first generic parameter, using Chalk's `could_match` pre-filter.
    fn impls_for_trait(
        &self,
        trait_id: chalk_ir::TraitId<RustInterner<'tcx>>,
        parameters: &[chalk_ir::GenericArg<RustInterner<'tcx>>],
        _binders: &chalk_ir::CanonicalVarKinds<RustInterner<'tcx>>,
    ) -> Vec<chalk_ir::ImplId<RustInterner<'tcx>>> {
        let def_id = trait_id.0;
        // FIXME(chalk): use TraitDef::for_each_relevant_impl, but that will
        // require us to be able to interconvert `Ty<'tcx>`, and we're
        // not there yet.
        let all_impls = self.interner.tcx.all_impls(def_id);
        let matched_impls = all_impls.filter(|impl_def_id| {
            use chalk_ir::could_match::CouldMatch;
            let trait_ref = self.interner.tcx.impl_trait_ref(*impl_def_id).unwrap();
            let bound_vars = bound_vars_for_item(self.interner.tcx, *impl_def_id);
            let self_ty = trait_ref.self_ty();
            let self_ty = self_ty.subst(self.interner.tcx, bound_vars);
            let lowered_ty = self_ty.lower_into(self.interner);
            // `parameters[0]` is the Self type of the goal being solved.
            parameters[0].assert_ty_ref(self.interner).could_match(
                self.interner,
                self.unification_database(),
                &lowered_ty,
            )
        });
        let impls = matched_impls.map(chalk_ir::ImplId).collect();
        impls
    }

    /// Used for auto-trait solving: reports whether any impl of
    /// `auto_trait_id` has a self type that syntactically matches the given
    /// Chalk `TyKind` (compared head-for-head, not via full unification).
    fn impl_provided_for(
        &self,
        auto_trait_id: chalk_ir::TraitId<RustInterner<'tcx>>,
        chalk_ty: &chalk_ir::TyKind<RustInterner<'tcx>>,
    ) -> bool {
        use chalk_ir::Scalar::*;
        use chalk_ir::TyKind::*;
        let trait_def_id = auto_trait_id.0;
        let all_impls = self.interner.tcx.all_impls(trait_def_id);
        for impl_def_id in all_impls {
            let trait_ref = self.interner.tcx.impl_trait_ref(impl_def_id).unwrap();
            let self_ty = trait_ref.self_ty();
            let provides = match (self_ty.kind(), chalk_ty) {
                (&ty::Adt(impl_adt_def, ..), Adt(id, ..)) => impl_adt_def.did == id.0.did,
                (_, AssociatedType(_ty_id, ..)) => {
                    // FIXME(chalk): See https://github.com/rust-lang/rust/pull/77152#discussion_r494484774
                    false
                }
                (ty::Bool, Scalar(Bool)) => true,
                (ty::Char, Scalar(Char)) => true,
                (ty::Int(ty1), Scalar(Int(ty2))) => matches!(
                    (ty1, ty2),
                    (ty::IntTy::Isize, chalk_ir::IntTy::Isize)
                        | (ty::IntTy::I8, chalk_ir::IntTy::I8)
                        | (ty::IntTy::I16, chalk_ir::IntTy::I16)
                        | (ty::IntTy::I32, chalk_ir::IntTy::I32)
                        | (ty::IntTy::I64, chalk_ir::IntTy::I64)
                        | (ty::IntTy::I128, chalk_ir::IntTy::I128)
                ),
                (ty::Uint(ty1), Scalar(Uint(ty2))) => matches!(
                    (ty1, ty2),
                    (ty::UintTy::Usize, chalk_ir::UintTy::Usize)
                        | (ty::UintTy::U8, chalk_ir::UintTy::U8)
                        | (ty::UintTy::U16, chalk_ir::UintTy::U16)
                        | (ty::UintTy::U32, chalk_ir::UintTy::U32)
                        | (ty::UintTy::U64, chalk_ir::UintTy::U64)
                        | (ty::UintTy::U128, chalk_ir::UintTy::U128)
                ),
                (ty::Float(ty1), Scalar(Float(ty2))) => matches!(
                    (ty1, ty2),
                    (ty::FloatTy::F32, chalk_ir::FloatTy::F32)
                        | (ty::FloatTy::F64, chalk_ir::FloatTy::F64)
                ),
                (&ty::Tuple(substs), Tuple(len, _)) => substs.len() == *len,
                (&ty::Array(..), Array(..)) => true,
                (&ty::Slice(..), Slice(..)) => true,
                (&ty::RawPtr(type_and_mut), Raw(mutability, _)) => {
                    match (type_and_mut.mutbl, mutability) {
                        (ast::Mutability::Mut, chalk_ir::Mutability::Mut) => true,
                        (ast::Mutability::Mut, chalk_ir::Mutability::Not) => false,
                        (ast::Mutability::Not, chalk_ir::Mutability::Mut) => false,
                        (ast::Mutability::Not, chalk_ir::Mutability::Not) => true,
                    }
                }
                (&ty::Ref(.., mutability1), Ref(mutability2, ..)) => {
                    match (mutability1, mutability2) {
                        (ast::Mutability::Mut, chalk_ir::Mutability::Mut) => true,
                        (ast::Mutability::Mut, chalk_ir::Mutability::Not) => false,
                        (ast::Mutability::Not, chalk_ir::Mutability::Mut) => false,
                        (ast::Mutability::Not, chalk_ir::Mutability::Not) => true,
                    }
                }
                (&ty::Opaque(def_id, ..), OpaqueType(opaque_ty_id, ..)) => def_id == opaque_ty_id.0,
                (&ty::FnDef(def_id, ..), FnDef(fn_def_id, ..)) => def_id == fn_def_id.0,
                (&ty::Str, Str) => true,
                (&ty::Never, Never) => true,
                (&ty::Closure(def_id, ..), Closure(closure_id, _)) => def_id == closure_id.0,
                (&ty::Foreign(def_id), Foreign(foreign_def_id)) => def_id == foreign_def_id.0,
                (&ty::Error(..), Error) => false,
                _ => false,
            };
            if provides {
                return true;
            }
        }
        false
    }
fn associated_ty_value(
&self,
associated_ty_id: chalk_solve::rust_ir::AssociatedTyValueId<RustInterner<'tcx>>,
) -> Arc<chalk_solve::rust_ir::AssociatedTyValue<RustInterner<'tcx>>> {
let def_id = associated_ty_id.0;
let assoc_item = self.interner.tcx.associated_item(def_id);
let impl_id = assoc_item.container.id();
match assoc_item.kind {
AssocKind::Type => {}
_ => unimplemented!("Not possible??"),
}
let trait_item_id = assoc_item.trait_item_def_id.expect("assoc_ty with no trait version");
let bound_vars = bound_vars_for_item(self.interner.tcx, def_id);
let binders = binders_for(self.interner, bound_vars);
let ty = self
.interner
.tcx
.type_of(def_id)
.subst(self.interner.tcx, bound_vars)
.lower_into(self.interner);
Arc::new(chalk_solve::rust_ir::AssociatedTyValue {
impl_id: chalk_ir::ImplId(impl_id),
associated_ty_id: chalk_ir::AssocTypeId(trait_item_id),
value: chalk_ir::Binders::new(
binders,
chalk_solve::rust_ir::AssociatedTyValueBound { ty },
),
})
}
fn custom_clauses(&self) -> Vec<chalk_ir::ProgramClause<RustInterner<'tcx>>> {
vec![]
}
    /// Required by the `RustIrDatabase` trait for coherence checking.
    /// Always panics via `unimplemented!` — this query is not wired up here.
    fn local_impls_to_coherence_check(
        &self,
        _trait_id: chalk_ir::TraitId<RustInterner<'tcx>>,
    ) -> Vec<chalk_ir::ImplId<RustInterner<'tcx>>> {
        unimplemented!()
    }
    /// Builds chalk's datum for an opaque type: its item bounds and
    /// where-clauses, each wrapped in an inner binder standing for the
    /// (existential) concrete hidden type, plus an outer binder for the
    /// opaque type's own generic parameters.
    fn opaque_ty_data(
        &self,
        opaque_ty_id: chalk_ir::OpaqueTyId<RustInterner<'tcx>>,
    ) -> Arc<chalk_solve::rust_ir::OpaqueTyDatum<RustInterner<'tcx>>> {
        // Shift the item's bound vars up one binder level to make room for the
        // innermost `existential_binder` introduced below.
        let bound_vars = ty::fold::shift_vars(
            self.interner.tcx,
            bound_vars_for_item(self.interner.tcx, opaque_ty_id.0),
            1,
        );
        let where_clauses = self.where_clauses_for(opaque_ty_id.0, bound_vars);
        let identity_substs = InternalSubsts::identity_for_item(self.interner.tcx, opaque_ty_id.0);
        let bounds =
            self.interner
                .tcx
                .explicit_item_bounds(opaque_ty_id.0)
                .iter()
                .map(|(bound, _)| bound.subst(self.interner.tcx, &bound_vars))
                .map(|bound| {
                    bound.fold_with(&mut ty::fold::BottomUpFolder {
                        tcx: self.interner.tcx,
                        // Replace self-references to this opaque type (with
                        // identity substs) by bound var 0 of the innermost
                        // binder, i.e. the hidden-type placeholder.
                        ty_op: |ty| {
                            if let ty::Opaque(def_id, substs) = *ty.kind() {
                                if def_id == opaque_ty_id.0 && substs == identity_substs {
                                    return self.interner.tcx.mk_ty(ty::Bound(
                                        ty::INNERMOST,
                                        ty::BoundTy::from(ty::BoundVar::from_u32(0)),
                                    ));
                                }
                            }
                            ty
                        },
                        // Lifetimes and consts pass through unchanged.
                        lt_op: |lt| lt,
                        ct_op: |ct| ct,
                    })
                })
                // Bounds whose lowering yields `None` are dropped here.
                .filter_map(|bound| {
                    LowerInto::<
                        Option<chalk_ir::QuantifiedWhereClause<RustInterner<'tcx>>>
                    >::lower_into(bound, self.interner)
                })
                .collect();
        // Binder for the bound variable representing the concrete impl Trait type.
        let existential_binder = chalk_ir::VariableKinds::from1(
            self.interner,
            chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General),
        );
        let value = chalk_solve::rust_ir::OpaqueTyDatumBound {
            bounds: chalk_ir::Binders::new(existential_binder.clone(), bounds),
            where_clauses: chalk_ir::Binders::new(existential_binder, where_clauses),
        };
        // Outer binder: the opaque type's own generic parameters.
        let binders = binders_for(self.interner, bound_vars);
        Arc::new(chalk_solve::rust_ir::OpaqueTyDatum {
            opaque_ty_id,
            bound: chalk_ir::Binders::new(binders, value),
        })
    }
    /// Delegates to chalk's own derivation of program clauses for the
    /// given in-scope environment.
    fn program_clauses_for_env(
        &self,
        environment: &chalk_ir::Environment<RustInterner<'tcx>>,
    ) -> chalk_ir::ProgramClauses<RustInterner<'tcx>> {
        chalk_solve::program_clauses_for_env(self, environment)
    }
fn well_known_trait_id(
&self,
well_known_trait: chalk_solve::rust_ir::WellKnownTrait,
) -> Option<chalk_ir::TraitId<RustInterner<'tcx>>> {
use chalk_solve::rust_ir::WellKnownTrait::*;
let lang_items = self.interner.tcx.lang_items();
let def_id = match well_known_trait {
Sized => lang_items.sized_trait(),
Copy => lang_items.copy_trait(),
Clone => lang_items.clone_trait(),
Drop => lang_items.drop_trait(),
Fn => lang_items.fn_trait(),
FnMut => lang_items.fn_mut_trait(),
FnOnce => lang_items.fn_once_trait(),
Generator => lang_items.gen_trait(),
Unsize => lang_items.unsize_trait(),
Unpin => lang_items.unpin_trait(),
CoerceUnsized => lang_items.coerce_unsized_trait(),
DiscriminantKind => lang_items.discriminant_kind_trait(),
};
def_id.map(chalk_ir::TraitId)
}
fn is_object_safe(&self, trait_id: chalk_ir::TraitId<RustInterner<'tcx>>) -> bool {
self.interner.tcx.is_object_safe(trait_id.0)
}
fn hidden_opaque_type(
&self,
_id: chalk_ir::OpaqueTyId<RustInterner<'tcx>>,
) -> chalk_ir::Ty<RustInterner<'tcx>> {
// FIXME(chalk): actually get hidden ty
self.interner
.tcx
.mk_ty(ty::Tuple(self.interner.tcx.intern_substs(&[])))
.lower_into(self.interner)
}
fn closure_kind(
&self,
_closure_id: chalk_ir::ClosureId<RustInterner<'tcx>>,
substs: &chalk_ir::Substitution<RustInterner<'tcx>>,
) -> chalk_solve::rust_ir::ClosureKind {
let kind = &substs.as_slice(self.interner)[substs.len(self.interner) - 3];
match kind.assert_ty_ref(self.interner).kind(self.interner) {
chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Int(int_ty)) => match int_ty {
chalk_ir::IntTy::I8 => chalk_solve::rust_ir::ClosureKind::Fn,
chalk_ir::IntTy::I16 => chalk_solve::rust_ir::ClosureKind::FnMut,
chalk_ir::IntTy::I32 => chalk_solve::rust_ir::ClosureKind::FnOnce,
_ => bug!("bad closure kind"),
},
_ => bug!("bad closure kind"),
}
}
    /// Extracts the closure's argument types and return type from its
    /// substitution. The signature is the second parameter from the end and
    /// must be a function type whose single input is the tupled argument list.
    fn closure_inputs_and_output(
        &self,
        _closure_id: chalk_ir::ClosureId<RustInterner<'tcx>>,
        substs: &chalk_ir::Substitution<RustInterner<'tcx>>,
    ) -> chalk_ir::Binders<chalk_solve::rust_ir::FnDefInputsAndOutputDatum<RustInterner<'tcx>>>
    {
        let sig = &substs.as_slice(self.interner)[substs.len(self.interner) - 2];
        match sig.assert_ty_ref(self.interner).kind(self.interner) {
            chalk_ir::TyKind::Function(f) => {
                let substitution = f.substitution.0.as_slice(self.interner);
                // The last entry of a function substitution is the return type.
                let return_type = substitution.last().unwrap().assert_ty_ref(self.interner).clone();
                // Closure arguments are tupled
                let argument_tuple = substitution[0].assert_ty_ref(self.interner);
                let argument_types = match argument_tuple.kind(self.interner) {
                    chalk_ir::TyKind::Tuple(_len, substitution) => substitution
                        .iter(self.interner)
                        .map(|arg| arg.assert_ty_ref(self.interner))
                        .cloned()
                        .collect(),
                    _ => bug!("Expecting closure FnSig args to be tupled."),
                };
                // Rewrap in a binder with one lifetime variable per binder of
                // the original function type.
                chalk_ir::Binders::new(
                    chalk_ir::VariableKinds::from_iter(
                        self.interner,
                        (0..f.num_binders).map(|_| chalk_ir::VariableKind::Lifetime),
                    ),
                    chalk_solve::rust_ir::FnDefInputsAndOutputDatum { argument_types, return_type },
                )
            }
            _ => panic!("Invalid sig."),
        }
    }
fn closure_upvars(
&self,
_closure_id: chalk_ir::ClosureId<RustInterner<'tcx>>,
substs: &chalk_ir::Substitution<RustInterner<'tcx>>,
) -> chalk_ir::Binders<chalk_ir::Ty<RustInterner<'tcx>>> {
let inputs_and_output = self.closure_inputs_and_output(_closure_id, substs);
let tuple = substs.as_slice(self.interner).last().unwrap().assert_ty_ref(self.interner);
inputs_and_output.map_ref(|_| tuple.clone())
}
fn closure_fn_substitution(
&self,
_closure_id: chalk_ir::ClosureId<RustInterner<'tcx>>,
substs: &chalk_ir::Substitution<RustInterner<'tcx>>,
) -> chalk_ir::Substitution<RustInterner<'tcx>> {
let substitution = &substs.as_slice(self.interner)[0..substs.len(self.interner) - 3];
chalk_ir::Substitution::from_iter(self.interner, substitution)
}
    /// Required by the `RustIrDatabase` trait for generator support.
    /// Always panics via `unimplemented!` — generators are not lowered here yet.
    fn generator_datum(
        &self,
        _generator_id: chalk_ir::GeneratorId<RustInterner<'tcx>>,
    ) -> Arc<chalk_solve::rust_ir::GeneratorDatum<RustInterner<'tcx>>> {
        unimplemented!()
    }
    /// Required by the `RustIrDatabase` trait for generator support.
    /// Always panics via `unimplemented!` — generator witnesses are not
    /// lowered here yet.
    fn generator_witness_datum(
        &self,
        _generator_id: chalk_ir::GeneratorId<RustInterner<'tcx>>,
    ) -> Arc<chalk_solve::rust_ir::GeneratorWitnessDatum<RustInterner<'tcx>>> {
        unimplemented!()
    }
    /// This database also serves as its own unification database (see the
    /// `chalk_ir::UnificationDatabase` impl below).
    fn unification_database(&self) -> &dyn chalk_ir::UnificationDatabase<RustInterner<'tcx>> {
        self
    }
    /// Required by the `RustIrDatabase` trait for `DiscriminantKind` support.
    /// Always panics via `unimplemented!`.
    fn discriminant_type(
        &self,
        _: chalk_ir::Ty<RustInterner<'tcx>>,
    ) -> chalk_ir::Ty<RustInterner<'tcx>> {
        unimplemented!()
    }
}
/// Variance information for chalk's unifier, backed by rustc's
/// `variances_of` query.
impl<'tcx> chalk_ir::UnificationDatabase<RustInterner<'tcx>> for RustIrDatabase<'tcx> {
    /// Lowers the variances of a fn item's generic parameters into chalk form.
    fn fn_def_variance(
        &self,
        def_id: chalk_ir::FnDefId<RustInterner<'tcx>>,
    ) -> chalk_ir::Variances<RustInterner<'tcx>> {
        let lowered = self
            .interner
            .tcx
            .variances_of(def_id.0)
            .iter()
            .map(|v| v.lower_into(self.interner));
        chalk_ir::Variances::from_iter(self.interner, lowered)
    }

    /// Lowers the variances of an ADT's generic parameters into chalk form.
    fn adt_variance(
        &self,
        adt_id: chalk_ir::AdtId<RustInterner<'tcx>>,
    ) -> chalk_ir::Variances<RustInterner<'tcx>> {
        let lowered = self
            .interner
            .tcx
            .variances_of(adt_id.0.did)
            .iter()
            .map(|v| v.lower_into(self.interner));
        chalk_ir::Variances::from_iter(self.interner, lowered)
    }
}
/// Creates an `InternalSubsts` that maps each generic parameter to a higher-ranked
/// var bound at index `0`. For types, we use a `BoundVar` index equal to
/// the type parameter index. For regions, we use the anonymous `BrAnon`
/// variant, indexed by the parameter's position in the substitution so far.
fn bound_vars_for_item<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> SubstsRef<'tcx> {
    InternalSubsts::for_item(tcx, def_id, |param, substs| match param.kind {
        // Type params become bound type vars indexed by the parameter index.
        ty::GenericParamDefKind::Type { .. } => tcx
            .mk_ty(ty::Bound(
                ty::INNERMOST,
                ty::BoundTy {
                    var: ty::BoundVar::from(param.index),
                    kind: ty::BoundTyKind::Param(param.name),
                },
            ))
            .into(),
        // Lifetimes become anonymous late-bound regions; note the var index is
        // the number of args built so far, not `param.index`.
        ty::GenericParamDefKind::Lifetime => {
            let br = ty::BoundRegion {
                var: ty::BoundVar::from_usize(substs.len()),
                kind: ty::BrAnon(substs.len() as u32),
            };
            tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br)).into()
        }
        // Const params become bound const vars carrying their declared type.
        ty::GenericParamDefKind::Const { .. } => tcx
            .mk_const(ty::ConstS {
                val: ty::ConstKind::Bound(ty::INNERMOST, ty::BoundVar::from(param.index)),
                ty: tcx.type_of(param.def_id),
            })
            .into(),
    })
}
/// Computes the chalk `VariableKinds` matching each generic arg in
/// `bound_vars`, for wrapping a value in `chalk_ir::Binders`.
fn binders_for<'tcx>(
    interner: RustInterner<'tcx>,
    bound_vars: SubstsRef<'tcx>,
) -> chalk_ir::VariableKinds<RustInterner<'tcx>> {
    let kinds = bound_vars.iter().map(|arg| match arg.unpack() {
        ty::subst::GenericArgKind::Lifetime(_) => chalk_ir::VariableKind::Lifetime,
        ty::subst::GenericArgKind::Type(_) => {
            chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General)
        }
        // Const vars carry their (lowered) type.
        ty::subst::GenericArgKind::Const(c) => {
            chalk_ir::VariableKind::Const(c.ty().lower_into(interner))
        }
    });
    chalk_ir::VariableKinds::from_iter(interner, kinds)
}
| 40.948787 | 107 | 0.561809 |
f5962ee3e724d10d5d640c65f2eccddead248c55 | 17,824 |
// TODO: Remove all #[allow(dead_code)]
use ash::vk;
use gs::prelude::*;
use gsvk::prelude::common::*;
use gsvk::prelude::buffer::*;
use gsvk::prelude::image::*;
use gsvk::prelude::descriptor::*;
use gsvk::prelude::pipeline::*;
use gsvk::prelude::command::*;
use gsvk::prelude::sync::*;
use gsvk::prelude::api::*;
use gsma::data_size;
use vk_examples::{ Y_CORRECTION, DEFAULT_CLEAR_COLOR };
use super::data::{ Vertex, UBOMatrices, CubeResources };
use nalgebra::{ Matrix4, Point3, Vector3 };
use std::path::Path;
// Shader sources, glTF model and textures loaded by this example.
const VERTEX_SHADER_SOURCE_PATH : &'static str = "src/descriptorsets/cube.vert.glsl";
const FRAGMENT_SHADER_SOURCE_PATH: &'static str = "src/descriptorsets/cube.frag.glsl";
const MODEL_PATH: &'static str = "models/cube.gltf";
const TEXTURE1_PATH: &'static str = "textures/crate01_color_height_rgba.png";
const TEXTURE2_PATH: &'static str = "textures/crate02_color_height_rgba.png";
/// All GPU and scene resources owned by the descriptor-sets example.
/// Fields marked `#[allow(dead_code)]` are kept alive only so their
/// GPU-side allocations are not dropped while in use.
pub struct VulkanExample {
    /// The loaded glTF cube model.
    model_entity: GsglTFEntity,
    /// Per-cube uniform data, textures, descriptor sets and uniform buffers.
    cubes: CubeResources,
    #[allow(dead_code)]
    model_repository: GsBufferRepository<Device>,
    /// Host-visible storage backing the uniform buffers (written each frame
    /// by `update_uniforms`).
    ubo_storage : GsBufferRepository<Host>,
    #[allow(dead_code)]
    desc_storage: GsDescriptorRepository,
    /// The graphics pipeline used to draw both cubes.
    pipeline: GsPipeline<Graphics>,
    /// Depth buffer attachment, also needed when rebuilding the pipeline.
    depth_attachment: GsDSAttachment,
    #[allow(dead_code)]
    image_storage : GsImageRepository<Device>,
    command_pool : GsCommandPool,
    /// One pre-recorded command buffer per swapchain frame.
    command_buffers: Vec<GsCommandBuffer>,
    view_port: CmdViewportInfo,
    scissor : CmdScissorInfo,
    camera: GsFlightCamera,
    /// One render-finished semaphore per swapchain frame.
    present_availables: Vec<GsSemaphore>,
    /// Set when input moved the camera, so uniforms are rewritten next draw.
    is_toggle_event: bool,
}
impl VulkanExample {
    /// Builds the whole example: camera, uniform data for two cubes, model
    /// and uniform buffers, images, descriptor sets, pipeline, sync objects
    /// and pre-recorded command buffers.
    pub fn new(initializer: AssetInitializer) -> GsResult<VulkanExample> {
        let screen_dimension = initializer.screen_dimension();
        let mut camera = GsCameraFactory::config()
            .place_at(Point3::new(0.0, 0.0, 2.5))
            .screen_aspect_ratio(screen_dimension.width as f32 / screen_dimension.height as f32)
            .into_flight_camera();
        camera.set_move_speed(5.0);
        let view_port = CmdViewportInfo::from(screen_dimension);
        let scissor = CmdScissorInfo::from(screen_dimension);
        // One UBO per cube; the cubes differ only by their model translation.
        let ubo_data = [
            vec![
                UBOMatrices {
                    projection: camera.proj_matrix(),
                    model : Matrix4::new_translation(&Vector3::new(-2.0, 0.0, 0.0)),
                    view : camera.view_matrix(),
                    y_correction: Y_CORRECTION.clone(),
                },
            ],
            vec![
                UBOMatrices {
                    projection: camera.proj_matrix(),
                    model : Matrix4::new_translation(&Vector3::new(1.5, 0.5, 0.0)),
                    view : camera.view_matrix(),
                    y_correction: Y_CORRECTION.clone(),
                },
            ],
        ];
        let (model_entity, model_repository, ubo_buffers, ubo_storage) = {
            VulkanExample::load_model(&initializer, &ubo_data)
        }?;
        let (sample_images, depth_attachment, image_storage) = {
            VulkanExample::image(&initializer, screen_dimension)
        }?;
        let (ubo_sets, desc_storage) = {
            VulkanExample::ubo(&initializer, &model_entity, &sample_images, &ubo_buffers)
        }?;
        let push_consts = model_entity.pushconst_description(GsPipelineStage::VERTEX);
        let pipeline = {
            VulkanExample::pipelines(&initializer, push_consts, &[&ubo_sets[0], &ubo_sets[1]], &depth_attachment)
        }?;
        let present_availables = {
            VulkanExample::sync_resources(&initializer, &pipeline)
        }?;
        let cubes = CubeResources {
            matrices : ubo_data,
            texture : sample_images,
            ubo_set : ubo_sets,
            ubo_buffer: ubo_buffers,
        };
        let (command_pool, command_buffers) = {
            VulkanExample::commands(&initializer, &pipeline, &model_entity, &cubes, &view_port, &scissor)
        }?;
        let procedure = VulkanExample {
            model_entity, cubes,
            model_repository, ubo_storage, desc_storage,
            pipeline,
            depth_attachment, image_storage,
            command_pool, command_buffers,
            camera, view_port, scissor,
            present_availables,
            is_toggle_event: false,
        };
        Ok(procedure)
    }
    /// Writes the current camera view matrix into both cubes' uniform buffers.
    fn update_uniforms(&mut self) -> GsResult<()> {
        // Update UBOMatrices uniform block.
        self.cubes.matrices[0][0].view = self.camera.view_matrix();
        self.cubes.matrices[1][0].view = self.camera.view_matrix();
        // Update data in memory.
        self.ubo_storage.data_updater()?
            .update(&self.cubes.ubo_buffer[0], &self.cubes.matrices[0])?
            .update(&self.cubes.ubo_buffer[1], &self.cubes.matrices[1])?
            .finish()?;
        Ok(())
    }
    /// Loads the glTF cube and allocates/uploads the vertex buffer (device
    /// local) and the two per-cube uniform buffers (host visible).
    fn load_model(initializer: &AssetInitializer, ubo_data: &[Vec<UBOMatrices>; 2]) -> GsResult<(GsglTFEntity, GsBufferRepository<Device>, [GsUniformBuffer; 2], GsBufferRepository<Host>)> {
        let mut model_allocator = GsBufferAllocator::new(initializer, BufferStorageType::DEVICE);
        let mut ubo_allocator = GsBufferAllocator::new(initializer, BufferStorageType::HOST);
        // allocate uniform data buffer.
        // refer to `layout (set = 0, binding = 0) uniform UBO` in cube.vert.
        let ubo_matrix_info1 = GsUniformBuffer::new(0, data_size!(UBOMatrices));
        let ubo_matrix_info2 = ubo_matrix_info1.clone();
        let ubo_matrix_index1 = ubo_allocator.assign(ubo_matrix_info1)?; // ubo buffer for cube 0
        let ubo_matrix_index2 = ubo_allocator.assign(ubo_matrix_info2)?; // ubo buffer for cube 1
        // allocate model data buffer.
        let gltf_importer = GsglTFImporter::new(initializer);
        let (mut model_entity, model_data) = gltf_importer.load(Path::new(MODEL_PATH))?;
        let model_vertex_index = model_allocator.assign_v2(&model_data.vertex_allot_delegate())?;
        let model_uniform_index = ubo_allocator.assign_v2(&model_data.uniform_allot_delegate(1))?;
        let model_distributor = model_allocator.allocate()?;
        let ubo_distributor = ubo_allocator.allocate()?;
        model_entity.acquire_vertex(model_vertex_index, &model_distributor);
        model_entity.acquire_uniform(model_uniform_index, &ubo_distributor);
        let mut model_repository = model_distributor.into_repository();
        model_repository.data_uploader()?
            .upload_v2(&model_entity.vertex_upload_delegate().unwrap(), &model_data)?
            .finish()?;
        let cube0_ubo = ubo_distributor.acquire(ubo_matrix_index1);
        let cube1_ubo = ubo_distributor.acquire(ubo_matrix_index2);
        let mut ubo_repository = ubo_distributor.into_repository();
        ubo_repository.data_uploader()?
            .upload_v2(&model_entity.uniform_upload_delegate().unwrap(), &model_data)?
            .upload(&cube0_ubo, &ubo_data[0])?
            .upload(&cube1_ubo, &ubo_data[1])?
            .finish()?;
        Ok((model_entity, model_repository, [cube0_ubo, cube1_ubo], ubo_repository))
    }
    /// Builds one descriptor set per cube; both use the same layout
    /// (bindings 0-2) but bind that cube's own UBO and texture.
    fn ubo(initializer: &AssetInitializer, model: &GsglTFEntity, textures: &[GsCombinedImgSampler; 2], ubo_buffers: &[GsUniformBuffer; 2]) -> GsResult<([DescriptorSet; 2], GsDescriptorRepository)> {
        let mut descriptor_allocator = GsDescriptorAllocator::new(initializer);
        // descriptor set for first cube.
        let mut descriptor_set_config = DescriptorSetConfig::new();
        descriptor_set_config.add_buffer_binding(&ubo_buffers[0], GsPipelineStage::VERTEX); // binding 0
        descriptor_set_config.add_buffer_binding(model, GsPipelineStage::VERTEX); // binding 1
        descriptor_set_config.add_image_binding(&textures[0], GsPipelineStage::FRAGMENT); // binding 2
        let cube0_desc_index = descriptor_allocator.assign(descriptor_set_config);
        // descriptor set for second cube.
        let mut descriptor_set_config = DescriptorSetConfig::new();
        descriptor_set_config.add_buffer_binding(&ubo_buffers[1], GsPipelineStage::VERTEX); // binding 0
        descriptor_set_config.add_buffer_binding(model, GsPipelineStage::VERTEX); // binding 1
        descriptor_set_config.add_image_binding(&textures[1], GsPipelineStage::FRAGMENT); // binding 2
        let cube1_desc_index = descriptor_allocator.assign(descriptor_set_config);
        // allocate descriptor set.
        let descriptor_distributor = descriptor_allocator.allocate()?;
        let cube0_ubo_set = descriptor_distributor.acquire(cube0_desc_index);
        let cube1_ubo_set = descriptor_distributor.acquire(cube1_desc_index);
        let desc_storage = descriptor_distributor.into_repository();
        Ok(([cube0_ubo_set, cube1_ubo_set], desc_storage))
    }
    /// Allocates the depth attachment and loads the two crate textures as
    /// combined image samplers.
    fn image(initializer: &AssetInitializer, dimension: vkDim2D) -> GsResult<([GsCombinedImgSampler; 2], GsDSAttachment, GsImageRepository<Device>)> {
        let mut image_allocator = GsImageAllocator::new(initializer, ImageStorageType::DEVICE);
        // Depth Attachment
        let depth_attachment_info = GsDSAttachment::new(dimension, DepthStencilImageFormat::Depth32Bit);
        let depth_image_index = image_allocator.assign(depth_attachment_info)?;
        // Combined Sample Image
        let image_loader = ImageLoader::new(initializer);
        let image_storage1 = image_loader.load_2d(Path::new(TEXTURE1_PATH), GsImageFormat::default())?; // texture 1 for cube 1
        let image_storage2 = image_loader.load_2d(Path::new(TEXTURE2_PATH), GsImageFormat::default())?; // texture 2 for cube 2
        // refer to `layout (set = 0, binding = 2) sampler2D samplerColorMap` in cube.frag.glsl. Accessible from the fragment shader only.
        let image_info1 = GsCombinedImgSampler::new(2, image_storage1, ImagePipelineStage::FragmentStage);
        let image_info2 = GsCombinedImgSampler::new(2, image_storage2, ImagePipelineStage::FragmentStage);
        let sample_image_index1 = image_allocator.assign(image_info1)?;
        let sample_image_index2 = image_allocator.assign(image_info2)?;
        let image_distributor = image_allocator.allocate()?;
        let depth_attachment = image_distributor.acquire(depth_image_index);
        let sample_image1 = image_distributor.acquire(sample_image_index1);
        let sample_image2 = image_distributor.acquire(sample_image_index2);
        let image_storage = image_distributor.into_repository();
        Ok(([sample_image1, sample_image2], depth_attachment, image_storage))
    }
    /// Builds the render pass (color + depth, with external subpass
    /// dependencies) and the graphics pipeline with a dynamic viewport.
    fn pipelines(initializer: &AssetInitializer, push_consts: GsPushConstantRange, ubo_sets: &[&DescriptorSet; 2], depth_image: &GsDSAttachment) -> GsResult<GsPipeline<Graphics>> {
        // shaders
        let vertex_shader = GsShaderCI::from_source(GsPipelineStage::VERTEX, Path::new(VERTEX_SHADER_SOURCE_PATH), None, "[Vertex Shader]");
        let fragment_shader = GsShaderCI::from_source(GsPipelineStage::FRAGMENT, Path::new(FRAGMENT_SHADER_SOURCE_PATH), None, "[Fragment Shader]");
        let shader_infos = vec![vertex_shader, fragment_shader];
        let vertex_input_desc = Vertex::input_description();
        // pipeline
        let mut render_pass_builder = GsRenderPass::new(initializer);
        let first_subpass = render_pass_builder.new_subpass();
        let color_attachment = RenderAttachmentCI::<Present>::new(initializer)
            .op(vk::AttachmentLoadOp::CLEAR, vk::AttachmentStoreOp::STORE)
            .clear_value(DEFAULT_CLEAR_COLOR.clone());
        let depth_attachment = depth_image.attachment()
            .op(vk::AttachmentLoadOp::CLEAR, vk::AttachmentStoreOp::DONT_CARE);
        render_pass_builder.add_attachment(color_attachment, first_subpass);
        render_pass_builder.add_attachment(depth_attachment, first_subpass);
        // Dependency: external -> subpass, waiting for color attachment output.
        let dependency0 = RenderDependencyCI::new(SubpassStage::BeginExternal, SubpassStage::AtIndex(first_subpass))
            .stage(vk::PipelineStageFlags::BOTTOM_OF_PIPE, vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT)
            .access(vk::AccessFlags::MEMORY_READ, vk::AccessFlags::COLOR_ATTACHMENT_READ | vk::AccessFlags::COLOR_ATTACHMENT_WRITE)
            .with_flags(vk::DependencyFlags::BY_REGION);
        render_pass_builder.add_dependency(dependency0);
        // Dependency: subpass -> external, releasing the color attachment.
        let dependency1 = RenderDependencyCI::new(SubpassStage::AtIndex(first_subpass), SubpassStage::EndExternal)
            .stage(vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT, vk::PipelineStageFlags::BOTTOM_OF_PIPE)
            .access(vk::AccessFlags::COLOR_ATTACHMENT_READ | vk::AccessFlags::COLOR_ATTACHMENT_WRITE, vk::AccessFlags::MEMORY_READ)
            .with_flags(vk::DependencyFlags::BY_REGION);
        render_pass_builder.add_dependency(dependency1);
        let render_pass = render_pass_builder.build()?;
        let depth_stencil = GsDepthStencilState::setup(GsDepthStencilPrefab::EnableDepth)
;
        let pipeline_config = GfxPipelineConfig::new(shader_infos, vertex_input_desc, render_pass, initializer.screen_dimension())
            .with_depth_stencil(depth_stencil)
            .with_viewport(ViewportStateType::Dynamic { count: 1 })
            .with_descriptor_sets(ubo_sets)
            .with_push_constants(vec![push_consts])
            .finish();
        let mut pipeline_builder = GfxPipelineBuilder::new(initializer)?;
        let graphics_pipeline = pipeline_builder.build(pipeline_config)?;
        Ok(graphics_pipeline)
    }
    /// Creates one render-finished semaphore per swapchain frame.
    fn sync_resources(initializer: &AssetInitializer, pipeline: &GsPipeline<Graphics>) -> GsResult<Vec<GsSemaphore>> {
        // sync
        let mut present_availables = Vec::with_capacity(pipeline.frame_count());
        for _ in 0..pipeline.frame_count() {
            let semaphore = GsSemaphore::new(initializer)?;
            present_availables.push(semaphore);
        }
        Ok(present_availables)
    }
    /// Pre-records one command buffer per swapchain frame: render pass,
    /// dynamic viewport/scissor, pipeline bind and the two cube draws.
    fn commands(initializer: &AssetInitializer, pipeline: &GsPipeline<Graphics>, model_entity: &GsglTFEntity, cubes: &CubeResources, view_port: &CmdViewportInfo, scissor: &CmdScissorInfo) -> GsResult<(GsCommandPool, Vec<GsCommandBuffer>)> {
        let command_pool = GsCommandPool::new(initializer, DeviceQueueIdentifier::Graphics)?;
        let mut command_buffers = vec![];
        let command_buffer_count = pipeline.frame_count();
        let raw_commands = command_pool.allocate(CmdBufferUsage::UnitaryCommand, command_buffer_count)?;
        for (frame_index, command) in raw_commands.into_iter().enumerate() {
            let mut recorder = GsCmdRecorder::<Graphics>::new(initializer, pipeline, command);
            recorder.begin_record(vk::CommandBufferUsageFlags::SIMULTANEOUS_USE)?
                .begin_render_pass(pipeline, frame_index)
                .set_viewport(0, &[view_port.clone()])
                .set_scissor(0, &[scissor.clone()])
                .bind_pipeline();
            VulkanExample::record_commands(&recorder, model_entity, cubes)?;
            recorder.end_render_pass();
            let command_recorded = recorder.end_record()?;
            command_buffers.push(command_recorded);
        }
        Ok((command_pool, command_buffers))
    }
    /// Records the draw of the model twice — once per cube, each with its
    /// own descriptor set.
    fn record_commands(recorder: &GsCmdRecorder<Graphics>, model: &GsglTFEntity, cubes: &CubeResources) -> GsResult<()> {
        let model_render_params = GsglTFRenderParams {
            is_use_vertex : true,
            is_use_node_transform: true,
            is_push_materials : true,
            material_stage: GsPipelineStage::VERTEX,
        };
        // draw the model.
        model.record_command(recorder, &cubes.ubo_set[0], &[], Some(model_render_params.clone()))?;
        model.record_command(recorder, &cubes.ubo_set[1], &[], Some(model_render_params.clone()))?;
        Ok(())
    }
}
impl GraphicsRoutine for VulkanExample {
    /// Per-frame draw: refreshes uniforms if the camera moved, then submits
    /// the pre-recorded command buffer for this swapchain image.
    fn draw(&mut self, device: &GsDevice, device_available: &GsFence, image_available: &GsSemaphore, image_index: usize, _: f32) -> GsResult<&GsSemaphore> {
        if self.is_toggle_event {
            self.update_uniforms()?;
        }
        let submit_info = QueueSubmitBundle {
            wait_semaphores: &[image_available],
            sign_semaphores: &[&self.present_availables[image_index]],
            wait_stages : &[vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT],
            commands : &[&self.command_buffers[image_index]],
        };
        device.logic.submit_single(&submit_info, Some(device_available), DeviceQueueIdentifier::Graphics)?;
        // Return the semaphore presentation must wait on for this frame.
        return Ok(&self.present_availables[image_index])
    }
    /// Rebuilds the swapchain-dependent resources (pipeline, semaphores and
    /// command buffers), reusing the existing descriptor sets and model.
    fn reload_res(&mut self, initializer: AssetInitializer) -> GsResult<()> {
        let ubo_sets = &[&self.cubes.ubo_set[0], &self.cubes.ubo_set[1]];
        let push_consts = self.model_entity.pushconst_description(GsPipelineStage::VERTEX);
        self.pipeline = VulkanExample::pipelines(&initializer, push_consts, ubo_sets, &self.depth_attachment)?;
        self.present_availables = VulkanExample::sync_resources(&initializer, &self.pipeline)?;
        let (command_pool, command_buffers) = VulkanExample::commands(&initializer, &self.pipeline, &self.model_entity, &self.cubes, &self.view_port, &self.scissor)?;
        self.command_pool = command_pool;
        self.command_buffers = command_buffers;
        Ok(())
    }
    /// Handles input: ESC terminates; any other key/mouse activity drives the
    /// camera and flags the uniforms for update on the next draw.
    fn react_input(&mut self, inputer: &ActionNerve, delta_time: f32) -> SceneAction {
        if inputer.is_key_active() || inputer.is_mouse_active() {
            if inputer.is_key_pressed(GsKeycode::ESCAPE) {
                return SceneAction::Terminal
            }
            self.is_toggle_event = true;
            self.camera.react_input(inputer, delta_time);
        } else {
            self.is_toggle_event = false;
        }
        SceneAction::Rendering
    }
}
| 43.367397 | 240 | 0.675438 |
9b72aa309b804022f41578d718afd0f95a2b60b8 | 18,215 | use super::CelestialBody;
use crate::codec::{Decode, Encode};
use crate::{remote_type, Quaternion, RemoteObject, Vector3};
remote_type!(
/// Used to get flight telemetry for a vessel, by calling `Vessel::flight()`. All of
/// the information returned by this class is given in the reference frame passed to that method.
///
/// # Note
/// To get orbital information, such as the apoapsis or inclination, see `Orbit`.
object SpaceCenter.Flight {
properties: {
{
GForce {
/// Returns the current G force acting on the vessel in *g*.
///
/// **Game Scenes**: Flight
get: g_force -> f32
}
}
{
MeanAltitude {
/// Returns the altitude above sea level, in meters. Measured from the center of
/// mass of the vessel.
///
/// **Game Scenes**: Flight
get: mean_altitude -> f64
}
}
{
SurfaceAltitude {
/// Returns the altitude above the surface of the body or sea level, whichever
/// is closer, in meters. Measured from the center of mass of the vessel.
///
/// **Game Scenes**: Flight
get: surface_altitude -> f64
}
}
{
BedrockAltitude {
/// Returns the altitude above the surface of the body, in meters. When over water,
/// this is the altitude above the sea floor. Measured from the center of mass of
/// the vessel.
///
/// **Game Scenes**: Flight
get: bedrock_altitude -> f64
}
}
{
Elevation {
/// Returns the elevation of the terrain under the vessel, in meters. This is the
/// height of the terrain above sea level, and is negative when the vessel is
/// over the sea.
///
/// **Game Scenes**: Flight
get: elevation -> f64
}
}
{
Latitude {
/// Returns the [latitude](https://en.wikipedia.org/wiki/Latitude) of the vessel for
/// the body being orbited, in degrees.
///
/// **Game Scenes**: Flight
get: latitude -> f64
}
}
{
Longitude {
/// Returns the [longitude](https://en.wikipedia.org/wiki/Longitude) of the vessel for
/// the body being orbited, in degrees.
///
/// **Game Scenes**: Flight
get: longitude -> f64
}
}
{
Velocity {
/// Returns the velocity of the vessel, in the reference frame `ReferenceFrame`.
///
/// **Game Scenes**: Flight
///
/// # Return
/// The velocity as a vector. The vector points in the direction of travel, and
/// its magnitude is the speed of the vessel in meters per second.
get: velocity -> Vector3
}
}
{
Speed {
/// Returns the speed of the vessel in meters per second, in the reference
/// frame `ReferenceFrame`.
///
/// **Game Scenes**: Flight
get: speed -> f64
}
}
{
HorizontalSpeed {
/// Returns the horizontal speed of the vessel in meters per second, in the reference
/// frame `ReferenceFrame`.
///
/// **Game Scenes**: Flight
get: horizontal_speed -> f64
}
}
{
VerticalSpeed {
/// Returns the vertical speed of the vessel in meters per second, in the reference
/// frame `ReferenceFrame`.
///
/// **Game Scenes**: Flight
get: vertical_speed -> f64
}
}
{
CenterOfMass {
/// Returns the position of the center of mass of the vessel, in the
/// reference frame `ReferenceFrame`.
///
/// **Game Scenes**: Flight
///
/// # Return
/// The position as a vector.
get: center_of_mass -> Vector3
}
}
{
Rotation {
/// Returns the rotation of the vessel, in the reference frame `ReferenceFrame`
///
/// **Game Scenes**: Flight
///
/// # Return
/// The rotation as a quaternion of the form (*x*,*y*,*z*,*w*).
get: rotation -> Quaternion
}
}
{
Direction {
/// Returns the direction that the vessel is pointing in, in the
/// reference frame `ReferenceFrame`.
///
/// **Game Scenes**: Flight
///
/// # Return
/// The direction as a unit vector.
get: direction -> Vector3
}
}
{
Pitch {
/// Returns the pitch of the vessel relative to the horizon, in degrees.
/// A value between -90° and +90°.
///
/// **Game Scenes**: Flight
get: pitch -> f32
}
}
{
Heading {
/// Returns the heading of the vessel (its angle relative to north), in degrees.
/// A value between 0° and 360°.
///
/// **Game Scenes**: Flight
get: heading -> f32
}
}
{
Roll {
/// Returns the roll of the vessel relative to the horizon, in degrees.
/// A value between -180° and +180°.
///
/// **Game Scenes**: Flight
get: roll -> f32
}
}
{
Prograde {
/// Returns the prograde direction of the vessels orbit, in the
/// reference frame `ReferenceFrame`.
///
/// **Game Scenes**: Flight
///
/// # Return
/// The direction as a unit vector.
get: prograde -> Vector3
}
}
{
Retrograde {
/// Returns the retrograde direction of the vessels orbit, in the
/// reference frame `ReferenceFrame`.
///
/// **Game Scenes**: Flight
///
/// # Return
/// The direction as a unit vector.
get: retrograde -> Vector3
}
}
{
Normal {
/// Returns the normal direction of the vessels orbit, in the
/// reference frame `ReferenceFrame`.
///
/// **Game Scenes**: Flight
///
/// # Return
/// The direction as a unit vector.
get: normal -> Vector3
}
}
{
AntiNormal {
/// Returns the direction opposite to the normal of the vessels orbit, in the
/// reference frame `ReferenceFrame`.
///
/// **Game Scenes**: Flight
///
/// # Return
/// The direction as a unit vector.
get: anti_normal -> Vector3
}
}
{
Radial {
/// Returns the radial direction of the vessels orbit, in the
/// reference frame `ReferenceFrame`.
///
/// **Game Scenes**: Flight
///
/// # Return
/// The direction as a unit vector.
get: radial -> Vector3
}
}
{
AntiRadial {
/// Returns the direction opposite to the radial of the vessels orbit, in the
/// reference frame `ReferenceFrame`.
///
/// **Game Scenes**: Flight
///
/// # Return
/// The direction as a unit vector.
get: anti_radial -> Vector3
}
}
{
AtmosphereDensity {
/// Returns the current density of the atmosphere around the vessel,
/// in kg/m<sup>3</sup>.
///
/// **Game Scenes**: Flight
get: atmosphere_density -> f32
}
}
{
DynamicPressure {
/// Returns the dynamic pressure acting on the vessel, in Pascals. This is a
/// measure of the strength of the aerodynamic forces. It is equal to
/// `1/2 · air density · velocity<sup>2</sup>`. It is commonly denoted Q.
///
/// **Game Scenes**: Flight
get: dynamic_pressure -> f32
}
}
{
StaticPressure {
/// Returns the static atmospheric pressure acting on the vessel, in Pascals.
///
/// **Game Scenes**: Flight
get: static_pressure -> f32
}
}
{
StaticPressureAtMSL {
/// Returns the static pressure at mean sea level, in Pascals.
///
/// **Game Scenes**: Flight
get: static_pressure_at_msl -> f32
}
}
{
AerodynamicForce {
/// Returns the total aerodynamic forces acting on the vessel,
/// in reference frame `ReferenceFrame`.
///
/// **Game Scenes**: Flight
///
/// # Return
/// A vector pointing in the direction that the force acts, with its magnitude
/// equal to the strength of the force in Newtons.
get: aerodynamic_force -> Vector3
}
}
{
Lift {
/// Returns the [aerodynamic lift](https://en.wikipedia.org/wiki/Aerodynamic_force)
/// currently acting on the vessel.
///
/// **Game Scenes**: Flight
///
/// # Return
/// A vector pointing in the direction that the force acts, with its magnitude
/// equal to the strength of the force in Newtons.
get: lift -> Vector3
}
}
{
Drag {
/// Returns the [aerodynamic drag](https://en.wikipedia.org/wiki/Aerodynamic_force)
/// currently acting on the vessel.
///
/// **Game Scenes**: Flight
///
/// # Return
/// A vector pointing in the direction that the force acts, with its magnitude
/// equal to the strength of the force in Newtons.
get: drag -> Vector3
}
}
{
SpeedOfSound {
/// Returns the speed of sound, in the atmosphere around the vessel, in m/s.
///
/// **Game Scenes**: Flight
get: speed_of_sound -> f32
}
}
{
Mach {
/// Returns the speed of the vessel, in multiples of the speed of sound.
///
/// **Game Scenes**: Flight
get: mach -> f32
}
}
{
ReynoldsNumber {
/// Returns the vessels Reynolds number.
///
/// **Game Scenes**: Flight
///
/// # Note
/// Requires Ferram Aerospace Research.
get: reynolds_number -> f32
}
}
{
TrueAirSpeed {
/// Returns the [true air speed](https://en.wikipedia.org/wiki/True_airspeed)
/// of the vessel, in meters per second.
///
/// **Game Scenes**: Flight
get: true_air_speed -> f32
}
}
{
EquivalentAirSpeed {
/// Returns the [equivalent air speed](https://en.wikipedia.org/wiki/Equivalent_airspeed)
/// of the vessel, in meters per second.
///
/// **Game Scenes**: Flight
get: equivalent_air_speed -> f32
}
}
{
TerminalVelocity {
/// Returns an estimate of the current terminal velocity of the vessel,
/// in meters per second. This is the speed at which the drag forces cancel
/// out the force of gravity.
///
/// **Game Scenes**: Flight
get: terminal_velocity -> f32
}
}
{
AngleOfAttack {
/// Returns the pitch angle between the orientation of the vessel and its
/// velocity vector, in degrees.
///
/// **Game Scenes**: Flight
get: angle_of_attack -> f32
}
}
{
SideslipAngle {
/// Returns the yaw angle between the orientation of the vessel and its
/// velocity vector, in degrees.
///
/// **Game Scenes**: Flight
get: sideslip_angle -> f32
}
}
{
TotalAirTemperature {
/// Returns the [total air temperature](https://en.wikipedia.org/wiki/Total_air_temperature)
/// of the atmosphere around the vessel, in Kelvin. This includes the
/// `Flight::static_air_temperature()` and the vessel’s kinetic energy.
///
/// **Game Scenes**: Flight
get: total_air_temperature -> f32
}
}
{
StaticAirTemperature {
/// Returns the [static (ambient) temperature](https://en.wikipedia.org/wiki/Total_air_temperature)
/// of the atmosphere around the vessel, in Kelvin.
///
/// **Game Scenes**: Flight
get: static_air_temperature -> f32
}
}
{
StallFraction {
/// Returns the current amount of stall, between 0 and 1. A value greater than
/// 0.005 indicates a minor stall and a value greater than 0.5 indicates
/// a large-scale stall.
///
/// **Game Scenes**: Flight
///
/// # Note
/// Requires Ferram Aerospace Research.
get: stall_fraction -> f32
}
}
{
DragCoefficient {
/// Returns the coefficient of drag. This is the amount of drag produced by the
/// vessel. It depends on air speed, air density and wing area.
///
/// **Game Scenes**: Flight
///
/// # Note
/// Requires Ferram Aerospace Research.
get: drag_coefficient -> f32
}
}
{
LiftCoefficient {
/// Returns the coefficient of lift. This is the amount of lift produced by the
/// vessel. It depends on air speed, air density and wing area.
///
/// **Game Scenes**: Flight
///
/// # Note
/// Requires Ferram Aerospace Research.
get: lift_coefficient -> f32
}
}
{
BallisticCoefficient {
/// Returns the [ballistic coefficient](https://en.wikipedia.org/wiki/Ballistic_coefficient).
///
/// **Game Scenes**: Flight
///
/// # Note
/// Requires Ferram Aerospace Research.
get: ballistic_coefficient -> f32
}
}
{
ThrustSpecificFuelConsumption {
/// Returns the efficiency of the engines, with a lower value indicating a
/// more efficient vessel. This value is the number of Newtons of fuel that
/// are burned, per hour, to produce one newton of thrust.
///
/// **Game Scenes**: Flight
///
/// # Note
/// Requires Ferram Aerospace Research.
get: thrust_specific_fuel_consumption -> f32
}
}
}
methods: {
{
/// Simulate and return the total aerodynamic forces acting on the vessel,
/// if it were to be traveling with the given velocity at the given position
/// in the atmosphere of the given celestial body.
///
/// **Game Scenes**: Flight
///
/// # Arguments
/// * `body` - The celestial body.
/// * `position` - The vessel's position as a vector on the body.
/// * `velocity` - The vessel's velocity as a vector on the body.
///
/// # Return
/// A vector pointing in the direction that the force acts, with its magnitude equal
/// to the strength of the force in Newtons.
fn simulate_aerodynamic_force_at(body: &CelestialBody, position: Vector3, velocity: Vector3) -> Vector3 {
SimulateAerodynamicForceAt(body, position, velocity)
}
}
}
});
| 35.927022 | 117 | 0.436453 |
bb479a36edc780154d92acd2b4f0f7fdccd93240 | 2,675 | use serde::{Serialize, Deserialize};
use reqwest::blocking::{Request, Response};
use super::{AuthenticatedBuildRequestBuilder, Changeset, FromResponse, IntoRequest, WithSuccess};
/// A single custom emoji as returned by the Rocket.Chat REST API.
#[derive(Debug)]
#[derive(Deserialize)]
#[serde(rename_all="camelCase")]
pub struct EmojiData {
    // Server-side document id (JSON field `_id`).
    #[serde(rename="_id")]
    id: String,
    // Primary emoji name, e.g. "partyparrot".
    name: String,
    // Alternative names the emoji can be referenced by.
    aliases: Vec<String>,
    // File extension of the emoji image (e.g. "png", "gif").
    extension: String,
    // Last-modified timestamp (JSON field `_updatedAt`).
    #[serde(rename="_updatedAt")]
    updated_at: String
}
/// Payload describing a custom emoji to create (raw image bytes plus names).
/// NOTE(review): this type is not referenced anywhere in the visible module —
/// `CreateRequest` duplicates its fields — so it may be dead code; confirm
/// against other modules before removing.
pub struct NewEmoji {
    // Raw bytes of the emoji image file.
    emoji: Vec<u8>,
    name: String,
    aliases: Vec<String>
}
/// Body of the `emoji-custom.list` response: the emoji set expressed as a
/// `Changeset` (type defined elsewhere in this crate).
#[derive(Debug)]
#[derive(Deserialize)]
pub struct EmojiResponseData {
    emojis: Changeset<EmojiData>
}
/// Request for `GET api/v1/emoji-custom.list`: fetches the server's custom
/// emoji. Carries no parameters.
pub struct ListRequest;

impl IntoRequest for ListRequest {
    fn into_request(self, b: &impl AuthenticatedBuildRequestBuilder) -> Request {
        b.get("api/v1/emoji-custom.list").build().unwrap()
    }
}

impl FromResponse for ListRequest {
    // `success` flag plus the emoji changeset returned by the server.
    type Output = WithSuccess<EmojiResponseData>;

    fn from_response(response: Response) -> Option<Self::Output> {
        // Deserializes into `Option<Output>`: a JSON `null` body yields `None`;
        // a body that fails to deserialize panics via `unwrap`.
        response.json().unwrap()
    }
}
/// Request for `POST api/v1/emoji-custom.create`: registers a new custom emoji.
#[derive(Serialize)]
pub struct CreateRequest {
    // Raw bytes of the emoji image file.
    emoji: Vec<u8>,
    name: String,
    aliases: Vec<String>
}

impl IntoRequest for CreateRequest {
    fn into_request(self, b: &impl AuthenticatedBuildRequestBuilder) -> Request {
        // NOTE(review): sent as a URL-encoded form; the server endpoint likely
        // expects the image as a multipart file upload — confirm this works.
        b.post("api/v1/emoji-custom.create").form(&self).build().unwrap()
    }
}

impl FromResponse for CreateRequest {
    // Only a `success` flag; no payload.
    type Output = WithSuccess<()>;

    fn from_response(response: Response) -> Option<Self::Output> {
        // `null` body -> `None`; malformed body panics via `unwrap`.
        response.json().unwrap()
    }
}
/// Request for `POST api/v1/emoji-custom.delete`: removes a custom emoji by id.
#[derive(Serialize)]
#[serde(rename_all="camelCase")]
pub struct DeleteRequest {
    // Serialized as `emojiId` due to the camelCase rename.
    emoji_id: String
}

impl IntoRequest for DeleteRequest {
    fn into_request(self, b: &impl AuthenticatedBuildRequestBuilder) -> Request {
        // NOTE(review): this sends a JSON body while Create/Update use `form` —
        // verify the inconsistency is intentional.
        b.post("api/v1/emoji-custom.delete").json(&self).build().unwrap()
    }
}

impl FromResponse for DeleteRequest {
    type Output = WithSuccess<()>;

    fn from_response(response: Response) -> Option<Self::Output> {
        // `null` body -> `None`; malformed body panics via `unwrap`.
        response.json().unwrap()
    }
}
/// Request for `POST api/v1/emoji-custom.update`: updates an existing custom
/// emoji. Optional fields are omitted from the payload when `None`.
#[derive(Serialize)]
#[serde(rename_all="camelCase")]
pub struct UpdateRequest {
    // Server-side document id (serialized as `_id`).
    #[serde(rename="_id")]
    id: String,
    // Replacement image bytes; omitted if unchanged.
    #[serde(skip_serializing_if="Option::is_none")]
    emoji: Option<Vec<u8>>,
    name: String,
    // Replacement aliases; omitted if unchanged.
    #[serde(skip_serializing_if="Option::is_none")]
    aliases: Option<Vec<String>>
}

impl IntoRequest for UpdateRequest {
    fn into_request(self, b: &impl AuthenticatedBuildRequestBuilder) -> Request {
        b.post("api/v1/emoji-custom.update").form(&self).build().unwrap()
    }
}

impl FromResponse for UpdateRequest {
    type Output = WithSuccess<()>;

    fn from_response(response: Response) -> Option<Self::Output> {
        // `null` body -> `None`; malformed body panics via `unwrap`.
        response.json().unwrap()
    }
}
| 23.672566 | 97 | 0.690841 |
e66ea4a54329129e83c4968338802f54cdfb0b80 | 3,272 | use std::io;
use std::net::SocketAddr;
use std::sync::Arc;
use async_trait::async_trait;
use bytes::{BufMut, BytesMut};
use sha2::{Digest, Sha224};
use socket2::{Domain, Socket, Type};
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
use crate::{
common::dns_client::DnsClient,
// common::tls::wrap_tls,
proxy::{stream::SimpleStream, ProxyStream, ProxyTcpHandler},
session::Session,
};
/// Outbound trojan-protocol TCP handler configuration.
pub struct Handler {
    // Trojan server hostname; resolved through `dns_client` when no stream is supplied.
    pub address: String,
    pub port: u16,
    // Plaintext password; hex(SHA-224(password)) is sent in each request header.
    pub password: String,
    // pub domain: String,
    // Local address outgoing sockets (and DNS lookups) are bound to.
    pub bind_addr: SocketAddr,
    pub dns_client: Arc<DnsClient>,
}
#[async_trait]
impl ProxyTcpHandler for Handler {
fn name(&self) -> &str {
return super::NAME;
}
fn tcp_connect_addr(&self) -> Option<(String, u16, SocketAddr)> {
Some((self.address.clone(), self.port, self.bind_addr.clone()))
}
async fn handle<'a>(
&'a self,
sess: &'a Session,
stream: Option<Box<dyn ProxyStream>>,
) -> io::Result<Box<dyn ProxyStream>> {
if let Some(mut stream) = stream {
let mut buf = BytesMut::new();
let password = Sha224::digest(self.password.as_bytes());
let password = hex::encode(&password[..]);
buf.put_slice(password.as_bytes());
buf.put_slice(b"\r\n");
buf.put_u8(0x01); // tcp
sess.destination.write_into(&mut buf)?;
buf.put_slice(b"\r\n");
stream.write_all(&buf[..]).await?;
return Ok(stream);
}
let ips = match self
.dns_client
.lookup_with_bind(String::from(&self.address), &self.bind_addr)
.await
{
Ok(ips) => ips,
Err(err) => {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("lookup {} failed: {}", &self.address, err),
));
}
};
let mut last_err = None;
for ip in ips {
let socket = Socket::new(Domain::ipv4(), Type::stream(), None)?;
socket.bind(&self.bind_addr.into())?;
let addr = SocketAddr::new(ip, self.port);
match TcpStream::connect_std(socket.into_tcp_stream(), &addr).await {
Ok(mut stream) => {
// let mut stream = wrap_tls(stream, &self.domain).await?;
let mut buf = BytesMut::new();
let password = Sha224::digest(self.password.as_bytes());
let password = hex::encode(&password[..]);
buf.put_slice(password.as_bytes());
buf.put_slice(b"\r\n");
buf.put_u8(0x01); // tcp
sess.destination.write_into(&mut buf)?;
buf.put_slice(b"\r\n");
stream.write_all(&buf[..]).await?;
return Ok(Box::new(SimpleStream(stream)));
}
Err(e) => {
last_err = Some(e);
}
}
}
Err(last_err.unwrap_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
"could not resolve to any address",
)
}))
}
}
| 31.461538 | 81 | 0.507335 |
8f26aaf4e91affc118beb33b9938513182c2fb35 | 23,245 | #![feature(inner_deref)]
use std::fs::{self, File};
use std::io::{self, BufRead, Write};
use std::ops::Not;
use std::path::{Path, PathBuf};
use std::process::Command;
// Minimum xargo version (major, minor, patch) required by `setup`.
const XARGO_MIN_VERSION: (u32, u32, u32) = (0, 3, 19);
// Help text printed for `-h`/`--help`; doubles as the CLI reference.
const CARGO_MIRI_HELP: &str = r#"Interprets bin crates and tests in Miri
Usage:
    cargo miri [subcommand] [options] [--] [<miri opts>...] [--] [<program opts>...]
Subcommands:
    run Run binaries (default)
    test Run tests
    setup Only perform automatic setup, but without asking questions (for getting a proper libstd)
Common options:
    -h, --help Print this message
    --features Features to compile for the package
    -V, --version Print version info and exit
Other [options] are the same as `cargo check`. Everything after the first "--" is
passed verbatim to Miri, which will pass everything after the second "--" verbatim
to the interpreted program.
"#;
/// Which cargo-miri subcommand was requested on the command line.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum MiriCommand {
    /// `cargo miri run` (also the default): interpret binaries.
    Run,
    /// `cargo miri test`: interpret tests.
    Test,
    /// `cargo miri setup`: only build the Miri libstd sysroot, without prompting.
    Setup,
}
/// Prints the CLI help text to stdout.
fn show_help() {
    println!("{}", CARGO_MIRI_HELP);
}
/// Prints the package version plus the commit hash and date baked in at
/// build time (the `VERGEN_*` vars) to stdout.
fn show_version() {
    println!(
        "miri {} ({} {})",
        env!("CARGO_PKG_VERSION"),
        env!("VERGEN_SHA_SHORT"),
        env!("VERGEN_COMMIT_DATE")
    );
}
/// Prints a fatal error to stderr and terminates the process with exit code 1.
fn show_error(msg: String) -> ! {
    eprintln!("fatal error: {}", msg);
    std::process::exit(1)
}
/// Returns `true` when the given `--flag` appears among the process arguments
/// before the first `--` separator.
fn has_arg_flag(name: &str) -> bool {
    std::env::args()
        .take_while(|arg| arg != "--")
        .any(|arg| arg == name)
}
/// Gets the value of a `--flag`, accepting both the `--flag value` and the
/// `--flag=value` spellings. Searching stops at the first `--` separator;
/// returns `None` if the flag is absent.
fn get_arg_flag_value(name: &str) -> Option<String> {
    let mut args = std::env::args().take_while(|val| val != "--");
    while let Some(arg) = args.next() {
        // Skip anything that does not start with `name`.
        let suffix = match arg.strip_prefix(name) {
            Some(rest) => rest,
            None => continue,
        };
        if suffix.is_empty() {
            // Exactly `name`; the value is the next argument.
            return args.next();
        }
        if let Some(value) = suffix.strip_prefix('=') {
            // `name=value` form.
            return Some(value.to_owned());
        }
        // `name` is only a prefix of this argument (e.g. `--foo-bar` vs `--foo`);
        // keep looking.
    }
    None
}
/// Returns the path to the `miri` binary: the file named `miri` sitting next
/// to the currently running executable.
fn find_miri() -> PathBuf {
    let mut miri_path = std::env::current_exe().expect("current executable path invalid");
    // Keep the directory, swap the final component for `miri`.
    miri_path.set_file_name("miri");
    miri_path
}
/// Builds a `Command` for cargo, honoring the `CARGO` env var (set e.g. by
/// bootstrap) and falling back to `cargo` on the PATH.
fn cargo() -> Command {
    let program = std::env::var("CARGO").unwrap_or_else(|_| "cargo".to_string());
    Command::new(program)
}
/// Builds a `Command` for xargo-check, honoring the `XARGO_CHECK` env var
/// (set e.g. by bootstrap) and falling back to `xargo-check` on the PATH.
fn xargo_check() -> Command {
    let program = std::env::var("XARGO_CHECK").unwrap_or_else(|_| "xargo-check".to_string());
    Command::new(program)
}
/// Enumerates the build targets (bins, libs, tests, ...) of the current
/// package via `cargo metadata`, honoring an optional `--manifest-path` flag.
/// Aborts the process if metadata cannot be obtained or if more than one
/// package matches (i.e. a workspace, which is unsupported).
fn list_targets() -> impl Iterator<Item = cargo_metadata::Target> {
    // We need to get the manifest, and then the metadata, to enumerate targets.
    let manifest_path =
        get_arg_flag_value("--manifest-path").map(|m| Path::new(&m).canonicalize().unwrap());
    let mut cmd = cargo_metadata::MetadataCommand::new();
    if let Some(ref manifest_path) = manifest_path {
        cmd.manifest_path(manifest_path);
    }
    let mut metadata = if let Ok(metadata) = cmd.exec() {
        metadata
    } else {
        show_error(format!("Could not obtain Cargo metadata; likely an ill-formed manifest"));
    };
    let current_dir = std::env::current_dir();
    // Locate our package: either the one whose manifest matches
    // `--manifest-path`, or the one whose manifest lives in the current dir.
    let package_index = metadata
        .packages
        .iter()
        .position(|package| {
            let package_manifest_path = Path::new(&package.manifest_path);
            if let Some(ref manifest_path) = manifest_path {
                package_manifest_path == manifest_path
            } else {
                let current_dir = current_dir.as_ref().expect("could not read current directory");
                let package_manifest_directory = package_manifest_path
                    .parent()
                    .expect("could not find parent directory of package manifest");
                package_manifest_directory == current_dir
            }
        })
        .unwrap_or_else(|| {
            show_error(format!(
                "This seems to be a workspace, which is not supported by cargo-miri"
            ))
        });
    let package = metadata.packages.remove(package_index);
    // Finally we got the list of targets to build
    package.targets.into_iter()
}
/// Make sure that the `miri` and `rustc` binary are from the same sysroot.
/// This can be violated e.g. when miri is locally built and installed with a different
/// toolchain than what is used when `cargo miri` is run.
fn test_sysroot_consistency() {
    // Runs `<cmd> --print sysroot` and returns the canonicalized result;
    // panics with diagnostics if the command fails.
    fn get_sysroot(mut cmd: Command) -> PathBuf {
        let out = cmd
            .arg("--print")
            .arg("sysroot")
            .output()
            .expect("Failed to run rustc to get sysroot info");
        let stdout = String::from_utf8(out.stdout).expect("stdout is not valid UTF-8");
        let stderr = String::from_utf8(out.stderr).expect("stderr is not valid UTF-8");
        assert!(
            out.status.success(),
            "Bad status code {} when getting sysroot info via {:?}.\nstdout:\n{}\nstderr:\n{}",
            out.status,
            cmd,
            stdout,
            stderr,
        );
        let stdout = stdout.trim();
        PathBuf::from(stdout)
            .canonicalize()
            .unwrap_or_else(|_| panic!("Failed to canonicalize sysroot: {}", stdout))
    }
    // Do not check sysroots if we got built as part of a Rust distribution.
    // During `bootstrap`, the sysroot does not match anyway, and then some distros
    // play symlink tricks so the sysroots may be different even for the final stage
    // (see <https://github.com/mozilla/nixpkgs-mozilla/issues/198>).
    if option_env!("RUSTC_STAGE").is_some() {
        return;
    }
    let rustc_sysroot = get_sysroot(Command::new("rustc"));
    let miri_sysroot = get_sysroot(Command::new(find_miri()));
    if rustc_sysroot != miri_sysroot {
        show_error(format!(
            "miri was built for a different sysroot than the rustc in your current toolchain.\n\
             Make sure you use the same toolchain to run miri that you used to build it!\n\
             rustc sysroot: `{}`\n\
             miri sysroot: `{}`",
            rustc_sysroot.display(),
            miri_sysroot.display()
        ));
    }
}
/// Queries `xargo --version` and parses the result into a
/// `(major, minor, patch)` triple. Returns `None` if xargo is missing, fails
/// to run, or does not identify itself as `xargo` (i.e. is a fork); panics if
/// the output format itself is malformed.
fn xargo_version() -> Option<(u32, u32, u32)> {
    let out = xargo_check().arg("--version").output().ok()?;
    if !out.status.success() {
        return None;
    }
    // Parse output. The first line looks like "xargo 0.3.12 (b004f1c 2018-12-13)".
    // Note that xargo prints its version banner to *stderr*.
    let line = out
        .stderr
        .lines()
        .next()
        .expect("malformed `xargo --version` output: not at least one line")
        .expect("malformed `xargo --version` output: error reading first line");
    let (name, version) = {
        let mut split = line.split(' ');
        (
            split.next().expect("malformed `xargo --version` output: empty"),
            split.next().expect("malformed `xargo --version` output: not at least two words"),
        )
    };
    if name != "xargo" {
        // This is some fork of xargo
        return None;
    }
    // Parse the dotted version number; any deviation from `X.Y.Z` is a bug in
    // our assumptions about xargo's output, so we panic loudly.
    let mut version_pieces = version.split('.');
    let major = version_pieces
        .next()
        .expect("malformed `xargo --version` output: not a major version piece")
        .parse()
        .expect("malformed `xargo --version` output: major version is not an integer");
    let minor = version_pieces
        .next()
        .expect("malformed `xargo --version` output: not a minor version piece")
        .parse()
        .expect("malformed `xargo --version` output: minor version is not an integer");
    let patch = version_pieces
        .next()
        .expect("malformed `xargo --version` output: not a patch version piece")
        .parse()
        .expect("malformed `xargo --version` output: patch version is not an integer");
    // Was `!version_pieces.next().is_none()` — a needless double negation.
    if version_pieces.next().is_some() {
        panic!("malformed `xargo --version` output: more than three pieces in version");
    }
    Some((major, minor, patch))
}
/// Runs `cmd`, whose purpose is described by `text`. When `ask` is true the
/// user is prompted on stdin first (empty/"y"/"yes" proceeds, "n"/"no" aborts,
/// anything else is rejected); otherwise the command is just announced.
/// Aborts the whole process if the command fails.
fn ask_to_run(mut cmd: Command, ask: bool, text: &str) {
    if ask {
        let mut buf = String::new();
        print!("I will run `{:?}` to {}. Proceed? [Y/n] ", cmd, text);
        io::stdout().flush().unwrap();
        io::stdin().read_line(&mut buf).unwrap();
        match buf.trim().to_lowercase().as_ref() {
            // Proceed.
            "" | "y" | "yes" => {}
            "n" | "no" => show_error(format!("Aborting as per your request")),
            a => show_error(format!("I do not understand `{}`", a)),
        };
    } else {
        println!("Running `{:?}` to {}.", cmd, text);
    }
    if cmd.status().expect(&format!("failed to execute {:?}", cmd)).success().not() {
        show_error(format!("Failed to {}", text));
    }
}
/// Performs the setup required to make `cargo miri` work: Getting a custom-built libstd. Then sets
/// `MIRI_SYSROOT`. Skipped if `MIRI_SYSROOT` is already set, in which case we expect the user has
/// done all this already.
///
/// When `ask_user` is true, the user is prompted before any command is run;
/// `cargo miri setup` passes false to run unconditionally.
fn setup(ask_user: bool) {
    if std::env::var("MIRI_SYSROOT").is_ok() {
        if !ask_user {
            println!("WARNING: MIRI_SYSROOT already set, not doing anything.")
        }
        return;
    }
    // First, we need xargo.
    if xargo_version().map_or(true, |v| v < XARGO_MIN_VERSION) {
        if std::env::var("XARGO_CHECK").is_ok() {
            // The user manually gave us a xargo binary; don't do anything automatically.
            show_error(format!("Your xargo is too old; please upgrade to the latest version"))
        }
        let mut cmd = cargo();
        cmd.args(&["install", "xargo", "-f"]);
        ask_to_run(cmd, ask_user, "install a recent enough xargo");
    }
    // Determine where the rust sources are located. `XARGO_RUST_SRC` env var trumps everything.
    let rust_src = match std::env::var("XARGO_RUST_SRC") {
        Ok(val) => PathBuf::from(val),
        Err(_) => {
            // Check for `rust-src` rustup component.
            let sysroot = Command::new("rustc")
                .args(&["--print", "sysroot"])
                .output()
                .expect("failed to get rustc sysroot")
                .stdout;
            let sysroot = std::str::from_utf8(&sysroot).unwrap();
            let sysroot = Path::new(sysroot.trim_end_matches('\n'));
            // First try: `$SYSROOT/lib/rustlib/src/rust`; test if that contains `Cargo.lock`.
            let rustup_src = sysroot.join("lib").join("rustlib").join("src").join("rust");
            let base_dir = if rustup_src.join("Cargo.lock").exists() {
                // Just use this.
                rustup_src
            } else {
                // Maybe this is a local toolchain built with `x.py` and linked into `rustup`?
                // Second try: `$SYSROOT/../../..`; test if that contains `x.py`.
                let local_src = sysroot.parent().and_then(Path::parent).and_then(Path::parent);
                match local_src {
                    Some(local_src) if local_src.join("x.py").exists() => {
                        // Use this.
                        PathBuf::from(local_src)
                    }
                    _ => {
                        // Fallback: Ask the user to install the `rust-src` component, and use that.
                        let mut cmd = Command::new("rustup");
                        cmd.args(&["component", "add", "rust-src"]);
                        ask_to_run(
                            cmd,
                            ask_user,
                            "install the rustc-src component for the selected toolchain",
                        );
                        rustup_src
                    }
                }
            };
            base_dir.join("src") // Xargo wants the src-subdir
        }
    };
    if !rust_src.exists() {
        show_error(format!("Given Rust source directory `{}` does not exist.", rust_src.display()));
    }
    // Next, we need our own libstd. We will do this work in whatever is a good cache dir for this platform.
    let dirs = directories::ProjectDirs::from("org", "rust-lang", "miri").unwrap();
    let dir = dirs.cache_dir();
    if !dir.exists() {
        fs::create_dir_all(&dir).unwrap();
    }
    // The interesting bit: Xargo.toml
    File::create(dir.join("Xargo.toml"))
        .unwrap()
        .write_all(
            br#"
[dependencies.std]
default_features = false
# We need the `panic_unwind` feature because we use the `unwind` panic strategy.
# Using `abort` works for libstd, but then libtest will not compile.
features = ["panic_unwind"]
[dependencies.test]
"#,
        )
        .unwrap();
    // The boring bits: a dummy project for xargo.
    // FIXME: With xargo-check, can we avoid doing this?
    File::create(dir.join("Cargo.toml"))
        .unwrap()
        .write_all(
            br#"
[package]
name = "miri-xargo"
description = "A dummy project for building libstd with xargo."
version = "0.0.0"
[lib]
path = "lib.rs"
"#,
        )
        .unwrap();
    File::create(dir.join("lib.rs")).unwrap();
    // Prepare xargo invocation.
    let target = get_arg_flag_value("--target");
    let print_sysroot = !ask_user && has_arg_flag("--print-sysroot"); // whether we just print the sysroot path
    let mut command = xargo_check();
    command.arg("build").arg("-q");
    command.current_dir(&dir);
    command.env("RUSTFLAGS", miri::miri_default_args().join(" "));
    command.env("XARGO_HOME", &dir);
    command.env("XARGO_RUST_SRC", &rust_src);
    // Handle target flag.
    if let Some(ref target) = target {
        command.arg("--target").arg(&target);
    }
    // Finally run it!
    if command.status().expect("failed to run xargo").success().not() {
        show_error(format!("Failed to run xargo"));
    }
    // That should be it! But we need to figure out where xargo built stuff.
    // Unfortunately, it puts things into a different directory when the
    // architecture matches the host.
    let is_host = match target {
        None => true,
        Some(target) => target == rustc_version::version_meta().unwrap().host,
    };
    let sysroot = if is_host { dir.join("HOST") } else { PathBuf::from(dir) };
    std::env::set_var("MIRI_SYSROOT", &sysroot); // pass the env var to the processes we spawn, which will turn it into "--sysroot" flags
    if print_sysroot {
        // Print just the sysroot and nothing else; this way we do not need any escaping.
        println!("{}", sysroot.display());
    } else if !ask_user {
        println!("A libstd for Miri is now available in `{}`.", sysroot.display());
    }
}
/// Entry point. `cargo-miri` is invoked two ways: as `cargo miri ...` by the
/// user (dispatches to `in_cargo_miri`), and as `cargo-miri rustc ...` by
/// cargo itself via `RUSTC_WRAPPER` (dispatches to `inside_cargo_rustc`).
fn main() {
    // Check for version and help flags even when invoked as `cargo-miri`.
    if std::env::args().any(|a| a == "--help" || a == "-h") {
        show_help();
        return;
    }
    if std::env::args().any(|a| a == "--version" || a == "-V") {
        show_version();
        return;
    }
    match std::env::args().nth(1).as_deref() {
        // Called as `cargo miri`: run `cargo check` per target with `RUSTC` set
        // to this binary, so we come back through the `rustc` arm below and
        // dispatch to `rustc` or `miri` as appropriate.
        Some("miri") => in_cargo_miri(),
        // Called by cargo as the `RUSTC_WRAPPER`: dependencies go to `rustc`,
        // the final test/binary goes to `miri`.
        Some("rustc") => inside_cargo_rustc(),
        _ => show_error(format!("must be called with either `miri` or `rustc` as first argument.")),
    }
}
/// Implements `cargo miri <subcommand>`: parses the subcommand, ensures the
/// Miri sysroot exists (via `setup`), then runs `cargo check` once per
/// applicable target with `RUSTC_WRAPPER` pointing back at this binary.
fn in_cargo_miri() {
    // `skip` is how many leading argv entries to drop before forwarding the
    // remaining user arguments (2 = `cargo-miri miri`, 3 = plus the subcommand).
    let (subcommand, skip) = match std::env::args().nth(2).as_deref() {
        Some("test") => (MiriCommand::Test, 3),
        Some("run") => (MiriCommand::Run, 3),
        Some("setup") => (MiriCommand::Setup, 3),
        // Default command, if there is an option or nothing.
        Some(s) if s.starts_with("-") => (MiriCommand::Run, 2),
        None => (MiriCommand::Run, 2),
        // Invalid command.
        Some(s) => show_error(format!("Unknown command `{}`", s)),
    };
    let verbose = has_arg_flag("-v");
    // Some basic sanity checks
    test_sysroot_consistency();
    // We always setup.
    let ask = subcommand != MiriCommand::Setup;
    setup(ask);
    if subcommand == MiriCommand::Setup {
        // Stop here.
        return;
    }
    // Now run the command.
    for target in list_targets() {
        let mut args = std::env::args().skip(skip);
        let kind = target
            .kind
            .get(0)
            .expect("badly formatted cargo metadata: target::kind is an empty array");
        // Now we run `cargo check $FLAGS $ARGS`, giving the user the
        // chance to add additional arguments. `FLAGS` is set to identify
        // this target. The user gets to control what gets actually passed to Miri.
        let mut cmd = cargo();
        cmd.arg("check");
        match (subcommand, kind.as_str()) {
            (MiriCommand::Run, "bin") => {
                // FIXME: we just run all the binaries here.
                // We should instead support `cargo miri --bin foo`.
                cmd.arg("--bin").arg(target.name);
            }
            (MiriCommand::Test, "test") => {
                cmd.arg("--test").arg(target.name);
            }
            (MiriCommand::Test, "lib") => {
                // There can be only one lib.
                cmd.arg("--lib").arg("--profile").arg("test");
            }
            (MiriCommand::Test, "bin") => {
                cmd.arg("--bin").arg(target.name).arg("--profile").arg("test");
            }
            // The remaining targets we do not even want to build.
            _ => continue,
        }
        // Forward user-defined `cargo` args until first `--`.
        while let Some(arg) = args.next() {
            if arg == "--" {
                break;
            }
            cmd.arg(arg);
        }
        // Serialize the remaining args into a special environment variable.
        // This will be read by `inside_cargo_rustc` when we go to invoke
        // our actual target crate (the binary or the test we are running).
        // Since we're using "cargo check", we have no other way of passing
        // these arguments.
        let args_vec: Vec<String> = args.collect();
        cmd.env("MIRI_ARGS", serde_json::to_string(&args_vec).expect("failed to serialize args"));
        // Set `RUSTC_WRAPPER` to ourselves. Cargo will prepend that binary to its usual invocation,
        // i.e., the first argument is `rustc` -- which is what we use in `main` to distinguish
        // the two codepaths.
        let path = std::env::current_exe().expect("current executable path invalid");
        cmd.env("RUSTC_WRAPPER", path);
        if verbose {
            cmd.env("MIRI_VERBOSE", ""); // this makes `inside_cargo_rustc` verbose.
            eprintln!("+ {:?}", cmd);
        }
        let exit_status =
            cmd.spawn().expect("could not run cargo").wait().expect("failed to wait for cargo?");
        if !exit_status.success() {
            std::process::exit(exit_status.code().unwrap_or(-1))
        }
    }
}
/// Implements the `RUSTC_WRAPPER` side: cargo invokes us as
/// `cargo-miri rustc <args>`. Host crates are forwarded to the real `rustc`;
/// the final runnable target crate is handed to `miri` together with the user
/// arguments smuggled through the `MIRI_ARGS` env var.
fn inside_cargo_rustc() {
    /// Determines if we are being invoked (as rustc) to build a runnable
    /// executable. We run "cargo check", so this should only happen when
    /// we are trying to compile a build script or build script dependency,
    /// which actually needs to be executed on the host platform.
    ///
    /// Currently, we detect this by checking for "--emit=link",
    /// which indicates that Cargo instructed rustc to output
    /// a native object.
    ///
    /// NOTE(review): the first paragraph above looks stale — the function as
    /// written returns `true` for crates that are only *checked* (no "link" in
    /// `--emit`), i.e. the crates Miri will interpret; confirm and fix the doc.
    fn is_target_crate() -> bool {
        // `--emit` is sometimes missing, e.g. cargo calls rustc for "--print".
        // That is definitely not a target crate.
        // If `--emit` is present, then host crates are built ("--emit=link,...),
        // while the rest is only checked.
        get_arg_flag_value("--emit").map_or(false, |emit| !emit.contains("link"))
    }
    /// Returns whether or not Cargo invoked the wrapper (this binary) to compile
    /// the final, target crate (either a test for 'cargo test', or a binary for 'cargo run')
    /// Cargo does not give us this information directly, so we need to check
    /// various command-line flags.
    fn is_runnable_crate() -> bool {
        let is_bin = get_arg_flag_value("--crate-type").as_deref() == Some("bin");
        let is_test = has_arg_flag("--test");
        // The final runnable (under Miri) crate will either be a binary crate
        // or a test crate. We make sure to exclude build scripts here, since
        // they are also build with "--crate-type bin"
        is_bin || is_test
    }
    let verbose = std::env::var("MIRI_VERBOSE").is_ok();
    let target_crate = is_target_crate();
    // Figure out which arguments we need to pass.
    let mut args: Vec<String> = std::env::args().skip(2).collect(); // skip `cargo-miri rustc`
    // We make sure to only specify our custom Xargo sysroot and
    // other args for target crates - that is, crates which are ultimately
    // going to get interpreted by Miri.
    if target_crate {
        let sysroot =
            std::env::var("MIRI_SYSROOT").expect("The wrapper should have set MIRI_SYSROOT");
        args.push("--sysroot".to_owned());
        args.push(sysroot);
        args.splice(0..0, miri::miri_default_args().iter().map(ToString::to_string));
    }
    // Figure out the binary we need to call. If this is a runnable target crate, we want to call
    // Miri to start interpretation; otherwise we want to call rustc to build the crate as usual.
    let mut command = if target_crate && is_runnable_crate() {
        // This is the 'target crate' - the binary or test crate that
        // we want to interpret under Miri. We deserialize the user-provided arguments
        // from the special environment variable "MIRI_ARGS", and feed them
        // to the 'miri' binary.
        let magic = std::env::var("MIRI_ARGS").expect("missing MIRI_ARGS");
        let mut user_args: Vec<String> =
            serde_json::from_str(&magic).expect("failed to deserialize MIRI_ARGS");
        args.append(&mut user_args);
        // Run this in Miri.
        Command::new(find_miri())
    } else {
        Command::new("rustc")
    };
    // Run it.
    command.args(&args);
    if verbose {
        eprintln!("+ {:?}", command);
    }
    match command.status() {
        Ok(exit) =>
            if !exit.success() {
                std::process::exit(exit.code().unwrap_or(42));
            },
        Err(ref e) => panic!("error running {:?}:\n{:?}", command, e),
    }
}
| 38.485099 | 137 | 0.575866 |
76346c686e7ce31e3e756abe0db93c3048d6cc4c | 3,971 | //! Threadpool
mod idle;
use self::idle::Idle;
mod park;
pub(crate) use park::{Parker, Unparker};
pub(super) mod queue;
mod worker;
pub(crate) use worker::Launch;
pub(crate) use worker::block_in_place;
use crate::loom::sync::Arc;
use crate::runtime::task::JoinHandle;
use crate::runtime::{Callback, Driver, HandleInner};
use std::fmt;
use std::future::Future;
/// Work-stealing based thread pool for executing futures.
pub(crate) struct ThreadPool {
    // Handle used to submit tasks; also triggers shutdown when the pool drops.
    spawner: Spawner,
}
/// Submits futures to the associated thread pool for execution.
///
/// A `Spawner` instance is a handle to a single thread pool that allows the owner
/// of the handle to spawn futures onto the thread pool.
///
/// The `Spawner` handle is *only* used for spawning new futures. It does not
/// impact the lifecycle of the thread pool in any way. The thread pool may
/// shut down while there are outstanding `Spawner` instances.
///
/// `Spawner` instances are obtained by calling [`ThreadPool::spawner`].
///
/// [`ThreadPool::spawner`]: method@ThreadPool::spawner
#[derive(Clone)]
pub(crate) struct Spawner {
    // State shared with all worker threads (created by `worker::create`).
    shared: Arc<worker::Shared>,
}
// ===== impl ThreadPool =====
impl ThreadPool {
    /// Builds a pool of `size` workers plus the `Launch` handle used to
    /// actually start the worker threads.
    pub(crate) fn new(
        size: usize,
        driver: Driver,
        handle_inner: HandleInner,
        before_park: Option<Callback>,
        after_unpark: Option<Callback>,
    ) -> (ThreadPool, Launch) {
        let (shared, launch) = worker::create(
            size,
            Parker::new(driver),
            handle_inner,
            before_park,
            after_unpark,
        );
        (ThreadPool { spawner: Spawner { shared } }, launch)
    }

    /// Returns reference to `Spawner`.
    ///
    /// The `Spawner` handle can be cloned and enables spawning tasks from other
    /// threads.
    pub(crate) fn spawner(&self) -> &Spawner {
        &self.spawner
    }

    /// Blocks the current thread waiting for the future to complete.
    ///
    /// The future will execute on the current thread, but all spawned tasks
    /// will be executed on the thread pool.
    pub(crate) fn block_on<F>(&self, future: F) -> F::Output
    where
        F: Future,
    {
        let mut blocking_enter = crate::runtime::enter(true);
        blocking_enter.block_on(future).expect("failed to park thread")
    }
}
impl fmt::Debug for ThreadPool {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // No fields worth exposing; just render the type name.
        f.debug_struct("ThreadPool").finish()
    }
}
impl Drop for ThreadPool {
    // Dropping the pool signals shutdown through the spawner's shared state.
    fn drop(&mut self) {
        self.spawner.shutdown();
    }
}
// ==== impl Spawner =====
impl Spawner {
    /// Spawns a future onto the thread pool
    pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
    where
        F: crate::future::Future + Send + 'static,
        F::Output: Send + 'static,
    {
        worker::Shared::bind_new_task(&self.shared, future)
    }

    /// Closes the shared worker state, initiating pool shutdown.
    pub(crate) fn shutdown(&mut self) {
        self.shared.close();
    }

    /// Returns the runtime-internal handle state shared with the workers.
    pub(crate) fn as_handle_inner(&self) -> &HandleInner {
        self.shared.as_handle_inner()
    }
}
cfg_metrics! {
    use crate::runtime::{SchedulerMetrics, WorkerMetrics};

    // Metrics accessors; this whole block is only compiled when runtime
    // metrics are enabled (see the `cfg_metrics!` macro).
    impl Spawner {
        /// Number of worker threads in the pool.
        pub(crate) fn num_workers(&self) -> usize {
            self.shared.worker_metrics.len()
        }

        pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics {
            &self.shared.scheduler_metrics
        }

        /// Metrics for the worker at index `worker`; panics if out of range.
        pub(crate) fn worker_metrics(&self, worker: usize) -> &WorkerMetrics {
            &self.shared.worker_metrics[worker]
        }

        pub(crate) fn injection_queue_depth(&self) -> usize {
            self.shared.injection_queue_depth()
        }

        pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize {
            self.shared.worker_local_queue_depth(worker)
        }
    }
}
impl fmt::Debug for Spawner {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // No fields worth exposing; just render the type name.
        f.debug_struct("Spawner").finish()
    }
}
| 26.651007 | 82 | 0.626039 |
56df8ffd8bdb2c28438069db29d4a9ea96a0d95e | 4,627 | #![feature(custom_attribute, custom_derive, plugin)]
extern crate hyper;
extern crate time;
extern crate slack_hook;
extern crate rustc_serialize;
extern crate small_logger;
use std::io::prelude::*;
use std::fs::File;
use std::thread;
use std::convert::AsRef;
use hyper::server::Server;
use hyper::server::Request;
use hyper::server::Response;
use hyper::uri::RequestUri;
use hyper::net::Fresh;
use hyper::server::Handler;
use std::path::Path;
use std::sync::{Mutex, Arc};
use std::sync::mpsc::{Sender, channel};
use rustc_serialize::json::Json;
use rustc_serialize::json;
use dispatcher::Dispatcher;
use hook_configuration::HookConfiguration;
use deployer::DeployMessage;
mod hook_configuration;
mod dispatcher;
mod deployer;
mod tools;
/// Parsed webhook payload from a git hosting service.
pub struct GitHook {
    // Decoded JSON body of the incoming hook request.
    content: Json,
}
impl GitHook {
    /// Repository name, read from `repository.name` in the JSON payload.
    ///
    /// Panics if the path is missing or not a string (payloads are assumed
    /// to be well-formed webhook bodies).
    fn repository_name(&self) -> &str {
        self.content
            .find_path(&["repository", "name"])
            .unwrap()
            .as_string()
            .unwrap()
    }
    /// Raw git reference from the payload, e.g. `refs/heads/master`.
    fn reference(&self) -> &str {
        self.content.find("ref").unwrap().as_string().unwrap()
    }
    /// Branch name with the leading `refs/heads/` stripped.
    fn branch(&self) -> &str {
        self.reference().trim_left_matches("refs/heads/")
    }
}
/// HTTP handler state: the hook configuration plus a channel into the
/// dispatcher thread that performs the actual deployments.
pub struct Daemon {
    // Sender used to forward matched hooks to the dispatcher.
    intercom: Arc<Mutex<Sender<DeployMessage>>>,
    config: HookConfiguration,
}
impl Handler for Daemon {
    // Accepts webhook POSTs on `/hook/`, matches the payload's repository and
    // branch against the configured hooks, and forwards the first match to
    // the dispatcher. Always responds 200 "OK." regardless of the outcome.
    fn handle(&self, req: Request, res: Response<Fresh>) {
        let mut s = String::new();
        let mut myreq = req;
        if myreq.uri == RequestUri::AbsolutePath("/hook/".to_string()) {
            match myreq.read_to_string(&mut s) {
                Ok(_) => {
                    println!("Got payload {}", s);
                    let decode = Json::from_str(s.as_ref());
                    match decode {
                        Ok(decoded) => {
                            let gh = GitHook { content: decoded };
                            let repo_name = gh.repository_name();
                            let branch = gh.branch();
                            // First hook whose name matches; a hook with no
                            // branch filter matches every branch.
                            match self.config
                                      .hooks
                                      .iter()
                                      .filter(|&binding| if repo_name == binding.name {
                                          match binding.branch.clone() {
                                              Some(target_branch) => branch == target_branch,
                                              None => true,
                                          }
                                      } else {
                                          false
                                      })
                                      .next() {
                                Some(hk) => {
                                    // Send failure is deliberately ignored.
                                    let _ = self.intercom
                                                .lock()
                                                .unwrap()
                                                .send(DeployMessage::Deploy(hk.clone()));
                                }
                                None => println!("No hook for {}/{}", repo_name, branch),
                            }
                        }
                        Err(e) => {
                            println!("Error while parsing http: {:?}", e);
                            println!("{}", s);
                        }
                    }
                }
                _ => {}
            }
        }
        let mut res = res.start().unwrap();
        res.write_all(b"OK.").unwrap();
        res.end().unwrap();
    }
}
/// Entry point: loads `config.json`, spawns the dispatcher thread, and
/// serves the webhook endpoint on 127.0.0.1:5000 (blocking).
pub fn main() {
    let mut json_config = String::new();
    let config_location = &Path::new("config.json");
    match File::open(config_location) {
        Err(err) => {
            panic!("Error during config file read: {:?}. {}",
                   config_location,
                   err.to_string())
        }
        Ok(icf) => {
            let mut config_file = icf;
            config_file.read_to_string(&mut json_config).ok().unwrap()
        }
    };
    let config: HookConfiguration = match json::decode(json_config.as_ref()) {
        Err(err) => {
            println!("Error while parsing config file:");
            println!("{}", err);
            println!("{}", json_config);
            panic!("Sorry.");
        }
        Ok(content) => content,
    };
    // The dispatcher consumes deploy messages on its own thread; `tx` is
    // handed to the HTTP handler below.
    let (tx, rx) = channel();
    let dispatcher = Dispatcher { config: config.clone() };
    thread::spawn(move || {
        dispatcher.run(rx);
    });
    let handler = Daemon {
        config: config,
        intercom: Arc::new(Mutex::new(tx)),
    };
    let port = 5000;
    println!("Starting up, listening on port {}.", port);
    // Blocks forever serving requests.
    let _ = Server::http(format!("127.0.0.1:{}", port).as_str()).unwrap().handle(handler);
}
| 29.100629 | 90 | 0.461854 |
ef9ef40ee653d55d536585ff47388de0b6455ab1 | 12,381 | #[cfg(unix)]
mod unix;
#[cfg(windows)]
mod windows;
#[cfg(unix)]
pub use self::unix::*;
#[cfg(windows)]
pub use self::windows::*;
use super::utils::copy_stat_into_wasm;
use super::varargs::VarArgs;
use byteorder::{ByteOrder, LittleEndian};
/// NOTE: TODO: These syscalls only support wasm_32 for now because they assume offsets are u32
/// Syscall list: https://www.cs.utexas.edu/~bismith/test/syscalls/syscalls32.html
use libc::{
// ENOTTY,
c_int,
c_void,
chdir,
// fcntl, setsockopt, getppid
close,
dup2,
exit,
fstat,
getpid,
// iovec,
lseek,
// open,
read,
// readv,
rmdir,
// writev,
stat,
write,
// sockaddr_in,
};
use wasmer_runtime_core::vm::Ctx;
use super::env;
use std::slice;
// use std::sys::fd::FileDesc;
// Another conditional constant for name resolution: Macos et iOS use
// SO_NOSIGPIPE as a setsockopt flag to disable SIGPIPE emission on socket.
// Other platforms do otherwise.
#[cfg(target_os = "darwin")]
use libc::SO_NOSIGPIPE;
#[cfg(not(target_os = "darwin"))]
const SO_NOSIGPIPE: c_int = 0;
/// exit
pub fn ___syscall1(ctx: &mut Ctx, which: c_int, mut varargs: VarArgs) {
    debug!("emscripten::___syscall1 (exit) {}", which);
    let status: i32 = varargs.get(ctx);
    // Terminates the entire host process; this call never returns.
    unsafe {
        exit(status);
    }
}
/// read
pub fn ___syscall3(ctx: &mut Ctx, which: i32, mut varargs: VarArgs) -> i32 {
    // -> ssize_t
    debug!("emscripten::___syscall3 (read) {}", which);
    let fd: i32 = varargs.get(ctx);
    let buf: u32 = varargs.get(ctx);
    let count = varargs.get(ctx);
    debug!("=> fd: {}, buf_offset: {}, count: {}", fd, buf, count);
    // Translate the guest's linear-memory offset into a host pointer.
    let buf_addr = emscripten_memory_pointer!(ctx.memory(0), buf) as *mut c_void;
    let ret = unsafe { read(fd, buf_addr, count) };
    debug!("=> ret: {}", ret);
    ret as _
}
/// write
pub fn ___syscall4(ctx: &mut Ctx, which: c_int, mut varargs: VarArgs) -> c_int {
    debug!("emscripten::___syscall4 (write) {}", which);
    let fd: i32 = varargs.get(ctx);
    let buf: u32 = varargs.get(ctx);
    let count = varargs.get(ctx);
    debug!("=> fd: {}, buf: {}, count: {}", fd, buf, count);
    let buf_addr = emscripten_memory_pointer!(ctx.memory(0), buf) as *const c_void;
    unsafe { write(fd, buf_addr, count) as i32 }
}
/// close
pub fn ___syscall6(ctx: &mut Ctx, which: c_int, mut varargs: VarArgs) -> c_int {
    debug!("emscripten::___syscall6 (close) {}", which);
    let fd: i32 = varargs.get(ctx);
    debug!("fd: {}", fd);
    unsafe { close(fd) }
}
// chdir
pub fn ___syscall12(ctx: &mut Ctx, which: c_int, mut varargs: VarArgs) -> c_int {
    debug!("emscripten::___syscall12 (chdir) {}", which);
    let path_addr: i32 = varargs.get(ctx);
    unsafe {
        // The guest passes a NUL-terminated path inside wasm linear memory.
        let path_ptr = emscripten_memory_pointer!(ctx.memory(0), path_addr) as *const i8;
        let path = std::ffi::CStr::from_ptr(path_ptr);
        let ret = chdir(path_ptr);
        debug!("=> path: {:?}, ret: {}", path, ret);
        ret
    }
}
// Unimplemented syscalls below log their number and fail with -1.
pub fn ___syscall10(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall10");
    -1
}
pub fn ___syscall15(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall15");
    -1
}
// getpid
pub fn ___syscall20(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall20 (getpid)");
    unsafe { getpid() }
}
pub fn ___syscall38(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall38");
    -1
}
// rmdir
pub fn ___syscall40(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_int {
    debug!("emscripten::___syscall40 (rmdir)");
    let pathname: u32 = varargs.get(ctx);
    let pathname_addr = emscripten_memory_pointer!(ctx.memory(0), pathname) as *const i8;
    unsafe { rmdir(pathname_addr) }
}
pub fn ___syscall60(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall60");
    -1
}
// dup2
pub fn ___syscall63(ctx: &mut Ctx, which: c_int, mut varargs: VarArgs) -> c_int {
    debug!("emscripten::___syscall63 (dup2) {}", which);
    let src: i32 = varargs.get(ctx);
    let dst: i32 = varargs.get(ctx);
    unsafe { dup2(src, dst) }
}
// getppid
pub fn ___syscall64(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall64 (getppid)");
    // NOTE(review): this returns the host's own pid via getpid(), not the
    // parent pid — confirm whether that is intentional (sandboxing) or a bug.
    unsafe { getpid() }
}
pub fn ___syscall66(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall66");
    -1
}
pub fn ___syscall75(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall75");
    -1
}
pub fn ___syscall85(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall85");
    -1
}
pub fn ___syscall91(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall91");
    -1
}
pub fn ___syscall97(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall97");
    -1
}
pub fn ___syscall110(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall110");
    -1
}
// mmap2
pub fn ___syscall192(ctx: &mut Ctx, which: c_int, mut varargs: VarArgs) -> c_int {
    debug!("emscripten::___syscall192 (mmap2) {}", which);
    let addr: i32 = varargs.get(ctx);
    let len: u32 = varargs.get(ctx);
    let prot: i32 = varargs.get(ctx);
    let flags: i32 = varargs.get(ctx);
    let fd: i32 = varargs.get(ctx);
    let off: i32 = varargs.get(ctx);
    debug!(
        "=> addr: {}, len: {}, prot: {}, flags: {}, fd: {}, off: {}",
        addr, len, prot, flags, fd, off
    );
    // Only anonymous mappings (fd == -1) are supported: they are emulated by
    // allocating zeroed guest memory via the module's memalign/memset.
    if fd == -1 {
        let ptr = env::call_memalign(ctx, 16384, len);
        if ptr == 0 {
            return -1;
        }
        env::call_memset(ctx, ptr, 0, len);
        ptr as _
    } else {
        -1
    }
}
/// lseek
pub fn ___syscall140(ctx: &mut Ctx, which: i32, mut varargs: VarArgs) -> i32 {
    // -> c_int
    debug!("emscripten::___syscall140 (lseek) {}", which);
    let fd: i32 = varargs.get(ctx);
    let offset = varargs.get(ctx);
    let whence: i32 = varargs.get(ctx);
    debug!("=> fd: {}, offset: {}, whence = {}", fd, offset, whence);
    unsafe { lseek(fd, offset, whence) as _ }
}
/// readv
#[allow(clippy::cast_ptr_alignment)]
pub fn ___syscall145(ctx: &mut Ctx, which: c_int, mut varargs: VarArgs) -> i32 {
    // -> ssize_t
    debug!("emscripten::___syscall145 (readv) {}", which);
    // let fd: i32 = varargs.get(ctx);
    // let iov: u32 = varargs.get(ctx);
    // let iovcnt: i32 = varargs.get(ctx);
    // debug!("=> fd: {}, iov: {}, iovcnt = {}", fd, iov, iovcnt);
    // let iov_addr = emscripten_memory_pointer!(ctx.memory(0), iov) as *mut iovec;
    // unsafe { readv(fd, iov_addr, iovcnt) }
    let fd: i32 = varargs.get(ctx);
    let iov: i32 = varargs.get(ctx);
    let iovcnt: i32 = varargs.get(ctx);
    // Guest-side iovec layout: two little-endian i32s (base offset, length),
    // hence the `i * 8` stride below.
    #[repr(C)]
    struct GuestIovec {
        iov_base: i32,
        iov_len: i32,
    }
    debug!("=> fd: {}, iov: {}, iovcnt = {}", fd, iov, iovcnt);
    let mut ret = 0;
    unsafe {
        for i in 0..iovcnt {
            let guest_iov_addr =
                emscripten_memory_pointer!(ctx.memory(0), (iov + i * 8)) as *mut GuestIovec;
            let iov_base = emscripten_memory_pointer!(ctx.memory(0), (*guest_iov_addr).iov_base)
                as *mut c_void;
            let iov_len = (*guest_iov_addr).iov_len as _;
            // debug!("=> iov_addr: {:?}, {:?}", iov_base, iov_len);
            // One host read() per guest iovec; bail out on the first error.
            let curr = read(fd, iov_base, iov_len);
            if curr < 0 {
                return -1;
            }
            ret += curr;
        }
        // debug!(" => ret: {}", ret);
        ret as _
    }
}
// writev
#[allow(clippy::cast_ptr_alignment)]
pub fn ___syscall146(ctx: &mut Ctx, which: i32, mut varargs: VarArgs) -> i32 {
    // -> ssize_t
    debug!("emscripten::___syscall146 (writev) {}", which);
    let fd: i32 = varargs.get(ctx);
    let iov: i32 = varargs.get(ctx);
    let iovcnt: i32 = varargs.get(ctx);
    // Same guest iovec layout as in readv above.
    #[repr(C)]
    struct GuestIovec {
        iov_base: i32,
        iov_len: i32,
    }
    debug!("=> fd: {}, iov: {}, iovcnt = {}", fd, iov, iovcnt);
    let mut ret = 0;
    unsafe {
        for i in 0..iovcnt {
            let guest_iov_addr =
                emscripten_memory_pointer!(ctx.memory(0), (iov + i * 8)) as *mut GuestIovec;
            let iov_base = emscripten_memory_pointer!(ctx.memory(0), (*guest_iov_addr).iov_base)
                as *const c_void;
            let iov_len = (*guest_iov_addr).iov_len as _;
            // debug!("=> iov_addr: {:?}, {:?}", iov_base, iov_len);
            let curr = write(fd, iov_base, iov_len);
            if curr < 0 {
                return -1;
            }
            ret += curr;
        }
        // debug!(" => ret: {}", ret);
        ret as _
    }
}
// Unimplemented syscall stubs: log and fail with -1.
pub fn ___syscall168(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall168");
    -1
}
pub fn ___syscall191(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall191 - stub");
    -1
}
pub fn ___syscall194(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall194 - stub");
    -1
}
/// Unimplemented syscall 196 stub: logs and fails with -1.
pub fn ___syscall196(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    // Fixed copy-paste bug: this previously logged "___syscall194".
    debug!("emscripten::___syscall196 - stub");
    -1
}
// Unimplemented syscall stub: logs and fails with -1.
pub fn ___syscall199(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall199 - stub");
    -1
}
// stat64
pub fn ___syscall195(ctx: &mut Ctx, which: c_int, mut varargs: VarArgs) -> c_int {
    debug!("emscripten::___syscall195 (stat64) {}", which);
    let pathname: u32 = varargs.get(ctx);
    let buf: u32 = varargs.get(ctx);
    let pathname_addr = emscripten_memory_pointer!(ctx.memory(0), pathname) as *const i8;
    unsafe {
        // Perform the host stat, then marshal the result into the guest's
        // stat buffer layout at offset `buf`.
        let mut _stat: stat = std::mem::zeroed();
        let ret = stat(pathname_addr, &mut _stat);
        debug!("ret: {}", ret);
        if ret != 0 {
            return ret;
        }
        copy_stat_into_wasm(ctx, buf, &_stat);
    }
    0
}
// fstat64
pub fn ___syscall197(ctx: &mut Ctx, which: c_int, mut varargs: VarArgs) -> c_int {
    debug!("emscripten::___syscall197 (fstat64) {}", which);
    let fd: c_int = varargs.get(ctx);
    let buf: u32 = varargs.get(ctx);
    unsafe {
        let mut stat = std::mem::zeroed();
        let ret = fstat(fd, &mut stat);
        debug!("ret: {}", ret);
        if ret != 0 {
            return ret;
        }
        copy_stat_into_wasm(ctx, buf, &stat);
    }
    0
}
// Unimplemented syscall stub: logs and fails with -1.
pub fn ___syscall220(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall220");
    -1
}
// fcntl64
pub fn ___syscall221(ctx: &mut Ctx, which: c_int, mut varargs: VarArgs) -> c_int {
    debug!("emscripten::___syscall221 (fcntl64) {}", which);
    // fcntl64
    let _fd: i32 = varargs.get(ctx);
    let cmd: u32 = varargs.get(ctx);
    // Only cmd 2 (presumably F_SETFD — TODO confirm) is treated as a
    // successful no-op; every other command fails.
    match cmd {
        2 => 0,
        _ => -1,
    }
}
pub fn ___syscall268(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall268");
    -1
}
pub fn ___syscall272(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall272");
    -1
}
pub fn ___syscall295(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall295");
    -1
}
pub fn ___syscall300(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall300");
    -1
}
pub fn ___syscall334(_ctx: &mut Ctx, _one: i32, _two: i32) -> i32 {
    debug!("emscripten::___syscall334");
    -1
}
// prlimit64
pub fn ___syscall340(ctx: &mut Ctx, which: c_int, mut varargs: VarArgs) -> c_int {
    debug!("emscripten::___syscall340 (prlimit64), {}", which);
    // NOTE: Doesn't really matter. Wasm modules cannot exceed WASM_PAGE_SIZE anyway.
    let _pid: i32 = varargs.get(ctx);
    let _resource: i32 = varargs.get(ctx);
    let _new_limit: u32 = varargs.get(ctx);
    let old_limit: u32 = varargs.get(ctx);
    if old_limit != 0 {
        // just report no limits
        // Four i32 writes of -1 fill the 16-byte rlimit64 struct (two u64
        // fields) entirely with 0xFF bytes, i.e. RLIM_INFINITY for both
        // the soft and hard limit.
        let buf_ptr = emscripten_memory_pointer!(ctx.memory(0), old_limit) as *mut u8;
        let buf = unsafe { slice::from_raw_parts_mut(buf_ptr, 16) };
        LittleEndian::write_i32(&mut buf[..], -1); // RLIM_INFINITY
        LittleEndian::write_i32(&mut buf[4..], -1); // RLIM_INFINITY
        LittleEndian::write_i32(&mut buf[8..], -1); // RLIM_INFINITY
        LittleEndian::write_i32(&mut buf[12..], -1); // RLIM_INFINITY
    }
    0
}
| 28.593533 | 96 | 0.587432 |
bf735ff2a13f06f3a492acf8945053425a71b1b9 | 1,372 | //! Type trait for the mantissa of an extended float.
use crate::traits::*;
/// Type trait for the mantissa type.
/// Type trait for the mantissa type.
///
/// Supplies the bit masks and bit widths that extended-float arithmetic
/// needs to check normalization and split a value into high/low halves.
pub trait Mantissa: UnsignedInteger {
    /// Mask for the left-most bit, to check if the value is normalized.
    const NORMALIZED_MASK: Self;
    /// Mask to extract the high bits from the integer.
    const HIMASK: Self;
    /// Mask to extract the low bits from the integer.
    const LOMASK: Self;
    /// Full size of the integer, in bits.
    const FULL: i32 = Self::BITS as i32;
    /// Half size of the integer, in bits.
    const HALF: i32 = Self::FULL / 2;
}
// Each impl sets the top bit as NORMALIZED_MASK and splits the type into
// equal high/low halves via HIMASK/LOMASK.
impl Mantissa for u8 {
    const NORMALIZED_MASK: u8 = 0x80;
    const HIMASK: u8 = 0xF0;
    const LOMASK: u8 = 0x0F;
}
impl Mantissa for u16 {
    const NORMALIZED_MASK: u16 = 0x8000;
    const HIMASK: u16 = 0xFF00;
    const LOMASK: u16 = 0x00FF;
}
impl Mantissa for u32 {
    const NORMALIZED_MASK: u32 = 0x80000000;
    const HIMASK: u32 = 0xFFFF0000;
    const LOMASK: u32 = 0x0000FFFF;
}
impl Mantissa for u64 {
    const NORMALIZED_MASK: u64 = 0x8000000000000000;
    const HIMASK: u64 = 0xFFFFFFFF00000000;
    const LOMASK: u64 = 0x00000000FFFFFFFF;
}
impl Mantissa for u128 {
    const NORMALIZED_MASK: u128 = 0x80000000000000000000000000000000;
    const HIMASK: u128 = 0xFFFFFFFFFFFFFFFF0000000000000000;
    const LOMASK: u128 = 0x0000000000000000FFFFFFFFFFFFFFFF;
}
| 28.583333 | 72 | 0.69242 |
61e980be8d045a36cc33dbfb532cae21c2662105 | 96 | #[doc = "Reader of register RESERVED5"]
pub type R = crate::R<u32, super::RESERVED5>;
impl R {}
| 24 | 45 | 0.666667 |
ab8f81b2175963311696e670a1ff51bac64ee06a | 3,180 | use crate::{self as io};
use bytes::{Buf, Bytes};
use pin_project::pin_project;
use std::{cmp, pin::Pin, task::Context};
/// A TcpStream where the initial reads will be served from `prefix`.
#[pin_project]
#[derive(Debug)]
pub struct PrefixedIo<I> {
    // Bytes handed back to readers before `io` itself is polled.
    prefix: Bytes,
    #[pin]
    io: I,
}
impl<I> PrefixedIo<I> {
    /// Wraps `io` so the given bytes are served before the inner IO is read.
    pub fn new(prefix: impl Into<Bytes>, io: I) -> Self {
        Self {
            prefix: prefix.into(),
            io,
        }
    }

    /// Returns the prefix bytes that have not yet been consumed by reads.
    pub fn prefix(&self) -> &Bytes {
        &self.prefix
    }
}
impl<I> From<I> for PrefixedIo<I> {
    // A bare IO converts to a `PrefixedIo` with an empty prefix, so the
    // first read goes straight to the inner IO.
    fn from(io: I) -> Self {
        Self::new(Bytes::default(), io)
    }
}
#[async_trait::async_trait]
impl<I: Send + Sync> io::Peek for PrefixedIo<I> {
    /// Serves peeks from the buffered prefix without touching the inner IO.
    ///
    /// Copies up to `buf.len()` bytes of the prefix into `buf` and returns
    /// the number of bytes written (0 when the prefix is empty). The prefix
    /// itself is not consumed.
    async fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
        let sz = self.prefix.len().min(buf.len());
        if sz == 0 {
            return Ok(0);
        }
        // `copy_from_slice` is the idiomatic, memcpy-backed copy for `u8`
        // slices; `clone_from_slice` is only needed for non-`Copy` elements.
        buf[..sz].copy_from_slice(&self.prefix[..sz]);
        Ok(sz)
    }
}
impl<I: io::PeerAddr> io::PeerAddr for PrefixedIo<I> {
    // Delegates directly to the inner IO; the prefix has no address.
    #[inline]
    fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
        self.io.peer_addr()
    }
}
impl<I: io::AsyncRead> io::AsyncRead for PrefixedIo<I> {
    // Reads drain the prefix first; only once it is empty do reads reach
    // the inner IO.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut io::ReadBuf<'_>,
    ) -> io::Poll<()> {
        let this = self.project();
        // Check the length only once, since looking as the length
        // of a Bytes isn't as cheap as the length of a &[u8].
        let peeked_len = this.prefix.len();
        if peeked_len == 0 {
            this.io.poll_read(cx, buf)
        } else {
            let len = cmp::min(buf.remaining(), peeked_len);
            buf.put_slice(&this.prefix.as_ref()[..len]);
            this.prefix.advance(len);
            // If we've finally emptied the prefix, drop it so we don't
            // hold onto the allocated memory any longer. We won't peek
            // again.
            if peeked_len == len {
                *this.prefix = Bytes::new();
            }
            io::Poll::Ready(Ok(()))
        }
    }
}
impl<I: io::Write> io::Write for PrefixedIo<I> {
    // Writes bypass the prefix entirely and go straight to the inner IO.
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.io.write(buf)
    }
    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        self.io.flush()
    }
}
impl<I: io::AsyncWrite> io::AsyncWrite for PrefixedIo<I> {
    // All write-side operations delegate to the pinned inner IO; the prefix
    // only affects the read side.
    #[inline]
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
        self.project().io.poll_shutdown(cx)
    }
    #[inline]
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
        self.project().io.poll_flush(cx)
    }
    #[inline]
    fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
        self.project().io.poll_write(cx, buf)
    }
    #[inline]
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> io::Poll<usize> {
        self.project().io.poll_write_vectored(cx, bufs)
    }
    #[inline]
    fn is_write_vectored(&self) -> bool {
        self.io.is_write_vectored()
    }
}
| 25.853659 | 94 | 0.536792 |
69b9df106b14c1ee8fbe6502158d19b2d0c8d84b | 594 | use alga::general::Real;
use na;
use na::Point3;
use crate::shape;
use crate::procedural::{IndexBuffer, TriMesh, TriMesh3};
use super::ToTriMesh;
impl<N: Real> ToTriMesh<Point3<N>, ()> for shape::TriMesh3<N> {
    // Converts the shape's triangle mesh into a procedural `TriMesh` by
    // cloning vertices (and normals/uvs when present) and re-encoding the
    // indices as a unified index buffer, converting each index numerically.
    fn to_trimesh(&self, _: ()) -> TriMesh3<N> {
        TriMesh::new(
            (**self.vertices()).clone(),
            self.normals().as_ref().map(|ns| (**ns).clone()),
            self.uvs().as_ref().map(|ns| (**ns).clone()),
            Some(IndexBuffer::Unified(
                (**self.indices()).iter().map(|e| na::convert(*e)).collect(),
            )),
        )
    }
}
| 29.7 | 77 | 0.530303 |
031ba160a4e2f35098029a751473de85ca26d975 | 11,925 | use {
crate::accountsdb_repl_service::AccountsDbReplService,
crossbeam_channel::unbounded,
log::*,
paychains_download_utils::download_snapshot_archive,
paychains_genesis_utils::download_then_check_genesis_hash,
paychains_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo},
paychains_ledger::{
blockstore::Blockstore, blockstore_db::BlockstoreOptions, blockstore_processor,
leader_schedule_cache::LeaderScheduleCache,
},
paychains_replica_lib::accountsdb_repl_client::AccountsDbReplClientServiceConfig,
paychains_rpc::{
max_slots::MaxSlots,
optimistically_confirmed_bank_tracker::{
OptimisticallyConfirmedBank, OptimisticallyConfirmedBankTracker,
},
rpc::JsonRpcConfig,
rpc_pubsub_service::{PubSubConfig, PubSubService},
rpc_service::JsonRpcService,
rpc_subscriptions::RpcSubscriptions,
},
paychains_runtime::{
accounts_index::AccountSecondaryIndexes, bank_forks::BankForks,
commitment::BlockCommitmentCache, hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
snapshot_config::SnapshotConfig, snapshot_package::SnapshotType, snapshot_utils,
},
paychains_sdk::{clock::Slot, exit::Exit, genesis_config::GenesisConfig, hash::Hash},
paychains_send_transaction_service::send_transaction_service,
paychains_streamer::socket::SocketAddrSpace,
std::{
fs,
net::SocketAddr,
path::PathBuf,
sync::{
atomic::{AtomicBool, AtomicU64},
Arc, RwLock,
},
},
};
/// Static configuration for a replica node: peer addresses, local service
/// addresses, storage paths, and RPC/snapshot settings.
pub struct ReplicaNodeConfig {
    pub rpc_peer_addr: SocketAddr,
    pub accountsdb_repl_peer_addr: Option<SocketAddr>,
    pub rpc_addr: SocketAddr,
    pub rpc_pubsub_addr: SocketAddr,
    pub ledger_path: PathBuf,
    pub snapshot_archives_dir: PathBuf,
    pub bank_snapshots_dir: PathBuf,
    pub account_paths: Vec<PathBuf>,
    // (slot, hash) identifying the snapshot to download from the peer.
    pub snapshot_info: (Slot, Hash),
    pub cluster_info: Arc<ClusterInfo>,
    pub rpc_config: JsonRpcConfig,
    pub snapshot_config: Option<SnapshotConfig>,
    pub pubsub_config: PubSubConfig,
    pub account_indexes: AccountSecondaryIndexes,
    pub accounts_db_caching_enabled: bool,
    pub replica_exit: Arc<RwLock<Exit>>,
    pub socket_addr_space: SocketAddrSpace,
}
/// Handles to the replica's long-running services, joined via `join()`.
pub struct ReplicaNode {
    json_rpc_service: Option<JsonRpcService>,
    pubsub_service: Option<PubSubService>,
    optimistically_confirmed_bank_tracker: Option<OptimisticallyConfirmedBankTracker>,
    accountsdb_repl_service: Option<AccountsDbReplService>,
}
// Struct maintaining information about banks
struct ReplicaBankInfo {
    bank_forks: Arc<RwLock<BankForks>>,
    optimistically_confirmed_bank: Arc<RwLock<OptimisticallyConfirmedBank>>,
    leader_schedule_cache: Arc<LeaderScheduleCache>,
    // Commitment levels per slot, seeded from the snapshot's root slot.
    block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
}
// Initialize the replica by downloading snapshot from the peer, initialize
// the BankForks, OptimisticallyConfirmedBank, LeaderScheduleCache and
// BlockCommitmentCache and return the info wrapped as ReplicaBankInfo.
fn initialize_from_snapshot(
    replica_config: &ReplicaNodeConfig,
    snapshot_config: &SnapshotConfig,
    genesis_config: &GenesisConfig,
) -> ReplicaBankInfo {
    info!(
        "Downloading snapshot from the peer into {:?}",
        replica_config.snapshot_archives_dir
    );
    // Fetch the full snapshot identified by (slot, hash) from the RPC peer.
    download_snapshot_archive(
        &replica_config.rpc_peer_addr,
        &replica_config.snapshot_archives_dir,
        replica_config.snapshot_info,
        SnapshotType::FullSnapshot,
        snapshot_config.maximum_full_snapshot_archives_to_retain,
        snapshot_config.maximum_incremental_snapshot_archives_to_retain,
        false,
        &mut None,
    )
    .unwrap();
    fs::create_dir_all(&snapshot_config.bank_snapshots_dir)
        .expect("Couldn't create bank snapshot directory");
    let archive_info = snapshot_utils::get_highest_full_snapshot_archive_info(
        &replica_config.snapshot_archives_dir,
    )
    .unwrap();
    let process_options = blockstore_processor::ProcessOptions {
        account_indexes: replica_config.account_indexes.clone(),
        accounts_db_caching_enabled: replica_config.accounts_db_caching_enabled,
        ..blockstore_processor::ProcessOptions::default()
    };
    info!(
        "Build bank from snapshot archive: {:?}",
        &snapshot_config.bank_snapshots_dir
    );
    // Reconstruct the root bank from the downloaded archive.
    let (bank0, _) = snapshot_utils::bank_from_snapshot_archives(
        &replica_config.account_paths,
        &snapshot_config.bank_snapshots_dir,
        &archive_info,
        None,
        genesis_config,
        process_options.debug_keys.clone(),
        None,
        process_options.account_indexes.clone(),
        process_options.accounts_db_caching_enabled,
        process_options.limit_load_slot_count_from_snapshot,
        process_options.shrink_ratio,
        process_options.accounts_db_test_hash_calculation,
        false,
        process_options.verify_index,
        process_options.accounts_db_config,
        None,
    )
    .unwrap();
    let bank0_slot = bank0.slot();
    let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));
    let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
    let optimistically_confirmed_bank =
        OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
    // Seed the commitment cache at the snapshot's slot.
    let mut block_commitment_cache = BlockCommitmentCache::default();
    block_commitment_cache.initialize_slots(bank0_slot);
    let block_commitment_cache = Arc::new(RwLock::new(block_commitment_cache));
    ReplicaBankInfo {
        bank_forks,
        optimistically_confirmed_bank,
        leader_schedule_cache,
        block_commitment_cache,
    }
}
// Starts the client-facing services (JSON-RPC, PubSub, and the
// optimistically-confirmed-bank tracker) backed by the banks built from the
// snapshot. Returns the service handles for later joining.
fn start_client_rpc_services(
    replica_config: &ReplicaNodeConfig,
    genesis_config: &GenesisConfig,
    cluster_info: Arc<ClusterInfo>,
    bank_info: &ReplicaBankInfo,
    socket_addr_space: &SocketAddrSpace,
) -> (
    Option<JsonRpcService>,
    Option<PubSubService>,
    Option<OptimisticallyConfirmedBankTracker>,
) {
    let ReplicaBankInfo {
        bank_forks,
        optimistically_confirmed_bank,
        leader_schedule_cache,
        block_commitment_cache,
    } = bank_info;
    let blockstore = Arc::new(
        Blockstore::open_with_options(
            &replica_config.ledger_path,
            BlockstoreOptions {
                enforce_ulimit_nofile: false,
                ..BlockstoreOptions::default()
            },
        )
        .unwrap(),
    );
    let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(0));
    let max_slots = Arc::new(MaxSlots::default());
    let exit = Arc::new(AtomicBool::new(false));
    let subscriptions = Arc::new(RpcSubscriptions::new(
        &exit,
        max_complete_transaction_status_slot.clone(),
        blockstore.clone(),
        bank_forks.clone(),
        block_commitment_cache.clone(),
        optimistically_confirmed_bank.clone(),
    ));
    let rpc_override_health_check = Arc::new(AtomicBool::new(false));
    // Sanity check: either both RPC addresses are valid or neither is.
    if ContactInfo::is_valid_address(&replica_config.rpc_addr, socket_addr_space) {
        assert!(ContactInfo::is_valid_address(
            &replica_config.rpc_pubsub_addr,
            socket_addr_space
        ));
    } else {
        assert!(!ContactInfo::is_valid_address(
            &replica_config.rpc_pubsub_addr,
            socket_addr_space
        ));
    }
    let (trigger, pubsub_service) = PubSubService::new(
        replica_config.pubsub_config.clone(),
        &subscriptions,
        replica_config.rpc_pubsub_addr,
    );
    // Cancel the pubsub trigger when the replica is asked to exit.
    replica_config
        .replica_exit
        .write()
        .unwrap()
        .register_exit(Box::new(move || trigger.cancel()));
    // NOTE(review): the sender half is never stored, so it is dropped when
    // this function returns and the tracker's channel closes — confirm that
    // is the intended behavior for a replica.
    let (_bank_notification_sender, bank_notification_receiver) = unbounded();
    (
        Some(JsonRpcService::new(
            replica_config.rpc_addr,
            replica_config.rpc_config.clone(),
            replica_config.snapshot_config.clone(),
            bank_forks.clone(),
            block_commitment_cache.clone(),
            blockstore,
            cluster_info,
            None,
            genesis_config.hash(),
            &replica_config.ledger_path,
            replica_config.replica_exit.clone(),
            None,
            rpc_override_health_check,
            optimistically_confirmed_bank.clone(),
            // Replicas do not forward transactions: zero retries/forwards.
            send_transaction_service::Config {
                retry_rate_ms: 0,
                leader_forward_count: 0,
                ..send_transaction_service::Config::default()
            },
            max_slots,
            leader_schedule_cache.clone(),
            max_complete_transaction_status_slot,
        )),
        Some(pubsub_service),
        Some(OptimisticallyConfirmedBankTracker::new(
            bank_notification_receiver,
            &exit,
            bank_forks.clone(),
            optimistically_confirmed_bank.clone(),
            subscriptions,
            None,
        )),
    )
}
impl ReplicaNode {
    /// Builds and starts a replica: downloads genesis and a snapshot from
    /// the peer, reconstructs the bank state, and launches the RPC/PubSub
    /// and AccountsDb replication services.
    pub fn new(replica_config: ReplicaNodeConfig) -> Self {
        let genesis_config = download_then_check_genesis_hash(
            &replica_config.rpc_peer_addr,
            &replica_config.ledger_path,
            None,
            MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
            false,
            true,
        )
        .unwrap();
        // Replicas never produce snapshots themselves: intervals are maxed.
        let snapshot_config = SnapshotConfig {
            full_snapshot_archive_interval_slots: Slot::MAX,
            incremental_snapshot_archive_interval_slots: Slot::MAX,
            snapshot_archives_dir: replica_config.snapshot_archives_dir.clone(),
            bank_snapshots_dir: replica_config.bank_snapshots_dir.clone(),
            ..SnapshotConfig::default()
        };
        let bank_info =
            initialize_from_snapshot(&replica_config, &snapshot_config, &genesis_config);
        let (json_rpc_service, pubsub_service, optimistically_confirmed_bank_tracker) =
            start_client_rpc_services(
                &replica_config,
                &genesis_config,
                replica_config.cluster_info.clone(),
                &bank_info,
                &replica_config.socket_addr_space,
            );
        // Panics if no accountsdb replication peer was configured.
        let accountsdb_repl_client_config = AccountsDbReplClientServiceConfig {
            worker_threads: 1,
            replica_server_addr: replica_config.accountsdb_repl_peer_addr.unwrap(),
        };
        // Replication resumes from the snapshot's root slot.
        let last_replicated_slot = bank_info.bank_forks.read().unwrap().root_bank().slot();
        info!(
            "Starting AccountsDbReplService from slot {:?}",
            last_replicated_slot
        );
        let accountsdb_repl_service = Some(
            AccountsDbReplService::new(last_replicated_slot, accountsdb_repl_client_config)
                .expect("Failed to start AccountsDb replication service"),
        );
        info!(
            "Started AccountsDbReplService from slot {:?}",
            last_replicated_slot
        );
        ReplicaNode {
            json_rpc_service,
            pubsub_service,
            optimistically_confirmed_bank_tracker,
            accountsdb_repl_service,
        }
    }
    /// Blocks until every running service thread has exited.
    pub fn join(self) {
        if let Some(json_rpc_service) = self.json_rpc_service {
            json_rpc_service.join().expect("rpc_service");
        }
        if let Some(pubsub_service) = self.pubsub_service {
            pubsub_service.join().expect("pubsub_service");
        }
        if let Some(optimistically_confirmed_bank_tracker) =
            self.optimistically_confirmed_bank_tracker
        {
            optimistically_confirmed_bank_tracker
                .join()
                .expect("optimistically_confirmed_bank_tracker");
        }
        if let Some(accountsdb_repl_service) = self.accountsdb_repl_service {
            accountsdb_repl_service
                .join()
                .expect("accountsdb_repl_service");
        }
    }
}
| 34.365994 | 93 | 0.66826 |
d50ceca6d5bf72f365dd5f443a493eedfbb77b65 | 9,022 | pub mod changeset;
pub mod target;
pub use self::changeset::AsChangeset;
pub use self::target::{IntoUpdateTarget, UpdateTarget};
use crate::backend::Backend;
use crate::dsl::{Filter, IntoBoxed};
use crate::expression::{AppearsOnTable, Expression, NonAggregate, SelectableExpression};
use crate::query_builder::returning_clause::*;
use crate::query_builder::where_clause::*;
use crate::query_builder::*;
use crate::query_dsl::methods::{BoxedDsl, FilterDsl};
use crate::query_dsl::RunQueryDsl;
use crate::query_source::Table;
use crate::result::Error::QueryBuilderError;
use crate::result::QueryResult;
impl<T, U> UpdateStatement<T, U, SetNotCalled> {
    /// Builds the statement from an update target (table plus optional
    /// `WHERE` clause); the `SET` clause must still be supplied via `set`.
    pub(crate) fn new(target: UpdateTarget<T, U>) -> Self {
        UpdateStatement {
            table: target.table,
            where_clause: target.where_clause,
            values: SetNotCalled,
            returning: NoReturningClause,
        }
    }
    /// Provides the `SET` clause of the `UPDATE` statement.
    ///
    /// See [`update`](../fn.update.html) for usage examples, or [the update
    /// guide](https://diesel.rs/guides/all-about-updates/) for a more exhaustive
    /// set of examples.
    pub fn set<V>(self, values: V) -> UpdateStatement<T, U, V::Changeset>
    where
        T: Table,
        V: changeset::AsChangeset<Target = T>,
        UpdateStatement<T, U, V::Changeset>: AsQuery,
    {
        UpdateStatement {
            table: self.table,
            where_clause: self.where_clause,
            values: values.as_changeset(),
            returning: self.returning,
        }
    }
}
#[derive(Debug, Copy, Clone)]
#[must_use = "Queries are only executed when calling `load`, `get_result` or similar."]
/// Represents a complete `UPDATE` statement.
///
/// See [`update`](../fn.update.html) for usage examples, or [the update
/// guide](https://diesel.rs/guides/all-about-updates/) for a more exhaustive
/// set of examples.
pub struct UpdateStatement<T, U, V = SetNotCalled, Ret = NoReturningClause> {
    // Target table of the statement.
    table: T,
    // `WHERE` clause restricting which rows are updated.
    where_clause: U,
    // Changeset produced by `AsChangeset` (or `SetNotCalled`).
    values: V,
    // Optional `RETURNING` clause.
    returning: Ret,
}
/// An `UPDATE` statement with a boxed `WHERE` clause.
pub type BoxedUpdateStatement<'a, DB, T, V = SetNotCalled, Ret = NoReturningClause> =
    UpdateStatement<T, BoxedWhereClause<'a, DB>, V, Ret>;
impl<T, U, V, Ret> UpdateStatement<T, U, V, Ret> {
    /// Adds the given predicate to the `WHERE` clause of the statement being
    /// constructed.
    ///
    /// If there is already a `WHERE` clause, the predicate will be appended
    /// with `AND`. There is no difference in behavior between
    /// `update(table.filter(x))` and `update(table).filter(x)`.
    ///
    /// # Example
    ///
    /// ```rust
    /// # include!("../../doctest_setup.rs");
    /// #
    /// # fn main() {
    /// #     use schema::users::dsl::*;
    /// #     let connection = establish_connection();
    /// let updated_rows = diesel::update(users)
    ///     .set(name.eq("Jim"))
    ///     .filter(name.eq("Sean"))
    ///     .execute(&connection);
    /// assert_eq!(Ok(1), updated_rows);
    ///
    /// let expected_names = vec!["Jim".to_string(), "Tess".to_string()];
    /// let names = users.select(name).order(id).load(&connection);
    ///
    /// assert_eq!(Ok(expected_names), names);
    /// # }
    /// ```
    pub fn filter<Predicate>(self, predicate: Predicate) -> Filter<Self, Predicate>
    where
        Self: FilterDsl<Predicate>,
    {
        // Delegate to the DSL trait so both call styles share one impl.
        FilterDsl::filter(self, predicate)
    }
    /// Boxes the `WHERE` clause of this update statement.
    ///
    /// This is useful for cases where you want to conditionally modify a query,
    /// but need the type to remain the same. The backend must be specified as
    /// part of this. It is not possible to box a query and have it be useable
    /// on multiple backends.
    ///
    /// A boxed query will incur a minor performance penalty, as the query builder
    /// can no longer be inlined by the compiler. For most applications this cost
    /// will be minimal.
    ///
    /// ### Example
    ///
    /// ```rust
    /// # include!("../../doctest_setup.rs");
    /// #
    /// # fn main() {
    /// #     run_test().unwrap();
    /// # }
    /// #
    /// # fn run_test() -> QueryResult<()> {
    /// #     use std::collections::HashMap;
    /// #     use schema::users::dsl::*;
    /// #     let connection = establish_connection();
    /// #     let mut params = HashMap::new();
    /// #     params.insert("tess_has_been_a_jerk", false);
    /// let mut query = diesel::update(users)
    ///     .set(name.eq("Jerk"))
    ///     .into_boxed();
    ///
    /// if !params["tess_has_been_a_jerk"] {
    ///     query = query.filter(name.ne("Tess"));
    /// }
    ///
    /// let updated_rows = query.execute(&connection)?;
    /// assert_eq!(1, updated_rows);
    ///
    /// let expected_names = vec!["Jerk", "Tess"];
    /// let names = users.select(name).order(id).load::<String>(&connection)?;
    ///
    /// assert_eq!(expected_names, names);
    /// #     Ok(())
    /// # }
    /// ```
    pub fn into_boxed<'a, DB>(self) -> IntoBoxed<'a, Self, DB>
    where
        DB: Backend,
        Self: BoxedDsl<'a, DB>,
    {
        // Delegate to the DSL trait so both call styles share one impl.
        BoxedDsl::internal_into_boxed(self)
    }
}
impl<T, U, V, Ret, Predicate> FilterDsl<Predicate> for UpdateStatement<T, U, V, Ret>
where
U: WhereAnd<Predicate>,
Predicate: AppearsOnTable<T>,
{
type Output = UpdateStatement<T, U::Output, V, Ret>;
fn filter(self, predicate: Predicate) -> Self::Output {
UpdateStatement {
table: self.table,
where_clause: self.where_clause.and(predicate),
values: self.values,
returning: self.returning,
}
}
}
impl<'a, T, U, V, Ret, DB> BoxedDsl<'a, DB> for UpdateStatement<T, U, V, Ret>
where
U: Into<BoxedWhereClause<'a, DB>>,
{
type Output = BoxedUpdateStatement<'a, DB, T, V, Ret>;
fn internal_into_boxed(self) -> Self::Output {
UpdateStatement {
table: self.table,
where_clause: self.where_clause.into(),
values: self.values,
returning: self.returning,
}
}
}
impl<T, U, V, Ret, DB> QueryFragment<DB> for UpdateStatement<T, U, V, Ret>
where
DB: Backend,
T: Table,
T::FromClause: QueryFragment<DB>,
U: QueryFragment<DB>,
V: QueryFragment<DB>,
Ret: QueryFragment<DB>,
{
fn walk_ast(&self, mut out: AstPass<DB>) -> QueryResult<()> {
if self.values.is_noop()? {
return Err(QueryBuilderError(
"There are no changes to save. This query cannot be built".into(),
));
}
out.unsafe_to_cache_prepared();
out.push_sql("UPDATE ");
self.table.from_clause().walk_ast(out.reborrow())?;
out.push_sql(" SET ");
self.values.walk_ast(out.reborrow())?;
self.where_clause.walk_ast(out.reborrow())?;
self.returning.walk_ast(out.reborrow())?;
Ok(())
}
}
impl<T, U, V, Ret> QueryId for UpdateStatement<T, U, V, Ret> {
type QueryId = ();
const HAS_STATIC_QUERY_ID: bool = false;
}
impl<T, U, V> AsQuery for UpdateStatement<T, U, V, NoReturningClause>
where
T: Table,
UpdateStatement<T, U, V, ReturningClause<T::AllColumns>>: Query,
{
type SqlType = <Self::Query as Query>::SqlType;
type Query = UpdateStatement<T, U, V, ReturningClause<T::AllColumns>>;
fn as_query(self) -> Self::Query {
self.returning(T::all_columns())
}
}
impl<T, U, V, Ret> Query for UpdateStatement<T, U, V, ReturningClause<Ret>>
where
T: Table,
Ret: Expression + SelectableExpression<T> + NonAggregate,
{
type SqlType = Ret::SqlType;
}
impl<T, U, V, Ret, Conn> RunQueryDsl<Conn> for UpdateStatement<T, U, V, Ret> {}
impl<T, U, V> UpdateStatement<T, U, V, NoReturningClause> {
/// Specify what expression is returned after execution of the `update`.
/// # Examples
///
/// ### Updating a single record:
///
/// ```rust
/// # include!("../../doctest_setup.rs");
/// #
/// # #[cfg(feature = "postgres")]
/// # fn main() {
/// # use schema::users::dsl::*;
/// # let connection = establish_connection();
/// let updated_name = diesel::update(users.filter(id.eq(1)))
/// .set(name.eq("Dean"))
/// .returning(name)
/// .get_result(&connection);
/// assert_eq!(Ok("Dean".to_string()), updated_name);
/// # }
/// # #[cfg(not(feature = "postgres"))]
/// # fn main() {}
/// ```
pub fn returning<E>(self, returns: E) -> UpdateStatement<T, U, V, ReturningClause<E>>
where
T: Table,
UpdateStatement<T, U, V, ReturningClause<E>>: Query,
{
UpdateStatement {
table: self.table,
where_clause: self.where_clause,
values: self.values,
returning: ReturningClause(returns),
}
}
}
/// Indicates that you have not yet called `.set` on an update statement
#[derive(Debug, Clone, Copy)]
pub struct SetNotCalled;
| 31.879859 | 89 | 0.589781 |
912beffac8d2b2692f739994d32c9747b88869cc | 975 | // Copyright lowRISC contributors.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
//! I/O interfaces, in lieu of [`std::io`].
//!
//! These functions and traits are mostly intended for manipulating byte
//! buffers, but they could be implemented on other types that provide a
//! read/write interface.
pub mod bit_buf;
pub mod cursor;
pub mod endian;
pub mod read;
pub mod write;
pub use cursor::Cursor;
pub use read::Read;
pub use write::Write;
/// A generic, low-level I/O error.
#[derive(Copy, Clone, Debug)]
#[non_exhaustive]
pub enum Error {
/// Indicates that some underlying buffer has been completely used up,
/// either for reading from or writing to.
///
/// This is typically a fatal error, since it is probably not possible
/// to re-allocate that underlying buffer.
BufferExhausted,
/// Indicates that an unspecified, internal failure occurred.
Internal,
}
| 27.857143 | 75 | 0.710769 |
bfce8984698c56ab092107b2245f86c8e914bee0 | 7,508 | use std::{
future::Future,
marker::PhantomData,
ops::{Deref, DerefMut},
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use pin_project_lite::pin_project;
pub use map::Map;
pub use then::Then;
pub use timeout::Timeout;
use crate::actor::Actor;
mod either;
mod map;
pub mod result;
mod then;
mod timeout;
/// Trait for types which are a placeholder of a value that may become
/// available at some later point in time.
///
/// `ActorFuture` is very similar to a regular `Future`, only with subsequent combinator closures accepting the actor and its context, in addition to the result.
///
/// `ActorFuture` allows for use cases where future processing requires access to the actor or its context.
///
/// Here is an example of a handler on a single actor, deferring work to another actor, and
/// then updating the initiating actor's state:
///
/// ```no_run
/// use actix::prelude::*;
///
/// // The response type returned by the actor future
/// type OriginalActorResponse = ();
/// // The error type returned by the actor future
/// type MessageError = ();
/// // This is the needed result for the DeferredWork message
/// // It's a result that combine both Response and Error from the future response.
/// type DeferredWorkResult = Result<OriginalActorResponse, MessageError>;
/// #
/// # struct ActorState {}
/// #
/// # impl ActorState {
/// # fn update_from(&mut self, _result: ()) {}
/// # }
/// #
/// # struct OtherActor {}
/// #
/// # impl Actor for OtherActor {
/// # type Context = Context<Self>;
/// # }
/// #
/// # impl Handler<OtherMessage> for OtherActor {
/// # type Result = ();
/// #
/// # fn handle(&mut self, _msg: OtherMessage, _ctx: &mut Context<Self>) -> Self::Result {
/// # }
/// # }
/// #
/// # struct OriginalActor{
/// # other_actor: Addr<OtherActor>,
/// # inner_state: ActorState
/// # }
/// #
/// # impl Actor for OriginalActor{
/// # type Context = Context<Self>;
/// # }
/// #
/// # #[derive(Message)]
/// # #[rtype(result = "Result<(), MessageError>")]
/// # struct DeferredWork{}
/// #
/// # #[derive(Message)]
/// # #[rtype(result = "()")]
/// # struct OtherMessage{}
///
/// impl Handler<DeferredWork> for OriginalActor {
/// // Notice the `Response` is an `ActorFuture`-ized version of `Self::Message::Result`.
/// type Result = ResponseActFuture<Self, Result<OriginalActorResponse, MessageError>>;
///
/// fn handle(&mut self, _msg: DeferredWork, _ctx: &mut Context<Self>) -> Self::Result {
/// // this creates a `Future` representing the `.send` and subsequent `Result` from
/// // `other_actor`
/// let send_to_other = self.other_actor
/// .send(OtherMessage {});
///
/// // Wrap that `Future` so subsequent chained handlers can access
/// // the `actor` (`self` in the synchronous code) as well as the context.
/// let send_to_other = actix::fut::wrap_future::<_, Self>(send_to_other);
///
/// // once the wrapped future resolves, update this actor's state
/// let update_self = send_to_other.map(|result, actor, _ctx| {
/// // Actor's state updated here
/// match result {
/// Ok(v) => {
/// actor.inner_state.update_from(v);
/// Ok(())
/// },
/// // Failed to send message to other_actor
/// Err(_e) => Err(()),
/// }
/// });
///
/// // return the wrapped future
/// Box::pin(update_self)
/// }
/// }
///
/// ```
///
/// See also [into_actor](trait.WrapFuture.html#tymethod.into_actor), which provides future conversion using trait
pub trait ActorFuture<A: Actor> {
/// The type of value that this future will resolved with if it is
/// successful.
type Output;
fn poll(
self: Pin<&mut Self>,
srv: &mut A,
ctx: &mut A::Context,
task: &mut Context<'_>,
) -> Poll<Self::Output>;
}
pub trait ActorFutureExt<A: Actor>: ActorFuture<A> {
/// Map this future's result to a different type, returning a new future of
/// the resulting type.
fn map<F, U>(self, f: F) -> Map<Self, F>
where
F: FnOnce(Self::Output, &mut A, &mut A::Context) -> U,
Self: Sized,
{
Map::new(self, f)
}
/// Chain on a computation for when a future finished, passing the result of
/// the future to the provided closure `f`.
fn then<F, Fut>(self, f: F) -> Then<Self, Fut, F>
where
F: FnOnce(Self::Output, &mut A, &mut A::Context) -> Fut,
Fut: ActorFuture<A>,
Self: Sized,
{
then::new(self, f)
}
/// Add timeout to futures chain.
///
/// `Err(())` returned as a timeout error.
fn timeout(self, timeout: Duration) -> Timeout<Self>
where
Self: Sized,
{
Timeout::new(self, timeout)
}
}
impl<F, A> ActorFutureExt<A> for F
where
F: ActorFuture<A>,
A: Actor,
{
}
/// Type alias for a pinned box ActorFuture trait object.
pub type LocalBoxActorFuture<A, I> = Pin<Box<dyn ActorFuture<A, Output = I>>>;
impl<F, A> ActorFuture<A> for Box<F>
where
F: ActorFuture<A> + Unpin + ?Sized,
A: Actor,
{
type Output = F::Output;
fn poll(
mut self: Pin<&mut Self>,
srv: &mut A,
ctx: &mut A::Context,
task: &mut Context<'_>,
) -> Poll<Self::Output> {
Pin::new(&mut **self.as_mut()).poll(srv, ctx, task)
}
}
impl<P, A> ActorFuture<A> for Pin<P>
where
P: Unpin + DerefMut,
<P as Deref>::Target: ActorFuture<A>,
A: Actor,
{
type Output = <<P as Deref>::Target as ActorFuture<A>>::Output;
fn poll(
self: Pin<&mut Self>,
srv: &mut A,
ctx: &mut A::Context,
task: &mut Context<'_>,
) -> Poll<Self::Output> {
Pin::get_mut(self).as_mut().poll(srv, ctx, task)
}
}
/// Helper trait that allows conversion of normal future into `ActorFuture`
pub trait WrapFuture<A>
where
A: Actor,
{
/// The future that this type can be converted into.
type Future: ActorFuture<A>;
#[deprecated(since = "0.11.0", note = "Please use WrapFuture::into_actor")]
#[doc(hidden)]
fn actfuture(self) -> Self::Future;
/// Convert normal future to a ActorFuture
fn into_actor(self, a: &A) -> Self::Future;
}
impl<F: Future, A: Actor> WrapFuture<A> for F {
type Future = FutureWrap<F, A>;
#[doc(hidden)]
fn actfuture(self) -> Self::Future {
wrap_future(self)
}
fn into_actor(self, _: &A) -> Self::Future {
wrap_future(self)
}
}
pin_project! {
pub struct FutureWrap<F, A>
where
F: Future,
A: Actor,
{
#[pin]
fut: F,
_act: PhantomData<A>
}
}
/// Converts normal future into `ActorFuture`, allowing its processing to
/// use the actor's state.
///
/// See the documentation for [ActorFuture](trait.ActorFuture.html) for a practical example involving both
/// `wrap_future` and `ActorFuture`
pub fn wrap_future<F, A>(f: F) -> FutureWrap<F, A>
where
F: Future,
A: Actor,
{
FutureWrap {
fut: f,
_act: PhantomData,
}
}
impl<F, A> ActorFuture<A> for FutureWrap<F, A>
where
F: Future,
A: Actor,
{
type Output = F::Output;
fn poll(
self: Pin<&mut Self>,
_: &mut A,
_: &mut A::Context,
task: &mut Context<'_>,
) -> Poll<Self::Output> {
self.project().fut.poll(task)
}
}
| 26.814286 | 161 | 0.581513 |
edf21f3ad7e6f6396933078b1134f6725e187a63 | 6,581 | //! ObjectId
use std::{
error,
fmt,
result,
sync::atomic::{AtomicUsize, Ordering},
};
use byteorder::{BigEndian, ByteOrder};
use hex::{self, FromHexError};
use rand::{thread_rng, Rng};
use time;
const TIMESTAMP_SIZE: usize = 4;
const PROCESS_ID_SIZE: usize = 5;
const COUNTER_SIZE: usize = 3;
const TIMESTAMP_OFFSET: usize = 0;
const PROCESS_ID_OFFSET: usize = TIMESTAMP_OFFSET + TIMESTAMP_SIZE;
const COUNTER_OFFSET: usize = PROCESS_ID_OFFSET + PROCESS_ID_SIZE;
const MAX_U24: usize = 0xFF_FFFF;
static OID_COUNTER: AtomicUsize = AtomicUsize::new(0);
/// Errors that can occur during OID construction and generation.
#[derive(Debug)]
pub enum Error {
ArgumentError(String),
FromHexError(FromHexError),
}
impl From<FromHexError> for Error {
fn from(err: FromHexError) -> Error {
Error::FromHexError(err)
}
}
/// Alias for Result<T, oid::Error>.
pub type Result<T> = result::Result<T, Error>;
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::ArgumentError(ref inner) => inner.fmt(fmt),
Error::FromHexError(ref inner) => inner.fmt(fmt),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::ArgumentError(ref inner) => &inner,
Error::FromHexError(ref inner) =>
{
#[allow(deprecated)]
inner.description()
}
}
}
fn cause(&self) -> Option<&dyn error::Error> {
match *self {
Error::ArgumentError(_) => None,
Error::FromHexError(ref inner) => Some(inner),
}
}
}
/// A wrapper around raw 12-byte ObjectId representations.
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct ObjectId {
id: [u8; 12],
}
impl Default for ObjectId {
fn default() -> Self {
Self::new()
}
}
impl ObjectId {
/// Generates a new ObjectID, represented in bytes.
/// See the [docs](http://docs.mongodb.org/manual/reference/object-id/)
/// for more information.
pub fn new() -> ObjectId {
let timestamp = ObjectId::gen_timestamp();
let process_id = ObjectId::gen_process_id();
let counter = ObjectId::gen_count();
let mut buf: [u8; 12] = [0; 12];
buf[TIMESTAMP_OFFSET..(TIMESTAMP_SIZE + TIMESTAMP_OFFSET)]
.clone_from_slice(×tamp[..TIMESTAMP_SIZE]);
buf[PROCESS_ID_OFFSET..(PROCESS_ID_SIZE + PROCESS_ID_OFFSET)]
.clone_from_slice(&process_id[..PROCESS_ID_SIZE]);
buf[COUNTER_OFFSET..(COUNTER_SIZE + COUNTER_OFFSET)]
.clone_from_slice(&counter[..COUNTER_SIZE]);
ObjectId::with_bytes(buf)
}
/// Constructs a new ObjectId wrapper around the raw byte representation.
pub fn with_bytes(bytes: [u8; 12]) -> ObjectId {
ObjectId { id: bytes }
}
/// Creates an ObjectID using a 12-byte (24-char) hexadecimal string.
pub fn with_string(s: &str) -> Result<ObjectId> {
let bytes: Vec<u8> = hex::decode(s.as_bytes())?;
if bytes.len() != 12 {
Err(Error::ArgumentError(
"Provided string must be a 12-byte hexadecimal string.".to_owned(),
))
} else {
let mut byte_array: [u8; 12] = [0; 12];
byte_array[..].copy_from_slice(&bytes[..]);
Ok(ObjectId::with_bytes(byte_array))
}
}
/// Returns the raw byte representation of an ObjectId.
pub fn bytes(&self) -> [u8; 12] {
self.id
}
/// Convert the objectId to hex representation.
pub fn to_hex(&self) -> String {
hex::encode(self.id)
}
// Generates a new timestamp representing the current seconds since epoch.
// Represented in Big Endian.
fn gen_timestamp() -> [u8; 4] {
let timespec = time::get_time();
let timestamp = timespec.sec as u32;
let mut buf: [u8; 4] = [0; 4];
BigEndian::write_u32(&mut buf, timestamp);
buf
}
// Generate a random 5-byte array.
fn gen_process_id() -> [u8; 5] {
let rng = thread_rng().gen_range(0, MAX_U24) as u32;
let mut buf: [u8; 5] = [0; 5];
BigEndian::write_u32(&mut buf, rng);
buf
}
// Gets an incremental 3-byte count.
// Represented in Big Endian.
fn gen_count() -> [u8; 3] {
// Init oid counter
if OID_COUNTER.load(Ordering::SeqCst) == 0 {
let start = thread_rng().gen_range(0, MAX_U24 + 1);
OID_COUNTER.store(start, Ordering::SeqCst);
}
let u_counter = OID_COUNTER.fetch_add(1, Ordering::SeqCst);
// Mod result instead of OID_COUNTER to prevent threading issues.
// Static mutexes are currently unstable; once they have been
// stabilized, one should be used to access OID_COUNTER and
// perform multiple operations atomically.
let u = u_counter % MAX_U24;
// Convert usize to writable u64, then extract the first three bytes.
let u_int = u as u64;
let mut buf: [u8; 8] = [0; 8];
BigEndian::write_u64(&mut buf, u_int);
let buf_u24: [u8; 3] = [buf[5], buf[6], buf[7]];
buf_u24
}
}
impl fmt::Display for ObjectId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.to_hex())
}
}
impl fmt::Debug for ObjectId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&format!("ObjectId({})", self.to_hex()))
}
}
#[test]
fn count_generated_is_big_endian() {
let start = 1_122_866;
OID_COUNTER.store(start, Ordering::SeqCst);
// Test count generates correct value 1122866
let count_bytes = ObjectId::gen_count();
let mut buf: [u8; 4] = [0; 4];
buf[1..=COUNTER_SIZE].clone_from_slice(&count_bytes[..COUNTER_SIZE]);
let count = BigEndian::read_u32(&buf);
assert_eq!(start as u32, count);
// Test OID formats count correctly as big endian
let oid = ObjectId::new();
assert_eq!(0x11u8, oid.bytes()[COUNTER_OFFSET]);
assert_eq!(0x22u8, oid.bytes()[COUNTER_OFFSET + 1]);
assert_eq!(0x33u8, oid.bytes()[COUNTER_OFFSET + 2]);
}
#[test]
fn test_display() {
let id = ObjectId::with_string("53e37d08776f724e42000000").unwrap();
assert_eq!(format!("{}", id), "53e37d08776f724e42000000")
}
#[test]
fn test_debug() {
let id = ObjectId::with_string("53e37d08776f724e42000000").unwrap();
assert_eq!(format!("{:?}", id), "ObjectId(53e37d08776f724e42000000)")
}
| 28.737991 | 83 | 0.607962 |
e2f92c66913365b134807cbb7495b09aaaa556b5 | 3,064 | use criterion::{black_box, BatchSize, BenchmarkId, Criterion, Throughput};
use std::collections::{BTreeSet, BinaryHeap, HashSet};
use crate::util::{self, Bool256, Rand};
use byte_set::ByteSet;
pub fn benches(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("Min");
let mut rng = rand::thread_rng();
for &size in util::SIZES {
group.throughput(Throughput::Bytes(size as u64));
group.bench_function(BenchmarkId::new("ByteSet", size), |b| {
b.iter_batched_ref(
|| black_box(ByteSet::rand_len(size, &mut rng)),
|byte_set| {
black_box(byte_set.first());
},
BatchSize::SmallInput,
)
});
group.bench_function(BenchmarkId::new("[bool; 256]", size), |b| {
b.iter_batched_ref(
|| black_box(Bool256::rand_len(size, &mut rng)),
|bool256| {
black_box(bool256.min());
},
BatchSize::SmallInput,
)
});
let range_inclusive = black_box(0u8..=util::saturating_cast(size));
group.bench_with_input(
BenchmarkId::new("RangeInclusive<u8>", size),
&range_inclusive,
|b, range_inclusive| {
b.iter(|| {
black_box(range_inclusive.start());
})
},
);
group.bench_function(BenchmarkId::new("HashSet<u8>", size), |b| {
b.iter_batched_ref(
|| black_box(HashSet::<u8>::rand_len(size, &mut rng)),
|hash_set| {
black_box(hash_set.iter().min());
},
BatchSize::SmallInput,
)
});
group.bench_function(BenchmarkId::new("BTreeSet<u8>", size), |b| {
b.iter_batched_ref(
|| black_box(BTreeSet::<u8>::rand_len(size, &mut rng)),
|btree_set| {
// The `first` method is nightly-only:
// https://github.com/rust-lang/rust/issues/62924
black_box(btree_set.iter().next());
},
BatchSize::SmallInput,
)
});
group.bench_function(BenchmarkId::new("Vec<u8>", size), |b| {
b.iter_batched_ref(
|| black_box(Vec::<u8>::rand_len(size, &mut rng)),
|vec| {
black_box(vec.iter().min());
},
BatchSize::SmallInput,
)
});
group.bench_function(BenchmarkId::new("BinaryHeap<u8>", size), |b| {
b.iter_batched_ref(
// `Reverse` is required here because `peek` returns the max value.
|| black_box(BinaryHeap::<u8>::rand_len(size, &mut rng)),
|binary_heap| {
black_box(binary_heap.iter().min());
},
BatchSize::SmallInput,
)
});
}
group.finish();
}
| 33.304348 | 83 | 0.484661 |
76ed81f100de55093f915df62ec00964b5337893 | 1,433 | use core::fmt;
use core::prelude::*;
use super::drivers::vga;
struct Stdout;
impl Stdout {
fn write_fmt(&mut self, fmt: &fmt::Arguments) {
fmt::write(self, fmt);
}
}
impl fmt::FormatWriter for Stdout {
fn write(&mut self, bytes: &[u8]) -> fmt::Result {
for &c in bytes.iter() {
putc(c);
}
Ok(())
}
}
pub fn print_args(fmt: &fmt::Arguments) {
write!(Stdout, "{}", fmt);
}
pub fn println_args(fmt: &fmt::Arguments) {
writeln!(Stdout, "{}", fmt);
}
static mut pos: int = 0;
unsafe fn seek(offset: int) {
pos += offset;
}
unsafe fn write_char(c: char) {
if c == '\x08' {
if pos > 0 {
if pos % 80 == 0 {
while (*vga::SCREEN)[(pos-1) as uint].char == 0 {
pos -= 1;
}
}
else if pos > 0 {
pos -= 1;
(*vga::SCREEN)[pos as uint].char = 0;
}
}
}
else if c == '\n' {
seek(80 - pos % 80);
}
else if c == '\t' {
seek(4 - pos % 4);
}
else {
(*vga::SCREEN)[pos as uint].char = c as u8;
pos += 1;
}
pos %= vga::SCREEN_SIZE as int;
vga::cursor_at(pos as uint);
}
pub fn putc(c: u8) {
unsafe {
write_char(c as char);
}
}
pub fn puti(num: int) {
}
pub fn puts(s: &str) {
for c in s.as_bytes().iter() {
putc(*c);
}
}
| 17.9125 | 65 | 0.449407 |
23371e82a646f70d8d2ff4f39ddf83a9d9dd913e | 6,217 | use goblin::{elf, mach};
use elf::{find_elf_section, has_elf_section};
use mach::{find_mach_section, has_mach_segment};
use object::{Object, ObjectTarget};
/// Provides access to DWARF debugging information in object files.
pub trait DwarfData {
/// Checks whether this object contains DWARF infos.
fn has_dwarf_data(&self) -> bool;
/// Loads a specific dwarf section if its in the file.
fn get_dwarf_section(&self, section: DwarfSection) -> Option<DwarfSectionData>;
}
impl<'input> DwarfData for Object<'input> {
fn has_dwarf_data(&self) -> bool {
match self.target {
// We assume an ELF contains debug information if it still contains
// the debug_info section. The file utility uses a similar mechanism,
// except that it checks for the ".symtab" section instead.
ObjectTarget::Elf(ref elf) => has_elf_section(
elf,
elf::section_header::SHT_PROGBITS,
DwarfSection::DebugInfo.elf_name(),
),
// MachO generally stores debug information in the "__DWARF" segment,
// so we simply check if it is present. The only exception to this
// rule is call frame information (CFI), which is stored in the __TEXT
// segment of the executable. This, however, requires more specific
// logic anyway, so we ignore this here.
ObjectTarget::MachOSingle(ref macho) => has_mach_segment(macho, "__DWARF"),
ObjectTarget::MachOFat(_, ref macho) => has_mach_segment(macho, "__DWARF"),
// We do not support DWARF in any other object targets
_ => false,
}
}
fn get_dwarf_section(&self, section: DwarfSection) -> Option<DwarfSectionData> {
match self.target {
ObjectTarget::Elf(ref elf) => read_elf_dwarf_section(elf, self.as_bytes(), section),
ObjectTarget::MachOSingle(ref macho) => read_mach_dwarf_section(macho, section),
ObjectTarget::MachOFat(_, ref macho) => read_mach_dwarf_section(macho, section),
_ => None,
}
}
}
/// Represents the name of a DWARF debug section.
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
pub enum DwarfSection {
EhFrame,
DebugFrame,
DebugAbbrev,
DebugAranges,
DebugLine,
DebugLoc,
DebugPubNames,
DebugRanges,
DebugStr,
DebugInfo,
DebugTypes,
}
impl DwarfSection {
/// Return the name for ELF.
pub fn elf_name(self) -> &'static str {
match self {
DwarfSection::EhFrame => ".eh_frame",
DwarfSection::DebugFrame => ".debug_frame",
DwarfSection::DebugAbbrev => ".debug_abbrev",
DwarfSection::DebugAranges => ".debug_aranges",
DwarfSection::DebugLine => ".debug_line",
DwarfSection::DebugLoc => ".debug_loc",
DwarfSection::DebugPubNames => ".debug_pubnames",
DwarfSection::DebugRanges => ".debug_ranges",
DwarfSection::DebugStr => ".debug_str",
DwarfSection::DebugInfo => ".debug_info",
DwarfSection::DebugTypes => ".debug_types",
}
}
/// Return the name for MachO.
pub fn macho_name(self) -> &'static str {
match self {
DwarfSection::EhFrame => "__eh_frame",
DwarfSection::DebugFrame => "__debug_frame",
DwarfSection::DebugAbbrev => "__debug_abbrev",
DwarfSection::DebugAranges => "__debug_aranges",
DwarfSection::DebugLine => "__debug_line",
DwarfSection::DebugLoc => "__debug_loc",
DwarfSection::DebugPubNames => "__debug_pubnames",
DwarfSection::DebugRanges => "__debug_ranges",
DwarfSection::DebugStr => "__debug_str",
DwarfSection::DebugInfo => "__debug_info",
DwarfSection::DebugTypes => "__debug_types",
}
}
/// Return the name of the section for debug purposes.
pub fn name(self) -> &'static str {
match self {
DwarfSection::EhFrame => "eh_frame",
DwarfSection::DebugFrame => "debug_frame",
DwarfSection::DebugAbbrev => "debug_abbrev",
DwarfSection::DebugAranges => "debug_aranges",
DwarfSection::DebugLine => "debug_line",
DwarfSection::DebugLoc => "debug_loc",
DwarfSection::DebugPubNames => "debug_pubnames",
DwarfSection::DebugRanges => "debug_ranges",
DwarfSection::DebugStr => "debug_str",
DwarfSection::DebugInfo => "debug_info",
DwarfSection::DebugTypes => "debug_types",
}
}
}
/// Gives access to a section in a dwarf file.
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct DwarfSectionData<'data> {
section: DwarfSection,
data: &'data [u8],
offset: u64,
}
impl<'data> DwarfSectionData<'data> {
/// Constructs a `DwarfSectionData` object from raw data.
pub fn new(section: DwarfSection, data: &[u8], offset: u64) -> DwarfSectionData {
DwarfSectionData {
section,
data,
offset,
}
}
/// Return the section data as bytes.
pub fn as_bytes(&self) -> &'data [u8] {
self.data
}
/// Get the absolute file offset.
pub fn offset(&self) -> u64 {
self.offset
}
/// Get the section name.
pub fn section(&self) -> DwarfSection {
self.section
}
}
/// Reads a single `DwarfSection` from an ELF object file.
fn read_elf_dwarf_section<'data>(
elf: &elf::Elf<'data>,
data: &'data [u8],
sect: DwarfSection,
) -> Option<DwarfSectionData<'data>> {
let sh_type = elf::section_header::SHT_PROGBITS;
find_elf_section(elf, data, sh_type, sect.elf_name())
.map(|section| DwarfSectionData::new(sect, section.data, section.header.sh_offset))
}
/// Reads a single `DwarfSection` from Mach object file.
fn read_mach_dwarf_section<'data>(
macho: &mach::MachO<'data>,
sect: DwarfSection,
) -> Option<DwarfSectionData<'data>> {
find_mach_section(macho, sect.macho_name())
.map(|section| DwarfSectionData::new(sect, section.data, section.header.offset.into()))
}
| 35.936416 | 96 | 0.618305 |
9b9e4496a870dc926c88791b9e4aaebc4e54d623 | 965 | // Test an `exists<'a> { forall<'b> { 'a = 'b } }` pattern -- which should not compile!
//
// In particular, we test this pattern in trait solving, where it is not connected
// to any part of the source code.
use std::cell::Cell;
trait Trait<T> {}
fn foo<T>()
where
T: Trait<for<'b> fn(Cell<&'b u32>)>,
{
}
impl<'a> Trait<fn(Cell<&'a u32>)> for () {}
fn main() {
// Here, proving that `(): Trait<for<'b> fn(&'b u32)>` uses the impl:
//
// - The impl provides the clause `forall<'a> { (): Trait<fn(&'a u32)> }`
// - We instantiate `'a` existentially to get `(): Trait<fn(&?a u32)>`
// - We unify `fn(&?a u32)` with `for<'b> fn(&'b u32)`
// - This requires (among other things) instantiating `'b` universally,
// yielding `fn(&!b u32)`, in a fresh universe U1
// - So we get `?a = !b` but the universe U0 assigned to `?a` cannot name `!b`.
foo::<()>(); //~ ERROR implementation of `Trait` is not general enough
}
| 32.166667 | 87 | 0.57513 |
eb522832769d0619a8ea3d026659122b44f10327 | 1,876 | use helpers::{HashMap, HashSet};
type Graph<'a> = HashMap<&'a str, HashSet<&'a str>>;
const START: &str = "start";
const END: &str = "end";
fn num_paths<'a>(
graph: &Graph<'a>,
visited: &mut HashSet<&'a str>,
re_visit: Option<&'a str>,
cur: &'a str,
) -> usize {
if cur == END {
return match re_visit {
None => 1,
Some(x) => {
if visited.contains(x) {
1
} else {
// don't double-count, will be handled in the second case
0
}
}
};
}
if visited.contains(&cur) {
return 0;
}
let mut ret = 0usize;
if cur.chars().all(|c| c.is_ascii_lowercase()) {
if cur != START && re_visit.is_none() {
let re_visit = Some(cur);
ret += graph[cur]
.iter()
.map(|&cur| num_paths(graph, &mut visited.clone(), re_visit, cur))
.sum::<usize>();
}
visited.insert(cur);
}
ret += graph[cur]
.iter()
.map(|&cur| num_paths(graph, &mut visited.clone(), re_visit, cur))
.sum::<usize>();
ret
}
fn run(s: &str, re_visit: Option<&str>) -> usize {
let mut graph = Graph::default();
for line in s.lines() {
let (a, b) = line.split_once('-').unwrap();
graph.entry(a).or_default().insert(b);
graph.entry(b).or_default().insert(a);
}
num_paths(&graph, &mut HashSet::default(), re_visit, START)
}
pub fn p1(s: &str) -> usize {
// Some(START) is just a hack to make sure we don't re-visit anything
run(s, Some(START))
}
pub fn p2(s: &str) -> usize {
run(s, None)
}
#[test]
fn t() {
let s = include_str!("input/d12.txt");
assert_eq!(p1(s), 3887);
assert_eq!(p2(s), 104834);
}
#[test]
fn ex1() {
let s = include_str!("input/d12_ex1.txt");
assert_eq!(p1(s), 10);
assert_eq!(p2(s), 36);
}
#[test]
fn ex2() {
let s = include_str!("input/d12_ex2.txt");
assert_eq!(p1(s), 19);
assert_eq!(p2(s), 103);
}
| 21.563218 | 74 | 0.554904 |
e6535fa9d2e80a6793c63a15bded02fc130c625b | 975 | use super::component_prelude::*;
use crate::expression::ExpressionValue;
use std::collections::HashMap;
pub mod prelude {
pub use super::update_variable_register::{
UpdateVariableAction,
UpdateVariableRegister,
};
pub use super::VariableRegister;
}
mod update_variable_register;
#[derive(Component, Deserialize, Clone, Default)]
#[storage(DenseVecStorage)]
#[serde(deny_unknown_fields, from = "HashMap<String, ExpressionValue>")]
pub struct VariableRegister {
variables: HashMap<String, ExpressionValue>,
}
impl VariableRegister {
pub fn set(&mut self, name: String, value: ExpressionValue) {
let _ = self.variables.insert(name, value);
}
pub fn get(&self, name: &str) -> Option<ExpressionValue> {
self.variables.get(name).cloned()
}
}
impl From<HashMap<String, ExpressionValue>> for VariableRegister {
fn from(variables: HashMap<String, ExpressionValue>) -> Self {
Self { variables }
}
}
| 26.351351 | 72 | 0.699487 |
916e0648a31abea3353a8e910871fe9f6bc48a8a | 19,826 | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Immutable strings.
#[allow(non_uppercase_statics)];
use base::{Boolean, CFAllocatorRef, CFIndex, CFIndexConvertible, CFOptionFlags, CFRange};
use base::{CFRelease, CFTypeID, TCFType, kCFAllocatorDefault, kCFAllocatorNull};
use std::cast;
use std::libc;
use std::ptr;
use std::vec;
pub type UniChar = libc::c_ushort;
// CFString.h
pub type CFStringCompareFlags = CFOptionFlags;
static kCFCompareCaseInsensitive: CFStringCompareFlags = 1;
static kCFCompareBackwards: CFStringCompareFlags = 4;
static kCFCompareAnchored: CFStringCompareFlags = 8;
static kCFCompareNonliteral: CFStringCompareFlags = 16;
static kCFCompareLocalized: CFStringCompareFlags = 32;
static kCFCompareNumerically: CFStringCompareFlags = 64;
static kCFCompareDiacriticInsensitive: CFStringCompareFlags = 128;
static kCFCompareWidthInsensitive: CFStringCompareFlags = 256;
static kCFCompareForcedOrdering: CFStringCompareFlags = 512;
pub type CFStringEncoding = u32;
// OS X built-in encodings.
static kCFStringEncodingMacRoman: CFStringEncoding = 0;
static kCFStringEncodingWindowsLatin1: CFStringEncoding = 0x0500;
static kCFStringEncodingISOLatin1: CFStringEncoding = 0x0201;
static kCFStringEncodingNextStepLatin: CFStringEncoding = 0x0B01;
static kCFStringEncodingASCII: CFStringEncoding = 0x0600;
static kCFStringEncodingUnicode: CFStringEncoding = 0x0100;
static kCFStringEncodingUTF8: CFStringEncoding = 0x08000100;
static kCFStringEncodingNonLossyASCII: CFStringEncoding = 0x0BFF;
static kCFStringEncodingUTF16: CFStringEncoding = 0x0100;
static kCFStringEncodingUTF16BE: CFStringEncoding = 0x10000100;
static kCFStringEncodingUTF16LE: CFStringEncoding = 0x14000100;
static kCFStringEncodingUTF32: CFStringEncoding = 0x0c000100;
static kCFStringEncodingUTF32BE: CFStringEncoding = 0x18000100;
static kCFStringEncodingUTF32LE: CFStringEncoding = 0x1c000100;
// CFStringEncodingExt.h
type CFStringEncodings = CFIndex;
// External encodings, except those defined above.
// Defined above: kCFStringEncodingMacRoman = 0
static kCFStringEncodingMacJapanese: CFStringEncoding = 1;
static kCFStringEncodingMacChineseTrad: CFStringEncoding = 2;
static kCFStringEncodingMacKorean: CFStringEncoding = 3;
static kCFStringEncodingMacArabic: CFStringEncoding = 4;
static kCFStringEncodingMacHebrew: CFStringEncoding = 5;
static kCFStringEncodingMacGreek: CFStringEncoding = 6;
static kCFStringEncodingMacCyrillic: CFStringEncoding = 7;
static kCFStringEncodingMacDevanagari: CFStringEncoding = 9;
static kCFStringEncodingMacGurmukhi: CFStringEncoding = 10;
static kCFStringEncodingMacGujarati: CFStringEncoding = 11;
static kCFStringEncodingMacOriya: CFStringEncoding = 12;
static kCFStringEncodingMacBengali: CFStringEncoding = 13;
static kCFStringEncodingMacTamil: CFStringEncoding = 14;
static kCFStringEncodingMacTelugu: CFStringEncoding = 15;
static kCFStringEncodingMacKannada: CFStringEncoding = 16;
static kCFStringEncodingMacMalayalam: CFStringEncoding = 17;
static kCFStringEncodingMacSinhalese: CFStringEncoding = 18;
static kCFStringEncodingMacBurmese: CFStringEncoding = 19;
static kCFStringEncodingMacKhmer: CFStringEncoding = 20;
static kCFStringEncodingMacThai: CFStringEncoding = 21;
static kCFStringEncodingMacLaotian: CFStringEncoding = 22;
static kCFStringEncodingMacGeorgian: CFStringEncoding = 23;
static kCFStringEncodingMacArmenian: CFStringEncoding = 24;
static kCFStringEncodingMacChineseSimp: CFStringEncoding = 25;
static kCFStringEncodingMacTibetan: CFStringEncoding = 26;
static kCFStringEncodingMacMongolian: CFStringEncoding = 27;
static kCFStringEncodingMacEthiopic: CFStringEncoding = 28;
static kCFStringEncodingMacCentralEurRoman: CFStringEncoding = 29;
static kCFStringEncodingMacVietnamese: CFStringEncoding = 30;
static kCFStringEncodingMacExtArabic: CFStringEncoding = 31;
static kCFStringEncodingMacSymbol: CFStringEncoding = 33;
static kCFStringEncodingMacDingbats: CFStringEncoding = 34;
static kCFStringEncodingMacTurkish: CFStringEncoding = 35;
static kCFStringEncodingMacCroatian: CFStringEncoding = 36;
static kCFStringEncodingMacIcelandic: CFStringEncoding = 37;
static kCFStringEncodingMacRomanian: CFStringEncoding = 38;
static kCFStringEncodingMacCeltic: CFStringEncoding = 39;
static kCFStringEncodingMacGaelic: CFStringEncoding = 40;
static kCFStringEncodingMacFarsi: CFStringEncoding = 0x8C;
static kCFStringEncodingMacUkrainian: CFStringEncoding = 0x98;
static kCFStringEncodingMacInuit: CFStringEncoding = 0xEC;
static kCFStringEncodingMacVT100: CFStringEncoding = 0xFC;
static kCFStringEncodingMacHFS: CFStringEncoding = 0xFF;
// Defined above: kCFStringEncodingISOLatin1 = 0x0201
static kCFStringEncodingISOLatin2: CFStringEncoding = 0x0202;
static kCFStringEncodingISOLatin3: CFStringEncoding = 0x0203;
static kCFStringEncodingISOLatin4: CFStringEncoding = 0x0204;
static kCFStringEncodingISOLatinCyrillic: CFStringEncoding = 0x0205;
static kCFStringEncodingISOLatinArabic: CFStringEncoding = 0x0206;
static kCFStringEncodingISOLatinGreek: CFStringEncoding = 0x0207;
static kCFStringEncodingISOLatinHebrew: CFStringEncoding = 0x0208;
static kCFStringEncodingISOLatin5: CFStringEncoding = 0x0209;
static kCFStringEncodingISOLatin6: CFStringEncoding = 0x020A;
static kCFStringEncodingISOLatinThai: CFStringEncoding = 0x020B;
static kCFStringEncodingISOLatin7: CFStringEncoding = 0x020D;
static kCFStringEncodingISOLatin8: CFStringEncoding = 0x020E;
static kCFStringEncodingISOLatin9: CFStringEncoding = 0x020F;
static kCFStringEncodingISOLatin10: CFStringEncoding = 0x0210;
static kCFStringEncodingDOSLatinUS: CFStringEncoding = 0x0400;
static kCFStringEncodingDOSGreek: CFStringEncoding = 0x0405;
static kCFStringEncodingDOSBalticRim: CFStringEncoding = 0x0406;
static kCFStringEncodingDOSLatin1: CFStringEncoding = 0x0410;
static kCFStringEncodingDOSGreek1: CFStringEncoding = 0x0411;
static kCFStringEncodingDOSLatin2: CFStringEncoding = 0x0412;
static kCFStringEncodingDOSCyrillic: CFStringEncoding = 0x0413;
static kCFStringEncodingDOSTurkish: CFStringEncoding = 0x0414;
static kCFStringEncodingDOSPortuguese: CFStringEncoding = 0x0415;
static kCFStringEncodingDOSIcelandic: CFStringEncoding = 0x0416;
static kCFStringEncodingDOSHebrew: CFStringEncoding = 0x0417;
static kCFStringEncodingDOSCanadianFrench: CFStringEncoding = 0x0418;
static kCFStringEncodingDOSArabic: CFStringEncoding = 0x0419;
static kCFStringEncodingDOSNordic: CFStringEncoding = 0x041A;
static kCFStringEncodingDOSRussian: CFStringEncoding = 0x041B;
static kCFStringEncodingDOSGreek2: CFStringEncoding = 0x041C;
static kCFStringEncodingDOSThai: CFStringEncoding = 0x041D;
static kCFStringEncodingDOSJapanese: CFStringEncoding = 0x0420;
static kCFStringEncodingDOSChineseSimplif: CFStringEncoding = 0x0421;
static kCFStringEncodingDOSKorean: CFStringEncoding = 0x0422;
static kCFStringEncodingDOSChineseTrad: CFStringEncoding = 0x0423;
// Defined above: kCFStringEncodingWindowsLatin1 = 0x0500
static kCFStringEncodingWindowsLatin2: CFStringEncoding = 0x0501;
static kCFStringEncodingWindowsCyrillic: CFStringEncoding = 0x0502;
static kCFStringEncodingWindowsGreek: CFStringEncoding = 0x0503;
static kCFStringEncodingWindowsLatin5: CFStringEncoding = 0x0504;
static kCFStringEncodingWindowsHebrew: CFStringEncoding = 0x0505;
static kCFStringEncodingWindowsArabic: CFStringEncoding = 0x0506;
static kCFStringEncodingWindowsBalticRim: CFStringEncoding = 0x0507;
static kCFStringEncodingWindowsVietnamese: CFStringEncoding = 0x0508;
static kCFStringEncodingWindowsKoreanJohab: CFStringEncoding = 0x0510;
// Defined above: kCFStringEncodingASCII = 0x0600
static kCFStringEncodingANSEL: CFStringEncoding = 0x0601;
static kCFStringEncodingJIS_X0201_76: CFStringEncoding = 0x0620;
static kCFStringEncodingJIS_X0208_83: CFStringEncoding = 0x0621;
static kCFStringEncodingJIS_X0208_90: CFStringEncoding = 0x0622;
static kCFStringEncodingJIS_X0212_90: CFStringEncoding = 0x0623;
static kCFStringEncodingJIS_C6226_78: CFStringEncoding = 0x0624;
static kCFStringEncodingShiftJIS_X0213: CFStringEncoding = 0x0628;
static kCFStringEncodingShiftJIS_X0213_MenKuTen: CFStringEncoding = 0x0629;
static kCFStringEncodingGB_2312_80: CFStringEncoding = 0x0630;
static kCFStringEncodingGBK_95: CFStringEncoding = 0x0631;
static kCFStringEncodingGB_18030_2000: CFStringEncoding = 0x0632;
static kCFStringEncodingKSC_5601_87: CFStringEncoding = 0x0640;
static kCFStringEncodingKSC_5601_92_Johab: CFStringEncoding = 0x0641;
static kCFStringEncodingCNS_11643_92_P1: CFStringEncoding = 0x0651;
static kCFStringEncodingCNS_11643_92_P2: CFStringEncoding = 0x0652;
static kCFStringEncodingCNS_11643_92_P3: CFStringEncoding = 0x0653;
static kCFStringEncodingISO_2022_JP: CFStringEncoding = 0x0820;
static kCFStringEncodingISO_2022_JP_2: CFStringEncoding = 0x0821;
static kCFStringEncodingISO_2022_JP_1: CFStringEncoding = 0x0822;
static kCFStringEncodingISO_2022_JP_3: CFStringEncoding = 0x0823;
static kCFStringEncodingISO_2022_CN: CFStringEncoding = 0x0830;
static kCFStringEncodingISO_2022_CN_EXT: CFStringEncoding = 0x0831;
static kCFStringEncodingISO_2022_KR: CFStringEncoding = 0x0840;
static kCFStringEncodingEUC_JP: CFStringEncoding = 0x0920;
static kCFStringEncodingEUC_CN: CFStringEncoding = 0x0930;
static kCFStringEncodingEUC_TW: CFStringEncoding = 0x0931;
static kCFStringEncodingEUC_KR: CFStringEncoding = 0x0940;
static kCFStringEncodingShiftJIS: CFStringEncoding = 0x0A01;
static kCFStringEncodingKOI8_R: CFStringEncoding = 0x0A02;
static kCFStringEncodingBig5: CFStringEncoding = 0x0A03;
static kCFStringEncodingMacRomanLatin1: CFStringEncoding = 0x0A04;
static kCFStringEncodingHZ_GB_2312: CFStringEncoding = 0x0A05;
static kCFStringEncodingBig5_HKSCS_1999: CFStringEncoding = 0x0A06;
static kCFStringEncodingVISCII: CFStringEncoding = 0x0A07;
static kCFStringEncodingKOI8_U: CFStringEncoding = 0x0A08;
static kCFStringEncodingBig5_E: CFStringEncoding = 0x0A09;
// Defined above: kCFStringEncodingNextStepLatin = 0x0B01
static kCFStringEncodingNextStepJapanese: CFStringEncoding = 0x0B02;
static kCFStringEncodingEBCDIC_US: CFStringEncoding = 0x0C01;
static kCFStringEncodingEBCDIC_CP037: CFStringEncoding = 0x0C02;
static kCFStringEncodingUTF7: CFStringEncoding = 0x04000100;
static kCFStringEncodingUTF7_IMAP: CFStringEncoding = 0x0A10;
static kCFStringEncodingShiftJIS_X0213_00: CFStringEncoding = 0x0628; /* Deprecated */
struct __CFString;
pub type CFStringRef = *__CFString;
/// An immutable string in one of a variety of encodings.
///
/// FIXME(pcwalton): Should be a newtype struct, but that fails due to a Rust compiler bug.
pub struct CFString {
priv obj: CFStringRef,
}
impl Clone for CFString {
#[fixed_stack_segment]
#[inline]
fn clone(&self) -> CFString {
unsafe {
TCFType::wrap_under_get_rule(self.obj)
}
}
}
impl Drop for CFString {
#[fixed_stack_segment]
fn drop(&mut self) {
unsafe {
CFRelease(self.as_CFTypeRef())
}
}
}
impl TCFType<CFStringRef> for CFString {
fn as_concrete_TypeRef(&self) -> CFStringRef {
self.obj
}
unsafe fn wrap_under_create_rule(obj: CFStringRef) -> CFString {
CFString {
obj: obj,
}
}
#[fixed_stack_segment]
#[inline]
fn type_id(_: Option<CFString>) -> CFTypeID {
unsafe {
CFStringGetTypeID()
}
}
}
impl FromStr for CFString {
/// Creates a new `CFString` instance from a Rust string.
#[fixed_stack_segment]
#[inline]
fn from_str(string: &str) -> Option<CFString> {
unsafe {
let string_ref = string.as_imm_buf(|bytes, len| {
CFStringCreateWithBytes(kCFAllocatorDefault,
bytes,
len.to_CFIndex(),
kCFStringEncodingUTF8,
false as Boolean,
kCFAllocatorNull)
});
Some(TCFType::wrap_under_create_rule(string_ref))
}
}
}
impl ToStr for CFString {
#[fixed_stack_segment]
fn to_str(&self) -> ~str {
unsafe {
let char_len = self.char_len();
let range = CFRange::init(0, char_len);
// First, ask how big the buffer ought to be.
let mut bytes_required: CFIndex = 0;
CFStringGetBytes(self.obj,
range,
kCFStringEncodingUTF8,
0,
false as Boolean,
ptr::null(),
0,
&mut bytes_required);
// Then, allocate the buffer and actually copy.
let buffer: ~[u8] = vec::from_elem(bytes_required as uint, '\x00' as u8);
let mut bytes_used: CFIndex = 0;
let chars_written = CFStringGetBytes(self.obj,
range,
kCFStringEncodingUTF8,
0,
false as Boolean,
vec::raw::to_ptr(buffer),
buffer.len().to_CFIndex(),
&mut bytes_used) as uint;
assert!(chars_written.to_CFIndex() == char_len);
// This is dangerous; we over-allocate and null-terminate the string (during
// initialization).
assert!(bytes_used == buffer.len().to_CFIndex());
// Then, reinterpret it as as string. You have been warned!
cast::transmute(buffer)
}
}
}
impl CFString {
/// Like `CFString::from_string`, but references a string that can be used as a backing store
/// by virtue of being statically allocated.
#[fixed_stack_segment]
#[inline]
pub fn from_static_string(string: &'static str) -> CFString {
unsafe {
let string_ref = string.as_imm_buf(|bytes, len| {
CFStringCreateWithBytesNoCopy(kCFAllocatorDefault,
bytes,
len.to_CFIndex(),
kCFStringEncodingUTF8,
false as Boolean,
kCFAllocatorNull)
});
TCFType::wrap_under_create_rule(string_ref)
}
}
/// Returns the number of characters in the string.
#[fixed_stack_segment]
#[inline]
pub fn char_len(&self) -> CFIndex {
unsafe {
CFStringGetLength(self.obj)
}
}
}
#[link_args="-framework CoreFoundation"]
#[nolink]
extern {
/*
* CFString.h
*/
// N.B. organized according to "Functions by task" in docs
/* Creating a CFString */
//fn CFSTR
//fn CFStringCreateArrayBySeparatingStrings
//fn CFStringCreateByCombiningStrings
//fn CFStringCreateCopy
//fn CFStringCreateFromExternalRepresentation
fn CFStringCreateWithBytes(alloc: CFAllocatorRef,
bytes: *u8,
numBytes: CFIndex,
encoding: CFStringEncoding,
isExternalRepresentation: Boolean,
contentsDeallocator: CFAllocatorRef)
-> CFStringRef;
fn CFStringCreateWithBytesNoCopy(alloc: CFAllocatorRef,
bytes: *u8,
numBytes: CFIndex,
encoding: CFStringEncoding,
isExternalRepresentation: Boolean,
contentsDeallocator: CFAllocatorRef)
-> CFStringRef;
//fn CFStringCreateWithCharacters
//fn CFStringCreateWithCharactersNoCopy
//fn CFStringCreateWithCString
//fn CFStringCreateWithCStringNoCopy
//fn CFStringCreateWithFormat
//fn CFStringCreateWithFormatAndArguments
//fn CFStringCreateWithPascalString
//fn CFStringCreateWithPascalStringNoCopy
//fn CFStringCreateWithSubstring
/* Searching Strings */
//fn CFStringCreateArrayWithFindResults
//fn CFStringFind
//fn CFStringFindCharacterFromSet
//fn CFStringFindWithOptions
//fn CFStringFindWithOptionsAndLocale
//fn CFStringGetLineBounds
/* Comparing Strings */
//fn CFStringCompare
//fn CFStringCompareWithOptions
//fn CFStringCompareWithOptionsAndLocale
//fn CFStringHasPrefix
//fn CFStringHasSuffix
/* Accessing Characters */
//fn CFStringCreateExternalRepresentation
fn CFStringGetBytes(theString: CFStringRef,
range: CFRange,
encoding: CFStringEncoding,
lossByte: u8,
isExternalRepresentation: Boolean,
buffer: *u8,
maxBufLen: CFIndex,
usedBufLen: *mut CFIndex)
-> CFIndex;
//fn CFStringGetCharacterAtIndex
//fn CFStringGetCharacters
//fn CFStringGetCharactersPtr
//fn CFStringGetCharacterFromInlineBuffer
//fn CFStringGetCString
//fn CFStringGetCStringPtr
fn CFStringGetLength(theString: CFStringRef) -> CFIndex;
//fn CFStringGetPascalString
//fn CFStringGetPascalStringPtr
//fn CFStringGetRangeOfComposedCharactersAtIndex
//fn CFStringInitInlineBuffer
/* Working With Hyphenation */
//fn CFStringGetHyphenationLocationBeforeIndex
//fn CFStringIsHyphenationAvailableForLocale
/* Working With Encodings */
//fn CFStringConvertEncodingToIANACharSetName
//fn CFStringConvertEncodingToNSStringEncoding
//fn CFStringConvertEncodingToWindowsCodepage
//fn CFStringConvertIANACharSetNameToEncoding
//fn CFStringConvertNSStringEncodingToEncoding
//fn CFStringConvertWindowsCodepageToEncoding
//fn CFStringGetFastestEncoding
//fn CFStringGetListOfAvailableEncodings
//fn CFStringGetMaximumSizeForEncoding
//fn CFStringGetMostCompatibleMacStringEncoding
//fn CFStringGetNameOfEncoding
//fn CFStringGetSmallestEncoding
//fn CFStringGetSystemEncoding
//fn CFStringIsEncodingAvailable
/* Getting Numeric Values */
//fn CFStringGetDoubleValue
//fn CFStringGetIntValue
/* Getting String Properties */
//fn CFShowStr
fn CFStringGetTypeID() -> CFTypeID;
/* String File System Representations */
//fn CFStringCreateWithFileSystemRepresentation
//fn CFStringGetFileSystemRepresentation
//fn CFStringGetMaximumSizeOfFileSystemRepresentation
/* Getting Paragraph Bounds */
//fn CFStringGetParagraphBounds
/* Managing Surrogates */
//fn CFStringGetLongCharacterForSurrogatePair
//fn CFStringGetSurrogatePairForLongCharacter
//fn CFStringIsSurrogateHighCharacter
//fn CFStringIsSurrogateLowCharacter
}
#[test]
fn string_and_back() {
let original = "The quick brown fox jumped over the slow lazy dog.";
let cfstr = CFString::from_static_string(original);
let converted = cfstr.to_str();
assert!(original == converted);
}
| 42.545064 | 97 | 0.727378 |
e9afd7c6e03b3fdd95c29fe87eccd43e16e26991 | 1,084 | // ANCHOR: all
use calloop::{timer::Timer, EventLoop, LoopSignal};
fn main() {
// ANCHOR: decl_loop
let mut event_loop: EventLoop<LoopSignal> =
EventLoop::try_new().expect("Failed to initialize the event loop!");
// ANCHOR_END: decl_loop
// ANCHOR: decl_source
let source = Timer::new().expect("Failed to create timer event source!");
let timer_handle = source.handle();
timer_handle.add_timeout(std::time::Duration::from_secs(5), "Timeout reached!");
// ANCHOR_END: decl_source
// ANCHOR: insert_source
let handle = event_loop.handle();
handle
.insert_source(source, |event, _metadata, shared_data| {
println!("Event fired: {}", event);
shared_data.stop();
})
.expect("Failed to insert event source!");
// ANCHOR_END: insert_source
// ANCHOR: run_loop
let mut shared_data = event_loop.get_signal();
event_loop
.run(None, &mut shared_data, |_shared_data| {})
.expect("Error during event loop!");
// ANCHOR_END: run_loop
}
// ANCHOR_END: all
| 29.297297 | 84 | 0.637454 |
2198e64217eb3cde7f58b92d18b70b9fbf2bb4fc | 82,854 | //! Clients for high level interactions with TUF repositories.
//!
//! # Example
//!
//! ```no_run
//! # use futures_executor::block_on;
//! # #[cfg(feature = "hyper_013")]
//! # use hyper_013 as hyper;
//! # #[cfg(feature = "hyper_014")]
//! # use hyper_014 as hyper;
//! # use hyper::client::Client as HttpClient;
//! # use std::path::PathBuf;
//! # use std::str::FromStr;
//! # use tuf::{Result, Tuf};
//! # use tuf::crypto::PublicKey;
//! # use tuf::client::{Client, Config};
//! # use tuf::metadata::{RootMetadata, Role, MetadataPath, MetadataVersion};
//! # use tuf::interchange::Json;
//! # use tuf::repository::{FileSystemRepository, HttpRepositoryBuilder};
//! #
//! # const PUBLIC_KEY: &'static [u8] = include_bytes!("../tests/ed25519/ed25519-1.pub");
//! #
//! # fn load_root_public_keys() -> Vec<PublicKey> {
//! # vec![PublicKey::from_ed25519(PUBLIC_KEY).unwrap()]
//! # }
//! #
//! # fn main() -> Result<()> {
//! # block_on(async {
//! let root_public_keys = load_root_public_keys();
//! let local = FileSystemRepository::<Json>::new(PathBuf::from("~/.rustup"))?;
//!
//! let remote = HttpRepositoryBuilder::new_with_uri(
//! "https://static.rust-lang.org/".parse::<http::Uri>().unwrap(),
//! HttpClient::new(),
//! )
//! .user_agent("rustup/1.4.0")
//! .build();
//!
//! let mut client = Client::with_trusted_root_keys(
//! Config::default(),
//! &MetadataVersion::Number(1),
//! 1,
//! &root_public_keys,
//! local,
//! remote,
//! ).await?;
//!
//! let _ = client.update().await?;
//! # Ok(())
//! # })
//! # }
//! ```
use chrono::offset::Utc;
use futures_io::{AsyncRead, AsyncWrite};
use futures_util::io::copy;
use log::{error, warn};
use std::collections::HashMap;
use std::future::Future;
use std::pin::Pin;
use crate::crypto::{self, HashAlgorithm, HashValue, PublicKey};
use crate::error::Error;
use crate::interchange::DataInterchange;
use crate::metadata::{
Metadata, MetadataPath, MetadataVersion, RawSignedMetadata, Role, RootMetadata,
SnapshotMetadata, TargetDescription, TargetPath, TargetsMetadata, TimestampMetadata,
VirtualTargetPath,
};
use crate::repository::{Repository, RepositoryProvider, RepositoryStorage};
use crate::tuf::Tuf;
use crate::verify::Verified;
use crate::Result;
/// Translates real paths (where a file is stored) into virtual paths (how it is addressed in TUF)
/// and back.
///
/// Implementations must obey the following identities for all possible inputs: converting a path
/// in one direction and then back must yield the original path.
///
/// ```
/// # use tuf::client::{PathTranslator, DefaultTranslator};
/// # use tuf::metadata::{VirtualTargetPath, TargetPath};
/// let path = TargetPath::new("foo".into()).unwrap();
/// let virt = VirtualTargetPath::new("foo".into()).unwrap();
/// let translator = DefaultTranslator::new();
/// assert_eq!(path,
///            translator.virtual_to_real(&translator.real_to_virtual(&path).unwrap()).unwrap());
/// assert_eq!(virt,
///            translator.real_to_virtual(&translator.virtual_to_real(&virt).unwrap()).unwrap());
/// ```
pub trait PathTranslator {
    /// Convert a real path (where a target file is stored) into a virtual path (how it is
    /// addressed in TUF metadata).
    fn real_to_virtual(&self, path: &TargetPath) -> Result<VirtualTargetPath>;

    /// Convert a virtual path (how a target is addressed in TUF metadata) back into a real path
    /// (where the target file is stored).
    fn virtual_to_real(&self, path: &VirtualTargetPath) -> Result<TargetPath>;
}
/// A `PathTranslator` that maps every real path to the identical virtual path and back.
#[derive(Clone, Debug, Default)]
pub struct DefaultTranslator;

impl DefaultTranslator {
    /// Create a new `DefaultTranslator`.
    pub fn new() -> Self {
        Self
    }
}
impl PathTranslator for DefaultTranslator {
    // Identity translation: reuse the path's raw value unchanged in both directions.
    fn real_to_virtual(&self, path: &TargetPath) -> Result<VirtualTargetPath> {
        let raw = path.value().into();
        VirtualTargetPath::new(raw)
    }

    fn virtual_to_real(&self, path: &VirtualTargetPath) -> Result<TargetPath> {
        let raw = path.value().into();
        TargetPath::new(raw)
    }
}
/// A client that interacts with TUF repositories.
#[derive(Debug)]
pub struct Client<D, L, R, T>
where
    D: DataInterchange + Sync,
    L: RepositoryProvider<D> + RepositoryStorage<D>,
    R: RepositoryProvider<D>,
    T: PathTranslator,
{
    /// Verified TUF metadata state (trusted root/timestamp/snapshot/targets/delegations).
    tuf: Tuf<D>,
    /// Client configuration (e.g. `max_root_length`, path translator).
    config: Config<T>,
    /// Local repository; read as a cache on startup and written to when new metadata is verified.
    local: Repository<L, D>,
    /// Remote repository that updates are fetched from.
    remote: Repository<R, D>,
}
impl<D, L, R, T> Client<D, L, R, T>
where
D: DataInterchange + Sync,
L: RepositoryProvider<D> + RepositoryStorage<D>,
R: RepositoryProvider<D>,
T: PathTranslator,
{
/// Create a new TUF client. It will attempt to load the latest root metadata from the local
/// repo and use it as the initial trusted root metadata, or it will return an error if it
/// cannot do so.
///
/// **WARNING**: This is trust-on-first-use (TOFU) and offers weaker security guarantees than
/// the related methods [`Client::with_trusted_root`], [`Client::with_trusted_root_keys`].
///
/// # Examples
///
/// ```
/// # use chrono::offset::{Utc, TimeZone};
/// # use futures_executor::block_on;
/// # use tuf::{
/// # Error,
/// # interchange::Json,
/// # client::{Client, Config},
/// # crypto::{Ed25519PrivateKey, PrivateKey, SignatureScheme},
/// # metadata::{MetadataPath, MetadataVersion, Role, RootMetadataBuilder},
/// # repository::{EphemeralRepository, RepositoryStorage},
/// # };
/// # fn main() -> Result<(), Error> {
/// # block_on(async {
/// # let private_key = Ed25519PrivateKey::from_pkcs8(
/// # &Ed25519PrivateKey::pkcs8()?,
/// # )?;
/// # let public_key = private_key.public().clone();
/// let local = EphemeralRepository::<Json>::new();
/// let remote = EphemeralRepository::<Json>::new();
///
/// let root_version = 1;
/// let root = RootMetadataBuilder::new()
/// .version(root_version)
/// .expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
/// .root_key(public_key.clone())
/// .snapshot_key(public_key.clone())
/// .targets_key(public_key.clone())
/// .timestamp_key(public_key.clone())
/// .signed::<Json>(&private_key)?;
///
/// let root_path = MetadataPath::from_role(&Role::Root);
/// let root_version = MetadataVersion::Number(root_version);
///
/// local.store_metadata(
/// &root_path,
/// &root_version,
/// &mut root.to_raw().unwrap().as_bytes()
/// ).await?;
///
/// let client = Client::with_trusted_local(
/// Config::default(),
/// local,
/// remote,
/// ).await?;
/// # Ok(())
/// # })
/// # }
/// ```
    pub async fn with_trusted_local(config: Config<T>, local: L, remote: R) -> Result<Self> {
        let (local, remote) = (Repository::new(local), Repository::new(remote));
        let root_path = MetadataPath::from_role(&Role::Root);

        // FIXME should this be MetadataVersion::None so we bootstrap with the latest version?
        let root_version = MetadataVersion::Number(1);

        // Trust-on-first-use: version 1 of the root metadata already cached in the local
        // repository becomes the initial trust anchor, with no out-of-band key pinning.
        // `config.max_root_length` bounds how much may be read while fetching it.
        let raw_root: RawSignedMetadata<_, RootMetadata> = local
            .fetch_metadata(&root_path, &root_version, config.max_root_length, None)
            .await?;

        let tuf = Tuf::from_trusted_root(&raw_root)?;

        Self::new(config, tuf, local, remote).await
    }
/// Create a new TUF client. It will trust this initial root metadata.
///
/// # Examples
///
/// ```
/// # use chrono::offset::{Utc, TimeZone};
/// # use futures_executor::block_on;
/// # use tuf::{
/// # Error,
/// # interchange::Json,
/// # client::{Client, Config},
/// # crypto::{Ed25519PrivateKey, KeyType, PrivateKey, SignatureScheme},
/// # metadata::{MetadataPath, MetadataVersion, Role, RootMetadataBuilder},
/// # repository::{EphemeralRepository},
/// # };
/// # fn main() -> Result<(), Error> {
/// # block_on(async {
/// # let private_key = Ed25519PrivateKey::from_pkcs8(
/// # &Ed25519PrivateKey::pkcs8()?,
/// # )?;
/// # let public_key = private_key.public().clone();
/// let local = EphemeralRepository::<Json>::new();
/// let remote = EphemeralRepository::<Json>::new();
///
/// let root_version = 1;
/// let root_threshold = 1;
/// let raw_root = RootMetadataBuilder::new()
/// .version(root_version)
/// .expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
/// .root_key(public_key.clone())
/// .root_threshold(root_threshold)
/// .snapshot_key(public_key.clone())
/// .targets_key(public_key.clone())
/// .timestamp_key(public_key.clone())
/// .signed::<Json>(&private_key)
/// .unwrap()
/// .to_raw()
/// .unwrap();
///
/// let client = Client::with_trusted_root(
/// Config::default(),
/// &raw_root,
/// local,
/// remote,
/// ).await?;
/// # Ok(())
/// # })
/// # }
/// ```
pub async fn with_trusted_root(
config: Config<T>,
trusted_root: &RawSignedMetadata<D, RootMetadata>,
local: L,
remote: R,
) -> Result<Self> {
let (local, remote) = (Repository::new(local), Repository::new(remote));
let tuf = Tuf::from_trusted_root(trusted_root)?;
Self::new(config, tuf, local, remote).await
}
/// Create a new TUF client. It will attempt to load initial root metadata from the local and remote
/// repositories using the provided keys to pin the verification.
///
/// # Examples
///
/// ```
/// # use chrono::offset::{Utc, TimeZone};
/// # use futures_executor::block_on;
/// # use std::iter::once;
/// # use tuf::{
/// # Error,
/// # interchange::Json,
/// # client::{Client, Config},
/// # crypto::{Ed25519PrivateKey, KeyType, PrivateKey, SignatureScheme},
/// # metadata::{MetadataPath, MetadataVersion, Role, RootMetadataBuilder},
/// # repository::{EphemeralRepository, RepositoryStorage},
/// # };
/// # fn main() -> Result<(), Error> {
/// # block_on(async {
/// # let private_key = Ed25519PrivateKey::from_pkcs8(
/// # &Ed25519PrivateKey::pkcs8()?,
/// # )?;
/// # let public_key = private_key.public().clone();
/// let local = EphemeralRepository::<Json>::new();
/// let remote = EphemeralRepository::<Json>::new();
///
/// let root_version = 1;
/// let root_threshold = 1;
/// let root = RootMetadataBuilder::new()
/// .version(root_version)
/// .expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
/// .root_key(public_key.clone())
/// .root_threshold(root_threshold)
/// .snapshot_key(public_key.clone())
/// .targets_key(public_key.clone())
/// .timestamp_key(public_key.clone())
/// .signed::<Json>(&private_key)?;
///
/// let root_path = MetadataPath::from_role(&Role::Root);
/// let root_version = MetadataVersion::Number(root_version);
///
/// remote.store_metadata(
/// &root_path,
/// &root_version,
/// &mut root.to_raw().unwrap().as_bytes()
/// ).await?;
///
/// let client = Client::with_trusted_root_keys(
/// Config::default(),
/// &root_version,
/// root_threshold,
/// once(&public_key),
/// local,
/// remote,
/// ).await?;
/// # Ok(())
/// # })
/// # }
/// ```
    pub async fn with_trusted_root_keys<'a, I>(
        config: Config<T>,
        root_version: &MetadataVersion,
        root_threshold: u32,
        trusted_root_keys: I,
        local: L,
        remote: R,
    ) -> Result<Self>
    where
        I: IntoIterator<Item = &'a PublicKey>,
    {
        let (local, remote) = (Repository::new(local), Repository::new(remote));

        // Prefer the locally cached copy of the requested root version; fall back to fetching
        // it from the remote repository. `fetched` records whether the remote was used.
        let root_path = MetadataPath::from_role(&Role::Root);
        let (fetched, raw_root) = fetch_metadata_from_local_or_else_remote(
            &root_path,
            root_version,
            config.max_root_length,
            None,
            &local,
            &remote,
        )
        .await?;

        // Pin verification to the caller-provided keys and threshold rather than
        // trusting whatever keys are embedded in the fetched metadata.
        let tuf = Tuf::from_root_with_trusted_keys(&raw_root, root_threshold, trusted_root_keys)?;

        // FIXME(#253) verify the trusted root version matches the provided version.

        let root_version = MetadataVersion::Number(tuf.trusted_root().version());

        // Only store the metadata after we have validated it.
        if fetched {
            // NOTE(#301): The spec only states that the unversioned root metadata needs to be
            // written to non-volatile storage. This enables a method like
            // `Client::with_trusted_local` to initialize trust with the latest root version.
            // However, this doesn't work well when trust is established with an externally
            // provided root, such as with `Client::with_trusted_root` or
            // `Client::with_trusted_root_keys`. In those cases, it's possible those initial roots
            // could be multiple versions behind the latest cached root metadata. So we'd most
            // likely never use the locally cached `root.json`.
            //
            // Instead, as an extension to the spec, we'll write the `$VERSION.root.json` metadata
            // to the local store. This will eventually enable us to initialize metadata from the
            // local store (see #301).
            local
                .store_metadata(&root_path, &root_version, &raw_root)
                .await?;

            // FIXME: should we also store the root as `MetadataVersion::None`?
        }

        Self::new(config, tuf, local, remote).await
    }
async fn new(
config: Config<T>,
mut tuf: Tuf<D>,
local: Repository<L, D>,
remote: Repository<R, D>,
) -> Result<Self> {
let res = async {
let _r = Self::update_root_with_repos(&config, &mut tuf, None, &local).await?;
let _ts = Self::update_timestamp_with_repos(&config, &mut tuf, None, &local).await?;
let _sn = Self::update_snapshot_with_repos(&mut tuf, None, &local, false).await?;
let _ta = Self::update_targets_with_repos(&mut tuf, None, &local, false).await?;
Ok(())
}
.await;
match res {
Ok(()) | Err(Error::NotFound) => {}
Err(err) => {
warn!("error loading local metadata: : {}", err);
}
}
Ok(Client {
tuf,
config,
local,
remote,
})
}
/// Update TUF metadata from the remote repository.
///
/// Returns `true` if an update occurred and `false` otherwise.
pub async fn update(&mut self) -> Result<bool> {
let r = self.update_root().await?;
let ts = self.update_timestamp().await?;
let sn = self.update_snapshot().await?;
let ta = self.update_targets().await?;
Ok(r || ts || sn || ta)
}
/// Returns the current trusted root version.
pub fn root_version(&self) -> u32 {
self.tuf.trusted_root().version()
}
/// Returns the current trusted timestamp version.
pub fn timestamp_version(&self) -> Option<u32> {
Some(self.tuf.trusted_timestamp()?.version())
}
/// Returns the current trusted snapshot version.
pub fn snapshot_version(&self) -> Option<u32> {
Some(self.tuf.trusted_snapshot()?.version())
}
/// Returns the current trusted targets version.
pub fn targets_version(&self) -> Option<u32> {
Some(self.tuf.trusted_targets()?.version())
}
/// Returns the current trusted delegations version for a given role.
pub fn delegations_version(&self, role: &MetadataPath) -> Option<u32> {
Some(self.tuf.trusted_delegations().get(role)?.version())
}
/// Returns the current trusted root.
pub fn trusted_root(&self) -> &Verified<RootMetadata> {
self.tuf.trusted_root()
}
/// Returns the current trusted timestamp.
pub fn trusted_timestamp(&self) -> Option<&Verified<TimestampMetadata>> {
self.tuf.trusted_timestamp()
}
/// Returns the current trusted snapshot.
pub fn trusted_snapshot(&self) -> Option<&Verified<SnapshotMetadata>> {
self.tuf.trusted_snapshot()
}
/// Returns the current trusted targets.
pub fn trusted_targets(&self) -> Option<&Verified<TargetsMetadata>> {
self.tuf.trusted_targets()
}
/// Returns the current trusted delegations.
pub fn trusted_delegations(&self) -> &HashMap<MetadataPath, Verified<TargetsMetadata>> {
self.tuf.trusted_delegations()
}
    /// Update TUF root metadata from the remote repository.
    ///
    /// Returns `true` if an update occurred and `false` otherwise.
    pub async fn update_root(&mut self) -> Result<bool> {
        // Delegate to the repo-agnostic helper; passing `Some(&self.local)` makes it
        // persist each newly verified root version into the local repository.
        Self::update_root_with_repos(&self.config, &mut self.tuf, Some(&self.local), &self.remote)
            .await
    }
    /// Walks the root metadata version chain on `remote`: repeatedly fetches version N+1,
    /// verifies it with `tuf`, and (when `local` is provided) persists each newly trusted
    /// root, stopping when no further version exists.
    ///
    /// Returns `true` if at least one new root version was installed. Fails with
    /// `Error::ExpiredMetadata(Role::Root)` if the final trusted root has expired.
    async fn update_root_with_repos<Remote>(
        config: &Config<T>,
        tuf: &mut Tuf<D>,
        local: Option<&Repository<L, D>>,
        remote: &Repository<Remote, D>,
    ) -> Result<bool>
    where
        Remote: RepositoryProvider<D>,
    {
        let root_path = MetadataPath::from_role(&Role::Root);

        let mut updated = false;

        loop {
            /////////////////////////////////////////
            // TUF-1.0.9 §5.1.2:
            //
            //     Try downloading version N+1 of the root metadata file, up to some W number of
            //     bytes (because the size is unknown). The value for W is set by the authors of
            //     the application using TUF. For example, W may be tens of kilobytes. The filename
            //     used to download the root metadata file is of the fixed form
            //     VERSION_NUMBER.FILENAME.EXT (e.g., 42.root.json). If this file is not available,
            //     or we have downloaded more than Y number of root metadata files (because the
            //     exact number is as yet unknown), then go to step 5.1.9. The value for Y is set
            //     by the authors of the application using TUF. For example, Y may be 2^10.

            // FIXME(#306) We do not have an upper bound on the number of root metadata we'll
            // fetch. This means that an attacker that's stolen the root keys could cause a client
            // to fall into an infinite loop (but if an attacker has stolen the root keys, the
            // client probably has worse problems to worry about).

            let next_version = MetadataVersion::Number(tuf.trusted_root().version() + 1);
            let res = remote
                .fetch_metadata(&root_path, &next_version, config.max_root_length, None)
                .await;

            let raw_signed_root = match res {
                Ok(raw_signed_root) => raw_signed_root,
                Err(Error::NotFound) => {
                    // No newer root version exists; we have reached the head of the chain.
                    break;
                }
                Err(err) => {
                    return Err(err);
                }
            };

            updated = true;

            // Verify the candidate root against the currently trusted root before persisting.
            tuf.update_root(&raw_signed_root)?;

            /////////////////////////////////////////
            // TUF-1.0.9 §5.1.7:
            //
            //     Persist root metadata. The client MUST write the file to non-volatile storage
            //     as FILENAME.EXT (e.g. root.json).

            if let Some(local) = local {
                local
                    .store_metadata(&root_path, &MetadataVersion::None, &raw_signed_root)
                    .await?;

                // NOTE(#301): See the comment in `Client::with_trusted_root_keys`.
                local
                    .store_metadata(&root_path, &next_version, &raw_signed_root)
                    .await?;
            }

            /////////////////////////////////////////
            // TUF-1.0.9 §5.1.8:
            //
            //     Repeat steps 5.1.1 to 5.1.8.
        }

        /////////////////////////////////////////
        // TUF-1.0.9 §5.1.9:
        //
        //     Check for a freeze attack. The latest known time MUST be lower than the expiration
        //     timestamp in the trusted root metadata file (version N). If the trusted root
        //     metadata file has expired, abort the update cycle, report the potential freeze
        //     attack. On the next update cycle, begin at step 5.0 and version N of the root
        //     metadata file.

        // TODO: Consider moving the root metadata expiration check into `tuf::Tuf`, since that's
        // where we check timestamp/snapshot/targets/delegations for expiration.
        if tuf.trusted_root().expires() <= &Utc::now() {
            error!("Root metadata expired, potential freeze attack");
            return Err(Error::ExpiredMetadata(Role::Root));
        }

        /////////////////////////////////////////
        // TUF-1.0.5 §5.1.10:
        //
        //     Set whether consistent snapshots are used as per the trusted root metadata file
        //     (see Section 4.3).

        Ok(updated)
    }
    /// Updates the trusted timestamp metadata from the remote repository.
    ///
    /// Returns `true` if an update occurred and `false` otherwise.
    async fn update_timestamp(&mut self) -> Result<bool> {
        Self::update_timestamp_with_repos(
            &self.config,
            &mut self.tuf,
            Some(&self.local),
            &self.remote,
        )
        .await
    }
async fn update_timestamp_with_repos<Remote>(
config: &Config<T>,
tuf: &mut Tuf<D>,
local: Option<&Repository<L, D>>,
remote: &Repository<Remote, D>,
) -> Result<bool>
where
Remote: RepositoryProvider<D>,
{
let timestamp_path = MetadataPath::from_role(&Role::Timestamp);
/////////////////////////////////////////
// TUF-1.0.9 §5.2:
//
// Download the timestamp metadata file, up to X number of bytes (because the size is
// unknown). The value for X is set by the authors of the application using TUF. For
// example, X may be tens of kilobytes. The filename used to download the timestamp
// metadata file is of the fixed form FILENAME.EXT (e.g., timestamp.json).
let raw_signed_timestamp = remote
.fetch_metadata(
×tamp_path,
&MetadataVersion::None,
config.max_timestamp_length,
None,
)
.await?;
if tuf.update_timestamp(&raw_signed_timestamp)?.is_some() {
/////////////////////////////////////////
// TUF-1.0.9 §5.2.4:
//
// Persist timestamp metadata. The client MUST write the file to non-volatile
// storage as FILENAME.EXT (e.g. timestamp.json).
if let Some(local) = local {
local
.store_metadata(
×tamp_path,
&MetadataVersion::None,
&raw_signed_timestamp,
)
.await?;
}
Ok(true)
} else {
Ok(false)
}
}
    /// Updates the trusted snapshot metadata from the remote repository.
    ///
    /// Returns `true` if an update occurred and `false` otherwise.
    async fn update_snapshot(&mut self) -> Result<bool> {
        // The consistent-snapshot flag comes from the trusted root and decides
        // whether versioned filenames are used for the fetch.
        let consistent_snapshot = self.tuf.trusted_root().consistent_snapshot();
        Self::update_snapshot_with_repos(
            &mut self.tuf,
            Some(&self.local),
            &self.remote,
            consistent_snapshot,
        )
        .await
    }
    /// Fetches snapshot metadata from `remote` (constrained by the length and
    /// hashes listed in the trusted timestamp), verifies it, and persists it to
    /// `local` when provided.
    ///
    /// Returns `Ok(false)` without fetching when the timestamp does not list a
    /// snapshot version newer than the trusted one; otherwise `Ok(true)` on a
    /// successful install. Requires trusted timestamp metadata to be present.
    async fn update_snapshot_with_repos<Remote>(
        tuf: &mut Tuf<D>,
        local: Option<&Repository<L, D>>,
        remote: &Repository<Remote, D>,
        consistent_snapshots: bool,
    ) -> Result<bool>
    where
        Remote: RepositoryProvider<D>,
    {
        // 5.3.1 Check against timestamp metadata. The hashes and version number listed in the
        // timestamp metadata. If hashes and version do not match, discard the new snapshot
        // metadata, abort the update cycle, and report the failure.
        let snapshot_description = match tuf.trusted_timestamp() {
            Some(ts) => Ok(ts.snapshot()),
            None => Err(Error::MissingMetadata(Role::Timestamp)),
        }?
        .clone();
        // Version 0 stands in for "no snapshot trusted yet", so any listed
        // version >= 1 triggers a fetch on first run.
        if snapshot_description.version()
            <= tuf.trusted_snapshot().map(|s| s.version()).unwrap_or(0)
        {
            return Ok(false);
        }
        let (alg, value) = crypto::hash_preference(snapshot_description.hashes())?;
        // Consistent snapshots use VERSION.snapshot.json; otherwise snapshot.json.
        let version = if consistent_snapshots {
            MetadataVersion::Number(snapshot_description.version())
        } else {
            MetadataVersion::None
        };
        let snapshot_path = MetadataPath::from_role(&Role::Snapshot);
        let snapshot_length = Some(snapshot_description.length());
        // The fetch itself enforces the expected length and hash.
        let raw_signed_snapshot = remote
            .fetch_metadata(
                &snapshot_path,
                &version,
                snapshot_length,
                Some((alg, value.clone())),
            )
            .await?;
        if tuf.update_snapshot(&raw_signed_snapshot)? {
            /////////////////////////////////////////
            // TUF-1.0.9 §5.3.5:
            //
            // Persist snapshot metadata. The client MUST write the file to non-volatile
            // storage as FILENAME.EXT (e.g. snapshot.json).
            if let Some(local) = local {
                local
                    .store_metadata(&snapshot_path, &MetadataVersion::None, &raw_signed_snapshot)
                    .await?;
            }
            Ok(true)
        } else {
            Ok(false)
        }
    }
    /// Updates the trusted top-level targets metadata from the remote repository.
    ///
    /// Returns `true` if an update occurred and `false` otherwise.
    async fn update_targets(&mut self) -> Result<bool> {
        // Mirrors `update_snapshot`: the root's consistent-snapshot flag picks
        // the filename scheme used for the fetch.
        let consistent_snapshot = self.tuf.trusted_root().consistent_snapshot();
        Self::update_targets_with_repos(
            &mut self.tuf,
            Some(&self.local),
            &self.remote,
            consistent_snapshot,
        )
        .await
    }
    /// Fetches top-level targets metadata from `remote` (constrained by the
    /// length and hashes listed in the trusted snapshot), verifies it, and
    /// persists it to `local` when provided.
    ///
    /// Returns `Ok(false)` without fetching when the snapshot does not list a
    /// targets version newer than the trusted one. Requires trusted snapshot
    /// metadata that describes the targets role.
    async fn update_targets_with_repos<Remote>(
        tuf: &mut Tuf<D>,
        local: Option<&Repository<L, D>>,
        remote: &Repository<Remote, D>,
        consistent_snapshot: bool,
    ) -> Result<bool>
    where
        Remote: RepositoryProvider<D>,
    {
        // The snapshot must describe the targets role; a snapshot without that
        // entry is a verification failure, not merely "nothing to do".
        let targets_description = match tuf.trusted_snapshot() {
            Some(sn) => match sn.meta().get(&MetadataPath::from_role(&Role::Targets)) {
                Some(d) => Ok(d),
                None => Err(Error::VerificationFailure(
                    "Snapshot metadata did not contain a description of the \
                     current targets metadata."
                        .into(),
                )),
            },
            None => Err(Error::MissingMetadata(Role::Snapshot)),
        }?
        .clone();
        // Version 0 stands in for "no targets trusted yet".
        if targets_description.version() <= tuf.trusted_targets().map(|t| t.version()).unwrap_or(0)
        {
            return Ok(false);
        }
        let (alg, value) = crypto::hash_preference(targets_description.hashes())?;
        // Consistent snapshots use VERSION.targets.json; otherwise targets.json.
        let version = if consistent_snapshot {
            MetadataVersion::Number(targets_description.version())
        } else {
            MetadataVersion::None
        };
        let targets_path = MetadataPath::from_role(&Role::Targets);
        let targets_length = Some(targets_description.length());
        // The fetch itself enforces the expected length and hash.
        let raw_signed_targets = remote
            .fetch_metadata(
                &targets_path,
                &version,
                targets_length,
                Some((alg, value.clone())),
            )
            .await?;
        if tuf.update_targets(&raw_signed_targets)? {
            /////////////////////////////////////////
            // TUF-1.0.9 §5.4.4:
            //
            // Persist targets metadata. The client MUST write the file to non-volatile storage
            // as FILENAME.EXT (e.g. targets.json).
            if let Some(local) = local {
                local
                    .store_metadata(&targets_path, &MetadataVersion::None, &raw_signed_targets)
                    .await?;
            }
            Ok(true)
        } else {
            Ok(false)
        }
    }
    /// Fetch a target from the remote repo and write it to the local repo.
    ///
    /// The target bytes are streamed from the remote into local storage; hash
    /// verification semantics match `fetch_target_to_writer`.
    pub async fn fetch_target<'a>(&'a mut self, target: &'a TargetPath) -> Result<()> {
        let mut read = self._fetch_target(target).await?;
        self.local.store_target(&mut read, target).await
    }
/// Fetch a target from the remote repo and write it to the provided writer.
///
/// It is **critical** that none of the bytes written to the `write` are used until this future
/// returns `Ok`, as the hash of the target is not verified until all bytes are read from the
/// repository.
pub async fn fetch_target_to_writer<'a, W>(
&'a mut self,
target: &'a TargetPath,
mut write: W,
) -> Result<()>
where
W: AsyncWrite + Send + Unpin,
{
let read = self._fetch_target(&target).await?;
copy(read, &mut write).await?;
Ok(())
}
    /// Fetch a target description from the remote repo and return it.
    ///
    /// Translates the real path into the repository's virtual path, then walks
    /// the (possibly delegated) targets metadata to find the description.
    /// Requires trusted snapshot metadata to already be present.
    pub async fn fetch_target_description<'a>(
        &'a mut self,
        target: &'a TargetPath,
    ) -> Result<TargetDescription> {
        let virt = self.config.path_translator.real_to_virtual(target)?;
        // Clone so the snapshot can be borrowed while `self` is mutated during
        // the delegation walk below.
        let snapshot = self
            .tuf
            .trusted_snapshot()
            .ok_or_else(|| Error::MissingMetadata(Role::Snapshot))?
            .clone();
        // The bool in the tuple is the delegation-termination flag; only the
        // description result matters to callers here.
        let (_, target_description) = self
            .lookup_target_description(false, 0, &virt, &snapshot, None)
            .await;
        target_description
    }
    // TODO this should check the local repo first
    /// Resolves the target's description, then opens a verified read stream for
    /// the target bytes from the remote repository.
    async fn _fetch_target<'a>(
        &'a mut self,
        target: &'a TargetPath,
    ) -> Result<impl AsyncRead + Send + Unpin> {
        let target_description = self.fetch_target_description(target).await?;
        // According to TUF section 5.5.2, when consistent snapshot is enabled, target files should
        // be found at `$HASH.FILENAME.EXT`. Otherwise it is stored at `FILENAME.EXT`.
        if self.tuf.trusted_root().consistent_snapshot() {
            let (_, value) = crypto::hash_preference(target_description.hashes())?;
            let target = target.with_hash_prefix(value)?;
            self.remote.fetch_target(&target, &target_description).await
        } else {
            self.remote.fetch_target(target, &target_description).await
        }
    }
    /// Recursively walks the targets metadata and its delegations looking for a
    /// description of `target`.
    ///
    /// Returns `(terminate, result)`: when `terminate` is `true` the caller must
    /// stop searching sibling delegations (a terminating delegation matched or a
    /// hard error occurred). `default_terminate` seeds that flag for this level,
    /// and `current_depth` is bounded by `config.max_delegation_depth`.
    /// Delegated metadata discovered along the way is verified, installed into
    /// `self.tuf`, and best-effort persisted to the local repository.
    async fn lookup_target_description<'a>(
        &'a mut self,
        default_terminate: bool,
        current_depth: u32,
        target: &'a VirtualTargetPath,
        snapshot: &'a SnapshotMetadata,
        targets: Option<(&'a Verified<TargetsMetadata>, MetadataPath)>,
    ) -> (bool, Result<TargetDescription>) {
        // Depth guard: a delegation graph deeper than the configured maximum is
        // treated as "not found" rather than walked indefinitely.
        if current_depth > self.config.max_delegation_depth {
            warn!(
                "Walking the delegation graph would have exceeded the configured max depth: {}",
                self.config.max_delegation_depth
            );
            return (default_terminate, Err(Error::NotFound));
        }
        // these clones are dumb, but we need immutable values and not references for update
        // tuf in the loop below
        let (targets, targets_role) = match targets {
            Some((t, role)) => (t.clone(), role),
            None => match self.tuf.trusted_targets() {
                Some(t) => (t.clone(), MetadataPath::from_role(&Role::Targets)),
                None => {
                    return (
                        default_terminate,
                        Err(Error::MissingMetadata(Role::Targets)),
                    );
                }
            },
        };
        // Direct hit at this level: no need to consult delegations.
        if let Some(t) = targets.targets().get(target) {
            return (default_terminate, Ok(t.clone()));
        }
        let delegations = match targets.delegations() {
            Some(d) => d,
            None => return (default_terminate, Err(Error::NotFound)),
        };
        for delegation in delegations.roles().iter() {
            // A delegation whose path patterns don't cover the target is skipped;
            // if it's terminating, the whole search stops here.
            if !delegation.paths().iter().any(|p| target.is_child(p)) {
                if delegation.terminating() {
                    return (true, Err(Error::NotFound));
                } else {
                    continue;
                }
            }
            // The snapshot must describe the delegated role's metadata for us to
            // fetch it safely (length + hashes).
            let role_meta = match snapshot.meta().get(delegation.role()) {
                Some(m) => m,
                None if !delegation.terminating() => continue,
                None => return (true, Err(Error::NotFound)),
            };
            let (alg, value) = match crypto::hash_preference(role_meta.hashes()) {
                Ok(h) => h,
                Err(e) => return (delegation.terminating(), Err(e)),
            };
            /////////////////////////////////////////
            // TUF-1.0.9 §5.4:
            //
            // Download the top-level targets metadata file, up to either the number of bytes
            // specified in the snapshot metadata file, or some Z number of bytes. The value
            // for Z is set by the authors of the application using TUF. For example, Z may be
            // tens of kilobytes. If consistent snapshots are not used (see Section 7), then
            // the filename used to download the targets metadata file is of the fixed form
            // FILENAME.EXT (e.g., targets.json). Otherwise, the filename is of the form
            // VERSION_NUMBER.FILENAME.EXT (e.g., 42.targets.json), where VERSION_NUMBER is the
            // version number of the targets metadata file listed in the snapshot metadata
            // file.
            let version = if self.tuf.trusted_root().consistent_snapshot() {
                MetadataVersion::Number(role_meta.version())
            } else {
                MetadataVersion::None
            };
            // FIXME: Other than root, this is the only place that first tries using the local
            // metadata before failing back to the remote server. Is this logic correct?
            let role_length = Some(role_meta.length());
            let raw_signed_meta = self
                .local
                .fetch_metadata(
                    delegation.role(),
                    &MetadataVersion::None,
                    role_length,
                    Some((alg, value.clone())),
                )
                .await;
            let raw_signed_meta = match raw_signed_meta {
                Ok(m) => m,
                Err(_) => {
                    // Local miss (or failure): fall back to the remote repository.
                    match self
                        .remote
                        .fetch_metadata(
                            delegation.role(),
                            &version,
                            role_length,
                            Some((alg, value.clone())),
                        )
                        .await
                    {
                        Ok(m) => m,
                        Err(ref e) if !delegation.terminating() => {
                            warn!("Failed to fetch metadata {:?}: {:?}", delegation.role(), e);
                            continue;
                        }
                        Err(e) => {
                            warn!("Failed to fetch metadata {:?}: {:?}", delegation.role(), e);
                            return (true, Err(e));
                        }
                    }
                }
            };
            match self
                .tuf
                .update_delegation(&targets_role, delegation.role(), &raw_signed_meta)
            {
                Ok(_) => {
                    /////////////////////////////////////////
                    // TUF-1.0.9 §5.4.4:
                    //
                    // Persist targets metadata. The client MUST write the file to non-volatile
                    // storage as FILENAME.EXT (e.g. targets.json).
                    // Persisting locally is best-effort: a storage failure is
                    // logged but does not abort the lookup.
                    match self
                        .local
                        .store_metadata(delegation.role(), &MetadataVersion::None, &raw_signed_meta)
                        .await
                    {
                        Ok(_) => (),
                        Err(e) => {
                            warn!(
                                "Error storing metadata {:?} locally: {:?}",
                                delegation.role(),
                                e
                            )
                        }
                    }
                    // `update_delegation` succeeded above, so the entry must exist.
                    let meta = self
                        .tuf
                        .trusted_delegations()
                        .get(delegation.role())
                        .unwrap()
                        .clone();
                    // Recursion through an async fn requires boxing the future.
                    let f: Pin<Box<dyn Future<Output = _>>> =
                        Box::pin(self.lookup_target_description(
                            delegation.terminating(),
                            current_depth + 1,
                            target,
                            snapshot,
                            Some((&meta, delegation.role().clone())),
                        ));
                    let (term, res) = f.await;
                    if term && res.is_err() {
                        return (true, res);
                    }
                    // TODO end recursion early
                }
                Err(_) if !delegation.terminating() => continue,
                Err(e) => return (true, Err(e)),
            };
        }
        (default_terminate, Err(Error::NotFound))
    }
}
/// Helper function that first tries to fetch the metadata from the local store, and if it doesn't
/// exist or does and fails to parse, try fetching it from the remote store.
async fn fetch_metadata_from_local_or_else_remote<'a, D, L, R, M>(
path: &'a MetadataPath,
version: &'a MetadataVersion,
max_length: Option<usize>,
hash_data: Option<(&'static HashAlgorithm, HashValue)>,
local: &'a Repository<L, D>,
remote: &'a Repository<R, D>,
) -> Result<(bool, RawSignedMetadata<D, M>)>
where
D: DataInterchange + Sync,
L: RepositoryProvider<D> + RepositoryStorage<D>,
R: RepositoryProvider<D>,
M: Metadata + 'static,
{
match local
.fetch_metadata(path, version, max_length, hash_data.clone())
.await
{
Ok(raw_meta) => Ok((false, raw_meta)),
Err(Error::NotFound) => {
let raw_meta = remote
.fetch_metadata(path, version, max_length, hash_data)
.await?;
Ok((true, raw_meta))
}
Err(err) => Err(err),
}
}
/// Configuration for a TUF `Client`.
///
/// # Defaults
///
/// The following values are considered reasonably safe defaults, however these values may change
/// as this crate moves out of beta. If you are concerned about them changing, you should use the
/// `ConfigBuilder` and set your own values.
///
/// ```
/// # use tuf::client::{Config, DefaultTranslator};
/// let config = Config::default();
/// assert_eq!(config.max_root_length(), &Some(1024 * 1024));
/// assert_eq!(config.max_timestamp_length(), &Some(32 * 1024));
/// assert_eq!(config.max_delegation_depth(), 8);
/// let _: &DefaultTranslator = config.path_translator();
/// ```
#[derive(Clone, Debug)]
pub struct Config<T>
where
    T: PathTranslator,
{
    /// Maximum number of bytes to download for root metadata; `None` = unbounded.
    max_root_length: Option<usize>,
    /// Maximum number of bytes to download for timestamp metadata; `None` = unbounded.
    max_timestamp_length: Option<usize>,
    /// Upper bound on delegation-graph depth walked during target lookup.
    max_delegation_depth: u32,
    /// Translates between real and virtual target paths.
    path_translator: T,
}
impl Config<DefaultTranslator> {
    /// Initialize a `ConfigBuilder` with the default values.
    ///
    /// Equivalent to `ConfigBuilder::default()`; provided for discoverability.
    pub fn build() -> ConfigBuilder<DefaultTranslator> {
        ConfigBuilder::default()
    }
}
impl<T> Config<T>
where
    T: PathTranslator,
{
    /// Return the optional maximum root metadata length, in bytes.
    pub fn max_root_length(&self) -> &Option<usize> {
        &self.max_root_length
    }
    /// Return the optional maximum timestamp metadata size, in bytes.
    pub fn max_timestamp_length(&self) -> &Option<usize> {
        &self.max_timestamp_length
    }
    /// The maximum number of steps used when walking the delegation graph.
    pub fn max_delegation_depth(&self) -> u32 {
        self.max_delegation_depth
    }
    /// The `PathTranslator` used to map real target paths to virtual ones.
    pub fn path_translator(&self) -> &T {
        &self.path_translator
    }
}
impl Default for Config<DefaultTranslator> {
    // Default limits documented on `Config`: 1 MiB root, 32 KiB timestamp,
    // delegation depth 8, and the default path translator.
    fn default() -> Self {
        Config {
            max_root_length: Some(1024 * 1024),
            max_timestamp_length: Some(32 * 1024),
            max_delegation_depth: 8,
            path_translator: DefaultTranslator::new(),
        }
    }
}
/// Helper for building and validating a TUF client `Config`.
///
/// Fields mirror `Config`; call `finish` to produce the validated config.
#[derive(Debug, PartialEq)]
pub struct ConfigBuilder<T>
where
    T: PathTranslator,
{
    /// See `Config::max_root_length`.
    max_root_length: Option<usize>,
    /// See `Config::max_timestamp_length`.
    max_timestamp_length: Option<usize>,
    /// See `Config::max_delegation_depth`.
    max_delegation_depth: u32,
    /// See `Config::path_translator`.
    path_translator: T,
}
impl<T> ConfigBuilder<T>
where
    T: PathTranslator,
{
    /// Validate this builder, returning a `Config` if validation succeeds.
    pub fn finish(self) -> Result<Config<T>> {
        let ConfigBuilder {
            max_root_length,
            max_timestamp_length,
            max_delegation_depth,
            path_translator,
        } = self;
        Ok(Config {
            max_root_length,
            max_timestamp_length,
            max_delegation_depth,
            path_translator,
        })
    }
    /// Set the optional maximum download length for root metadata.
    pub fn max_root_length(self, max: Option<usize>) -> Self {
        Self {
            max_root_length: max,
            ..self
        }
    }
    /// Set the optional maximum download length for timestamp metadata.
    pub fn max_timestamp_length(self, max: Option<usize>) -> Self {
        Self {
            max_timestamp_length: max,
            ..self
        }
    }
    /// Set the maximum number of steps used when walking the delegation graph.
    pub fn max_delegation_depth(self, max: u32) -> Self {
        Self {
            max_delegation_depth: max,
            ..self
        }
    }
    /// Set the `PathTranslator`, changing the builder's translator type.
    pub fn path_translator<TT>(self, path_translator: TT) -> ConfigBuilder<TT>
    where
        TT: PathTranslator,
    {
        // Destructure rather than move field-by-field; the old translator is
        // dropped and replaced by the one supplied.
        let ConfigBuilder {
            max_root_length,
            max_timestamp_length,
            max_delegation_depth,
            path_translator: _,
        } = self;
        ConfigBuilder {
            max_root_length,
            max_timestamp_length,
            max_delegation_depth,
            path_translator,
        }
    }
}
impl Default for ConfigBuilder<DefaultTranslator> {
fn default() -> ConfigBuilder<DefaultTranslator> {
let cfg = Config::default();
ConfigBuilder {
max_root_length: cfg.max_root_length,
max_timestamp_length: cfg.max_timestamp_length,
max_delegation_depth: cfg.max_delegation_depth,
path_translator: cfg.path_translator,
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::crypto::{Ed25519PrivateKey, HashAlgorithm, PrivateKey};
use crate::interchange::Json;
use crate::metadata::{MetadataPath, MetadataVersion};
use crate::repo_builder::RepoBuilder;
use crate::repository::{EphemeralRepository, ErrorRepository, Track, TrackRepository};
use chrono::prelude::*;
use futures_executor::block_on;
use lazy_static::lazy_static;
use maplit::hashmap;
use matches::assert_matches;
use serde_json::json;
use std::iter::once;
    // Fixed Ed25519 test keys decoded from checked-in PKCS#8 fixtures. Tests
    // index into this vector (KEYS[0], KEYS[1], ...), so the order is significant.
    lazy_static! {
        static ref KEYS: Vec<Ed25519PrivateKey> = {
            let keys: &[&[u8]] = &[
                include_bytes!("../tests/ed25519/ed25519-1.pk8.der"),
                include_bytes!("../tests/ed25519/ed25519-2.pk8.der"),
                include_bytes!("../tests/ed25519/ed25519-3.pk8.der"),
                include_bytes!("../tests/ed25519/ed25519-4.pk8.der"),
                include_bytes!("../tests/ed25519/ed25519-5.pk8.der"),
                include_bytes!("../tests/ed25519/ed25519-6.pk8.der"),
            ];
            keys.iter()
                .map(|b| Ed25519PrivateKey::from_pkcs8(b).unwrap())
                .collect()
        };
    }
enum ConstructorMode {
WithTrustedLocal,
WithTrustedRoot,
WithTrustedRootKeys,
}
#[test]
fn client_constructors_err_with_not_found() {
block_on(async {
let local = EphemeralRepository::<Json>::new();
let remote = EphemeralRepository::<Json>::new();
let private_key =
Ed25519PrivateKey::from_pkcs8(&Ed25519PrivateKey::pkcs8().unwrap()).unwrap();
let public_key = private_key.public().clone();
assert_matches!(
Client::with_trusted_local(Config::default(), &local, &remote).await,
Err(Error::NotFound)
);
assert_matches!(
Client::with_trusted_root_keys(
Config::default(),
&MetadataVersion::Number(1),
1,
once(&public_key),
&local,
&remote,
)
.await,
Err(Error::NotFound)
);
})
}
#[test]
fn client_constructors_err_with_invalid_keys() {
block_on(async {
let remote = EphemeralRepository::new();
let root_version = 1;
let good_private_key = &KEYS[0];
let bad_private_key = &KEYS[1];
let _ = RepoBuilder::<_, Json>::new(&remote)
.root_keys(vec![good_private_key])
.with_root_builder(|bld| {
bld.version(root_version)
.expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
.root_key(good_private_key.public().clone())
.snapshot_key(good_private_key.public().clone())
.targets_key(good_private_key.public().clone())
.timestamp_key(good_private_key.public().clone())
})
.commit()
.await
.unwrap();
assert_matches!(
Client::with_trusted_root_keys(
Config::default(),
&MetadataVersion::Number(root_version),
1,
once(bad_private_key.public()),
EphemeralRepository::new(),
&remote,
)
.await,
Err(Error::VerificationFailure(_))
);
})
}
#[test]
fn with_trusted_local_loads_metadata_from_local_repo() {
block_on(constructors_load_metadata_from_local_repo(
ConstructorMode::WithTrustedLocal,
))
}
#[test]
fn with_trusted_root_loads_metadata_from_local_repo() {
block_on(constructors_load_metadata_from_local_repo(
ConstructorMode::WithTrustedRoot,
))
}
#[test]
fn with_trusted_root_keys_loads_metadata_from_local_repo() {
block_on(constructors_load_metadata_from_local_repo(
ConstructorMode::WithTrustedRootKeys,
))
}
    /// Shared body for the three `with_trusted_*` constructor tests: seeds the
    /// local repo with version-1 metadata and the remote with version-2, builds
    /// a client via the requested constructor, and asserts the exact sequence of
    /// repository fetch/store operations for construction, the first update
    /// (which advances to root v2), and a no-op second update.
    async fn constructors_load_metadata_from_local_repo(constructor_mode: ConstructorMode) {
        // Store an expired root in the local store.
        let local = EphemeralRepository::<Json>::new();
        let metadata1 = RepoBuilder::new(&local)
            .root_keys(vec![&KEYS[0]])
            .targets_keys(vec![&KEYS[0]])
            .snapshot_keys(vec![&KEYS[0]])
            .timestamp_keys(vec![&KEYS[0]])
            .with_root_builder(|bld| {
                bld.version(1)
                    .consistent_snapshot(true)
                    .expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
                    .root_key(KEYS[0].public().clone())
                    .snapshot_key(KEYS[0].public().clone())
                    .targets_key(KEYS[0].public().clone())
                    .timestamp_key(KEYS[0].public().clone())
            })
            .targets_version(1)
            .commit()
            .await
            .unwrap();
        // Remote repo has unexpired metadata.
        let remote = EphemeralRepository::<Json>::new();
        let metadata2 = RepoBuilder::new(&remote)
            .root_keys(vec![&KEYS[0]])
            .targets_keys(vec![&KEYS[0]])
            .snapshot_keys(vec![&KEYS[0]])
            .timestamp_keys(vec![&KEYS[0]])
            .with_root_builder(|bld| {
                bld.version(2)
                    .consistent_snapshot(true)
                    .expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
                    .root_key(KEYS[0].public().clone())
                    .snapshot_key(KEYS[0].public().clone())
                    .targets_key(KEYS[0].public().clone())
                    .timestamp_key(KEYS[0].public().clone())
            })
            .targets_version(2)
            .snapshot_version(2)
            .timestamp_version(2)
            .commit()
            .await
            .unwrap();
        // Now, make sure that the local metadata got version 1.
        let track_local = TrackRepository::new(&local);
        let track_remote = TrackRepository::new(&remote);
        // Make sure the client initialized metadata in the right order. Each has a slightly
        // different usage of the local repository.
        let mut client = match constructor_mode {
            ConstructorMode::WithTrustedLocal => {
                Client::with_trusted_local(Config::default(), &track_local, &track_remote)
                    .await
                    .unwrap()
            }
            ConstructorMode::WithTrustedRoot => Client::with_trusted_root(
                Config::default(),
                &metadata1.root,
                &track_local,
                &track_remote,
            )
            .await
            .unwrap(),
            ConstructorMode::WithTrustedRootKeys => Client::with_trusted_root_keys(
                Config::default(),
                &MetadataVersion::Number(1),
                1,
                once(&KEYS[0].public().clone()),
                &track_local,
                &track_remote,
            )
            .await
            .unwrap(),
        };
        assert_eq!(client.tuf.trusted_root().version(), 1);
        // Construction must not have touched the remote repository at all.
        assert_eq!(track_remote.take_tracks(), vec![]);
        match constructor_mode {
            ConstructorMode::WithTrustedLocal => {
                assert_eq!(
                    track_local.take_tracks(),
                    vec![
                        Track::fetch_meta_found(&MetadataVersion::Number(1), &metadata1.root),
                        Track::FetchErr(
                            MetadataPath::from_role(&Role::Root),
                            MetadataVersion::Number(2)
                        ),
                        Track::fetch_meta_found(
                            &MetadataVersion::None,
                            &metadata1.timestamp.as_ref().unwrap()
                        ),
                        Track::fetch_meta_found(
                            &MetadataVersion::None,
                            &metadata1.snapshot.as_ref().unwrap()
                        ),
                        Track::fetch_meta_found(
                            &MetadataVersion::None,
                            metadata1.targets.as_ref().unwrap()
                        ),
                    ],
                );
            }
            ConstructorMode::WithTrustedRoot => {
                // `with_trusted_root` is handed the root directly, so no local
                // fetch of root v1 appears in the track.
                assert_eq!(
                    track_local.take_tracks(),
                    vec![
                        Track::FetchErr(
                            MetadataPath::from_role(&Role::Root),
                            MetadataVersion::Number(2)
                        ),
                        Track::fetch_meta_found(
                            &MetadataVersion::None,
                            &metadata1.timestamp.as_ref().unwrap()
                        ),
                        Track::fetch_meta_found(
                            &MetadataVersion::None,
                            &metadata1.snapshot.as_ref().unwrap()
                        ),
                        Track::fetch_meta_found(
                            &MetadataVersion::None,
                            metadata1.targets.as_ref().unwrap()
                        ),
                    ],
                );
            }
            ConstructorMode::WithTrustedRootKeys => {
                assert_eq!(
                    track_local.take_tracks(),
                    vec![
                        Track::fetch_meta_found(&MetadataVersion::Number(1), &metadata1.root),
                        Track::FetchErr(
                            MetadataPath::from_role(&Role::Root),
                            MetadataVersion::Number(2)
                        ),
                        Track::fetch_meta_found(
                            &MetadataVersion::None,
                            &metadata1.timestamp.as_ref().unwrap()
                        ),
                        Track::fetch_meta_found(
                            &MetadataVersion::None,
                            &metadata1.snapshot.as_ref().unwrap()
                        ),
                        Track::fetch_meta_found(
                            &MetadataVersion::None,
                            metadata1.targets.as_ref().unwrap()
                        ),
                    ],
                );
            }
        };
        assert_matches!(client.update().await, Ok(true));
        assert_eq!(client.tuf.trusted_root().version(), 2);
        // We should only fetch metadata from the remote repository and write it to the local
        // repository.
        assert_eq!(
            track_remote.take_tracks(),
            vec![
                Track::fetch_meta_found(&MetadataVersion::Number(2), &metadata2.root),
                Track::FetchErr(
                    MetadataPath::from_role(&Role::Root),
                    MetadataVersion::Number(3)
                ),
                Track::fetch_meta_found(
                    &MetadataVersion::None,
                    &metadata2.timestamp.as_ref().unwrap()
                ),
                Track::fetch_meta_found(
                    &MetadataVersion::Number(2),
                    &metadata2.snapshot.as_ref().unwrap()
                ),
                Track::fetch_meta_found(
                    &MetadataVersion::Number(2),
                    metadata2.targets.as_ref().unwrap()
                ),
            ],
        );
        assert_eq!(
            track_local.take_tracks(),
            vec![
                Track::store_meta(&MetadataVersion::None, &metadata2.root),
                Track::store_meta(&MetadataVersion::Number(2), &metadata2.root),
                Track::store_meta(
                    &MetadataVersion::None,
                    metadata2.timestamp.as_ref().unwrap()
                ),
                Track::store_meta(&MetadataVersion::None, metadata2.snapshot.as_ref().unwrap()),
                Track::store_meta(&MetadataVersion::None, metadata2.targets.as_ref().unwrap()),
            ],
        );
        // Another update should not fetch anything.
        assert_matches!(client.update().await, Ok(false));
        assert_eq!(client.tuf.trusted_root().version(), 2);
        // Make sure we only fetched the next root and timestamp, and didn't store anything.
        assert_eq!(
            track_remote.take_tracks(),
            vec![
                Track::FetchErr(
                    MetadataPath::from_role(&Role::Root),
                    MetadataVersion::Number(3)
                ),
                Track::fetch_meta_found(
                    &MetadataVersion::None,
                    metadata2.timestamp.as_ref().unwrap(),
                ),
            ]
        );
        assert_eq!(track_local.take_tracks(), vec![]);
    }
#[test]
fn constructor_succeeds_with_missing_metadata() {
block_on(async {
let local = EphemeralRepository::<Json>::new();
let remote = EphemeralRepository::<Json>::new();
// Store only a root in the local store.
let metadata1 = RepoBuilder::new(&local)
.root_keys(vec![&KEYS[0]])
.targets_keys(vec![&KEYS[0]])
.snapshot_keys(vec![&KEYS[0]])
.timestamp_keys(vec![&KEYS[0]])
.with_root_builder(|bld| {
bld.version(1)
.consistent_snapshot(true)
.expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
.root_key(KEYS[0].public().clone())
.snapshot_key(KEYS[0].public().clone())
.targets_key(KEYS[0].public().clone())
.timestamp_key(KEYS[0].public().clone())
})
.commit()
.await
.unwrap();
let track_local = TrackRepository::new(&local);
let track_remote = TrackRepository::new(&remote);
// Create a client, which should try to fetch metadata from the local store.
let mut client = Client::with_trusted_root(
Config::default(),
&metadata1.root,
&track_local,
&track_remote,
)
.await
.unwrap();
assert_eq!(client.tuf.trusted_root().version(), 1);
// We shouldn't fetch metadata.
assert_eq!(track_remote.take_tracks(), vec![]);
// We should have tried fetching a new timestamp, but it shouldn't exist in the
// repository.
assert_eq!(
track_local.take_tracks(),
vec![
Track::FetchErr(
MetadataPath::from_role(&Role::Root),
MetadataVersion::Number(2)
),
Track::FetchErr(
MetadataPath::from_role(&Role::Timestamp),
MetadataVersion::None
)
],
);
// An update should succeed.
let _metadata2 = RepoBuilder::new(&remote)
.root_keys(vec![&KEYS[0]])
.targets_keys(vec![&KEYS[0]])
.snapshot_keys(vec![&KEYS[0]])
.timestamp_keys(vec![&KEYS[0]])
.with_root_builder(|bld| {
bld.version(2)
.consistent_snapshot(true)
.expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
.root_key(KEYS[0].public().clone())
.snapshot_key(KEYS[0].public().clone())
.targets_key(KEYS[0].public().clone())
.timestamp_key(KEYS[0].public().clone())
})
.targets_version(1)
.snapshot_version(1)
.timestamp_version(1)
.commit()
.await
.unwrap();
assert_matches!(client.update().await, Ok(true));
assert_eq!(client.tuf.trusted_root().version(), 2);
})
}
#[test]
fn constructor_succeeds_with_expired_metadata() {
block_on(async {
let local = EphemeralRepository::<Json>::new();
let remote = EphemeralRepository::<Json>::new();
// Store an expired root in the local store.
let metadata1 = RepoBuilder::new(&local)
.root_keys(vec![&KEYS[0]])
.targets_keys(vec![&KEYS[0]])
.snapshot_keys(vec![&KEYS[0]])
.timestamp_keys(vec![&KEYS[0]])
.with_root_builder(|bld| {
bld.version(1)
.consistent_snapshot(true)
.expires(Utc.ymd(1970, 1, 1).and_hms(0, 0, 0))
.root_key(KEYS[0].public().clone())
.snapshot_key(KEYS[0].public().clone())
.targets_key(KEYS[0].public().clone())
.timestamp_key(KEYS[0].public().clone())
})
.targets_version(1)
.commit()
.await
.unwrap();
let metadata2 = RepoBuilder::new(&local)
.root_keys(vec![&KEYS[0]])
.targets_keys(vec![&KEYS[0]])
.snapshot_keys(vec![&KEYS[0]])
.timestamp_keys(vec![&KEYS[0]])
.with_root_builder(|bld| {
bld.version(2)
.consistent_snapshot(true)
.expires(Utc.ymd(1970, 1, 1).and_hms(0, 0, 0))
.root_key(KEYS[0].public().clone())
.snapshot_key(KEYS[0].public().clone())
.targets_key(KEYS[0].public().clone())
.timestamp_key(KEYS[0].public().clone())
})
.targets_version(2)
.snapshot_version(2)
.timestamp_version(2)
.commit()
.await
.unwrap();
// Now, make sure that the local metadata got version 1.
let track_local = TrackRepository::new(&local);
let track_remote = TrackRepository::new(&remote);
let mut client = Client::with_trusted_root(
Config::default(),
&metadata1.root,
&track_local,
&track_remote,
)
.await
.unwrap();
assert_eq!(client.tuf.trusted_root().version(), 2);
// We shouldn't fetch metadata.
assert_eq!(track_remote.take_tracks(), vec![]);
// We should only load the root metadata, but because it's expired we don't try
// fetching the other local metadata.
assert_eq!(
track_local.take_tracks(),
vec![
Track::fetch_meta_found(&MetadataVersion::Number(2), &metadata2.root),
Track::FetchErr(
MetadataPath::from_role(&Role::Root),
MetadataVersion::Number(3)
)
],
);
// An update should succeed.
let _metadata3 = RepoBuilder::new(&remote)
.root_keys(vec![&KEYS[0]])
.targets_keys(vec![&KEYS[0]])
.snapshot_keys(vec![&KEYS[0]])
.timestamp_keys(vec![&KEYS[0]])
.with_root_builder(|bld| {
bld.version(3)
.consistent_snapshot(true)
.expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
.root_key(KEYS[0].public().clone())
.snapshot_key(KEYS[0].public().clone())
.targets_key(KEYS[0].public().clone())
.timestamp_key(KEYS[0].public().clone())
})
.targets_version(3)
.snapshot_version(3)
.timestamp_version(3)
.commit()
.await
.unwrap();
assert_matches!(client.update().await, Ok(true));
assert_eq!(client.tuf.trusted_root().version(), 3);
})
}
#[test]
fn constructor_succeeds_with_malformed_metadata() {
block_on(async {
// Store a malformed timestamp in the local repository.
let local = EphemeralRepository::<Json>::new();
let junk_timestamp = "junk timestamp";
local
.store_metadata(
&MetadataPath::from_role(&Role::Timestamp),
&MetadataVersion::None,
&mut junk_timestamp.as_bytes(),
)
.await
.unwrap();
// Create a normal repository on the remote server.
let remote = EphemeralRepository::<Json>::new();
let metadata1 = RepoBuilder::new(&remote)
.root_keys(vec![&KEYS[0]])
.targets_keys(vec![&KEYS[0]])
.snapshot_keys(vec![&KEYS[0]])
.timestamp_keys(vec![&KEYS[0]])
.with_root_builder(|bld| {
bld.expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
.root_key(KEYS[0].public().clone())
.snapshot_key(KEYS[0].public().clone())
.targets_key(KEYS[0].public().clone())
.timestamp_key(KEYS[0].public().clone())
})
.targets_version(1)
.commit()
.await
.unwrap();
// Create the client. It should ignore the malformed timestamp.
let track_local = TrackRepository::new(&local);
let track_remote = TrackRepository::new(&remote);
let mut client = Client::with_trusted_root(
Config::default(),
&metadata1.root,
&track_local,
&track_remote,
)
.await
.unwrap();
assert_eq!(client.tuf.trusted_root().version(), 1);
// We shouldn't fetch metadata.
assert_eq!(track_remote.take_tracks(), vec![]);
// We should only load the root metadata, but because it's expired we don't try
// fetching the other local metadata.
assert_eq!(
track_local.take_tracks(),
vec![
Track::FetchErr(
MetadataPath::from_role(&Role::Root),
MetadataVersion::Number(2)
),
Track::FetchFound {
path: MetadataPath::from_role(&Role::Timestamp),
version: MetadataVersion::None,
metadata: junk_timestamp.into(),
},
],
);
// An update should work.
assert_matches!(client.update().await, Ok(true));
})
}
#[test]
fn root_chain_update_consistent_snapshot_false() {
block_on(root_chain_update(false))
}
#[test]
fn root_chain_update_consistent_snapshot_true() {
block_on(root_chain_update(true))
}
    /// Drives a client through a root-rotation chain (v1 -> v2 -> v3) and
    /// verifies exactly which metadata is fetched from the remote and stored
    /// in the local repository at each step of each `update()`.
    async fn root_chain_update(consistent_snapshot: bool) {
        let repo = EphemeralRepository::<Json>::new();
        // First, create the initial metadata.
        let metadata1 = RepoBuilder::new(&repo)
            .root_keys(vec![&KEYS[0]])
            .targets_keys(vec![&KEYS[0], &KEYS[1], &KEYS[2]])
            .snapshot_keys(vec![&KEYS[0], &KEYS[1], &KEYS[2]])
            .timestamp_keys(vec![&KEYS[0], &KEYS[1], &KEYS[2]])
            .with_root_builder(|bld| {
                bld.version(1)
                    .consistent_snapshot(consistent_snapshot)
                    .expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
                    .root_key(KEYS[0].public().clone())
                    .snapshot_key(KEYS[0].public().clone())
                    .targets_key(KEYS[0].public().clone())
                    .timestamp_key(KEYS[0].public().clone())
            })
            .targets_version(1)
            .commit()
            .await
            .unwrap();
        let root_path = MetadataPath::from_role(&Role::Root);
        let timestamp_path = MetadataPath::from_role(&Role::Timestamp);
        // With consistent snapshots, snapshot/targets are addressed by version
        // number; otherwise the unversioned ("None") path is used.
        let targets_version;
        let snapshot_version;
        if consistent_snapshot {
            targets_version = MetadataVersion::Number(1);
            snapshot_version = MetadataVersion::Number(1);
        } else {
            targets_version = MetadataVersion::None;
            snapshot_version = MetadataVersion::None;
        };
        // Now, make sure that the local metadata got version 1.
        let track_local = TrackRepository::new(EphemeralRepository::new());
        let track_remote = TrackRepository::new(&repo);
        let mut client = Client::with_trusted_root_keys(
            Config::default(),
            &MetadataVersion::Number(1),
            1,
            once(&KEYS[0].public().clone()),
            &track_local,
            &track_remote,
        )
        .await
        .unwrap();
        // Check that we tried to load metadata from the local repository.
        assert_eq!(
            track_remote.take_tracks(),
            vec![Track::fetch_found(
                &root_path,
                &MetadataVersion::Number(1),
                metadata1.root.as_bytes()
            ),]
        );
        assert_eq!(
            track_local.take_tracks(),
            vec![
                Track::FetchErr(root_path.clone(), MetadataVersion::Number(1)),
                Track::store_meta(&MetadataVersion::Number(1), &metadata1.root),
                Track::FetchErr(root_path.clone(), MetadataVersion::Number(2)),
                Track::FetchErr(timestamp_path.clone(), MetadataVersion::None),
            ]
        );
        assert_matches!(client.update().await, Ok(true));
        assert_eq!(client.tuf.trusted_root().version(), 1);
        // Make sure we fetched the metadata in the right order.
        assert_eq!(
            track_remote.take_tracks(),
            vec![
                Track::FetchErr(root_path.clone(), MetadataVersion::Number(2)),
                Track::fetch_meta_found(
                    &MetadataVersion::None,
                    &metadata1.timestamp.as_ref().unwrap()
                ),
                Track::fetch_meta_found(&snapshot_version, &metadata1.snapshot.as_ref().unwrap()),
                Track::fetch_meta_found(&targets_version, metadata1.targets.as_ref().unwrap()),
            ]
        );
        assert_eq!(
            track_local.take_tracks(),
            vec![
                Track::store_meta(
                    &MetadataVersion::None,
                    metadata1.timestamp.as_ref().unwrap()
                ),
                Track::store_meta(&MetadataVersion::None, metadata1.snapshot.as_ref().unwrap()),
                Track::store_meta(&MetadataVersion::None, metadata1.targets.as_ref().unwrap()),
            ],
        );
        // Another update should not fetch anything.
        assert_matches!(client.update().await, Ok(false));
        assert_eq!(client.tuf.trusted_root().version(), 1);
        // Make sure we only fetched the next root and timestamp, and didn't store anything.
        assert_eq!(
            track_remote.take_tracks(),
            vec![
                Track::FetchErr(root_path.clone(), MetadataVersion::Number(2)),
                Track::fetch_meta_found(
                    &MetadataVersion::None,
                    metadata1.timestamp.as_ref().unwrap(),
                ),
            ]
        );
        assert_eq!(track_local.take_tracks(), vec![]);
        ////
        // Now bump the root to version 3
        // Make sure the version 2 is also signed by version 1's keys.
        let metadata2 = RepoBuilder::new(&repo)
            .root_keys(vec![&KEYS[0], &KEYS[1]])
            .targets_keys(vec![&KEYS[0], &KEYS[1], &KEYS[2]])
            .snapshot_keys(vec![&KEYS[0], &KEYS[1], &KEYS[2]])
            .timestamp_keys(vec![&KEYS[0], &KEYS[1], &KEYS[2]])
            .with_root_builder(|bld| {
                bld.version(2)
                    .consistent_snapshot(consistent_snapshot)
                    .expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
                    .root_key(KEYS[1].public().clone())
                    .snapshot_key(KEYS[1].public().clone())
                    .targets_key(KEYS[1].public().clone())
                    .timestamp_key(KEYS[1].public().clone())
            })
            .commit()
            .await
            .unwrap();
        // Make sure the version 3 is also signed by version 2's keys.
        let metadata3 = RepoBuilder::new(&repo)
            .root_keys(vec![&KEYS[1], &KEYS[2]])
            .targets_keys(vec![&KEYS[0], &KEYS[1], &KEYS[2]])
            .snapshot_keys(vec![&KEYS[0], &KEYS[1], &KEYS[2]])
            .timestamp_keys(vec![&KEYS[0], &KEYS[1], &KEYS[2]])
            .with_root_builder(|bld| {
                bld.version(3)
                    .consistent_snapshot(consistent_snapshot)
                    .expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
                    .root_key(KEYS[2].public().clone())
                    .snapshot_key(KEYS[2].public().clone())
                    .targets_key(KEYS[2].public().clone())
                    .timestamp_key(KEYS[2].public().clone())
            })
            .commit()
            .await
            .unwrap();
        ////
        // Finally, check that the update brings us to version 3.
        assert_matches!(client.update().await, Ok(true));
        assert_eq!(client.tuf.trusted_root().version(), 3);
        // Make sure we fetched and stored the metadata in the expected order. Note that we
        // re-fetch snapshot and targets because we rotated keys, which caused `tuf::Tuf` to delete
        // the metadata.
        assert_eq!(
            track_remote.take_tracks(),
            vec![
                Track::fetch_meta_found(&MetadataVersion::Number(2), &metadata2.root,),
                Track::fetch_meta_found(&MetadataVersion::Number(3), &metadata3.root,),
                Track::FetchErr(root_path.clone(), MetadataVersion::Number(4)),
                Track::fetch_meta_found(
                    &MetadataVersion::None,
                    metadata1.timestamp.as_ref().unwrap(),
                ),
                Track::fetch_meta_found(&snapshot_version, metadata1.snapshot.as_ref().unwrap(),),
                Track::fetch_meta_found(&targets_version, metadata1.targets.as_ref().unwrap(),),
            ]
        );
        assert_eq!(
            track_local.take_tracks(),
            vec![
                Track::store_meta(&MetadataVersion::None, &metadata2.root,),
                Track::store_meta(&MetadataVersion::Number(2), &metadata2.root,),
                Track::store_meta(&MetadataVersion::None, &metadata3.root),
                Track::store_meta(&MetadataVersion::Number(3), &metadata3.root,),
                Track::store_meta(
                    &MetadataVersion::None,
                    metadata1.timestamp.as_ref().unwrap(),
                ),
                Track::store_meta(&MetadataVersion::None, metadata1.snapshot.as_ref().unwrap(),),
                Track::store_meta(&MetadataVersion::None, metadata1.targets.as_ref().unwrap(),),
            ],
        );
    }
#[test]
fn test_fetch_target_description_standard() {
block_on(test_fetch_target_description(
"standard/metadata".to_string(),
TargetDescription::from_reader(
"target with no custom metadata".as_bytes(),
&[HashAlgorithm::Sha256],
)
.unwrap(),
));
}
#[test]
fn test_fetch_target_description_custom_empty() {
block_on(test_fetch_target_description(
"custom-empty".to_string(),
TargetDescription::from_reader_with_custom(
"target with empty custom metadata".as_bytes(),
&[HashAlgorithm::Sha256],
hashmap!(),
)
.unwrap(),
));
}
#[test]
fn test_fetch_target_description_custom() {
block_on(test_fetch_target_description(
"custom/metadata".to_string(),
TargetDescription::from_reader_with_custom(
"target with lots of custom metadata".as_bytes(),
&[HashAlgorithm::Sha256],
hashmap!(
"string".to_string() => json!("string"),
"bool".to_string() => json!(true),
"int".to_string() => json!(42),
"object".to_string() => json!({
"string": json!("string"),
"bool": json!(true),
"int": json!(42),
}),
"array".to_string() => json!([1, 2, 3]),
),
)
.unwrap(),
));
}
    /// Builds a repository containing a single target at `path`, updates a
    /// fresh client against it, and checks that `fetch_target_description`
    /// round-trips the expected `TargetDescription`.
    async fn test_fetch_target_description(path: String, expected_description: TargetDescription) {
        // Generate an ephemeral repository with a single target.
        let remote = EphemeralRepository::<Json>::new();
        let metadata = RepoBuilder::new(&remote)
            .root_keys(vec![&KEYS[0]])
            .targets_keys(vec![&KEYS[0]])
            .snapshot_keys(vec![&KEYS[0]])
            .timestamp_keys(vec![&KEYS[0]])
            .with_root_builder(|bld| {
                bld.expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
                    .root_key(KEYS[0].public().clone())
                    .snapshot_key(KEYS[0].public().clone())
                    .targets_key(KEYS[0].public().clone())
                    .timestamp_key(KEYS[0].public().clone())
            })
            .with_targets_builder(|bld| {
                bld.insert_target_description(
                    VirtualTargetPath::new(path.clone()).unwrap(),
                    expected_description.clone(),
                )
            })
            .commit()
            .await
            .unwrap();
        // Initialize and update client.
        let mut client = Client::with_trusted_root(
            Config::default(),
            &metadata.root,
            EphemeralRepository::new(),
            remote,
        )
        .await
        .unwrap();
        assert_matches!(client.update().await, Ok(true));
        // Verify fetch_target_description returns expected target metadata
        let description = client
            .fetch_target_description(&TargetPath::new(path).unwrap())
            .await
            .unwrap();
        assert_eq!(description, expected_description);
    }
#[test]
fn update_fails_if_cannot_write_to_repo() {
block_on(async {
let remote = EphemeralRepository::<Json>::new();
// First, create the metadata.
let _ = RepoBuilder::new(&remote)
.root_keys(vec![&KEYS[0]])
.targets_keys(vec![&KEYS[0]])
.snapshot_keys(vec![&KEYS[0]])
.timestamp_keys(vec![&KEYS[0]])
.with_root_builder(|bld| {
bld.expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
.root_key(KEYS[0].public().clone())
.snapshot_key(KEYS[0].public().clone())
.targets_key(KEYS[0].public().clone())
.timestamp_key(KEYS[0].public().clone())
})
.targets_version(1)
.commit()
.await
.unwrap();
// Now, make sure that the local metadata got version 1.
let local = ErrorRepository::new(EphemeralRepository::new());
let mut client = Client::with_trusted_root_keys(
Config::default(),
&MetadataVersion::Number(1),
1,
once(&KEYS[0].public().clone()),
&local,
&remote,
)
.await
.unwrap();
// The first update should succeed.
assert_matches!(client.update().await, Ok(true));
// Publish new metadata.
let _ = RepoBuilder::new(&remote)
.root_keys(vec![&KEYS[0]])
.targets_keys(vec![&KEYS[0]])
.snapshot_keys(vec![&KEYS[0]])
.timestamp_keys(vec![&KEYS[0]])
.with_root_builder(|bld| {
bld.expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
.root_key(KEYS[0].public().clone())
.snapshot_key(KEYS[0].public().clone())
.targets_key(KEYS[0].public().clone())
.timestamp_key(KEYS[0].public().clone())
})
.targets_version(2)
.snapshot_version(2)
.timestamp_version(2)
.commit()
.await
.unwrap();
local.fail_metadata_stores(true);
// The second update should fail.
assert_matches!(client.update().await, Err(Error::Encoding(_)));
// However, due to https://github.com/theupdateframework/specification/issues/131, if
// the update is retried a few times it will still succeed.
assert_matches!(client.update().await, Err(Error::Encoding(_)));
assert_matches!(client.update().await, Err(Error::Encoding(_)));
assert_matches!(client.update().await, Ok(false));
});
}
#[test]
fn with_trusted_methods_return_correct_metadata() {
block_on(async {
let local = EphemeralRepository::<Json>::new();
let remote = EphemeralRepository::<Json>::new();
// Store an expired root in the local store.
let metadata1 = RepoBuilder::new(&local)
.root_keys(vec![&KEYS[0]])
.targets_keys(vec![&KEYS[0]])
.snapshot_keys(vec![&KEYS[0]])
.timestamp_keys(vec![&KEYS[0]])
.with_root_builder(|bld| {
bld.version(1)
.consistent_snapshot(true)
.expires(Utc.ymd(2038, 1, 1).and_hms(0, 0, 0))
.root_key(KEYS[0].public().clone())
.snapshot_key(KEYS[0].public().clone())
.targets_key(KEYS[0].public().clone())
.timestamp_key(KEYS[0].public().clone())
})
.targets_version(1)
.commit()
.await
.unwrap();
let track_local = TrackRepository::new(&local);
let track_remote = TrackRepository::new(&remote);
let client = Client::with_trusted_root(
Config::default(),
&metadata1.root,
&track_local,
&track_remote,
)
.await
.unwrap();
assert_eq!(client.trusted_targets(), client.tuf.trusted_targets());
assert_eq!(client.trusted_snapshot(), client.tuf.trusted_snapshot());
assert_eq!(client.trusted_timestamp(), client.tuf.trusted_timestamp());
assert_eq!(
client.trusted_delegations(),
client.tuf.trusted_delegations()
);
})
}
}
| 37.021448 | 104 | 0.519082 |
use super::category::AppCategory;
use clap::ArgMatches;
use glob;
use std;
use std::collections::HashMap;
use std::fs::File;
use std::io::Read;
use std::path::{Path, PathBuf};
use target_build_utils::TargetInfo;
use toml;
use walkdir;
/// The kinds of packages this tool can produce.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum PackageType {
    OsxBundle,
    IosBundle,
    WindowsMsi,
    Deb,
    Rpm,
}

impl PackageType {
    /// Parses a short format name (as given on the command line) into a
    /// `PackageType`, returning `None` for unrecognized names.
    pub fn from_short_name(name: &str) -> Option<PackageType> {
        // Other types we may eventually want to support: apk
        ALL_PACKAGE_TYPES
            .iter()
            .find(|package_type| package_type.short_name() == name)
            .copied()
    }

    /// Returns the short name used to identify this package type on the
    /// command line (e.g. "deb" or "msi").
    pub fn short_name(&self) -> &'static str {
        match self {
            PackageType::Deb => "deb",
            PackageType::IosBundle => "ios",
            PackageType::WindowsMsi => "msi",
            PackageType::OsxBundle => "osx",
            PackageType::Rpm => "rpm",
        }
    }

    /// Returns a slice containing every supported package type.
    pub fn all() -> &'static [PackageType] {
        ALL_PACKAGE_TYPES
    }
}

/// Every package type this tool knows about.
const ALL_PACKAGE_TYPES: &[PackageType] = &[
    PackageType::Deb,
    PackageType::IosBundle,
    PackageType::WindowsMsi,
    PackageType::OsxBundle,
    PackageType::Rpm,
];
/// Which compiled artifact of the crate is being bundled.
#[derive(Clone, Debug)]
pub enum BuildArtifact {
    /// The crate's main binary.
    Main,
    /// A named `[[bin]]` target.
    Bin(String),
    /// A named `[[example]]` target.
    Example(String),
}
/// The `[package.metadata.bundle]` section of `Cargo.toml`, as deserialized
/// by serde. All fields are optional.
#[derive(Clone, Debug, Deserialize)]
struct BundleSettings {
  // General settings:
  name: Option<String>,
  identifier: Option<String>,
  icon: Option<Vec<String>>,
  version: Option<String>,
  resources: Option<Vec<String>>,
  copyright: Option<String>,
  category: Option<AppCategory>,
  short_description: Option<String>,
  long_description: Option<String>,
  script: Option<PathBuf>,
  // OS-specific settings:
  deb_depends: Option<Vec<String>>,
  osx_frameworks: Option<Vec<String>>,
  osx_minimum_system_version: Option<String>,
  // Bundles for other binaries/examples:
  bin: Option<HashMap<String, BundleSettings>>,
  example: Option<HashMap<String, BundleSettings>>,
}
/// The `[package.metadata]` table of `Cargo.toml`.
#[derive(Clone, Debug, Deserialize)]
struct MetadataSettings {
  bundle: Option<BundleSettings>,
}
/// The `[package]` section of `Cargo.toml`.
#[derive(Clone, Debug, Deserialize)]
struct PackageSettings {
  name: String,
  version: String,
  description: String,
  homepage: Option<String>,
  authors: Option<Vec<String>>,
  metadata: Option<MetadataSettings>,
}
/// The `[workspace]` section of `Cargo.toml`.
#[derive(Clone, Debug, Deserialize)]
struct WorkspaceSettings {
  members: Option<Vec<String>>,
}
/// Top-level structure of a `Cargo.toml` manifest, limited to the tables this
/// tool cares about.
#[derive(Clone, Debug, Deserialize)]
struct CargoSettings {
  package: Option<PackageSettings>, // "Ancestor" workspace Cargo.toml files may not have package info
  workspace: Option<WorkspaceSettings>, // "Ancestor" workspace Cargo.toml files may declare workspaces
}
/// All settings for a single bundling run, assembled from the command line
/// and the crate's `Cargo.toml`.
#[derive(Clone, Debug)]
pub struct Settings {
  // The `[package]` data from Cargo.toml.
  package: PackageSettings,
  package_type: Option<PackageType>, // If `None`, use the default package type for this os
  // Target triple and its parsed info, when cross-compiling.
  target: Option<(String, TargetInfo)>,
  // Cargo features requested on the command line, if any.
  features: Option<Vec<String>>,
  // Directory in which the bundle will be placed.
  project_out_directory: PathBuf,
  build_artifact: BuildArtifact,
  is_release: bool,
  // Path to and file name of the compiled binary being bundled.
  binary_path: PathBuf,
  binary_name: String,
  bundle_settings: BundleSettings,
}
impl CargoSettings {
  /// Try to load a set of `CargoSettings` from the `Cargo.toml` file in the
  /// specified directory.
  ///
  /// Returns an error if the file cannot be read or is not valid TOML.
  fn load(dir: &PathBuf) -> crate::Result<Self> {
    let toml_path = dir.join("Cargo.toml");
    // `fs::read_to_string` replaces the manual open/read_to_string dance:
    // it pre-sizes the buffer from file metadata and doesn't keep the file
    // handle alive longer than necessary.
    let toml_str = std::fs::read_to_string(toml_path)?;
    toml::from_str(&toml_str).map_err(|e| e.into())
  }
}
impl Settings {
pub fn new(current_dir: PathBuf, matches: &ArgMatches<'_>) -> crate::Result<Self> {
let package_type = match matches.value_of("format") {
Some(name) => match PackageType::from_short_name(name) {
Some(package_type) => Some(package_type),
None => bail!("Unsupported bundle format: {}", name),
},
None => None,
};
let build_artifact = if let Some(bin) = matches.value_of("bin") {
BuildArtifact::Bin(bin.to_string())
} else if let Some(example) = matches.value_of("example") {
BuildArtifact::Example(example.to_string())
} else {
BuildArtifact::Main
};
let is_release = matches.is_present("release");
let target = match matches.value_of("target") {
Some(triple) => Some((triple.to_string(), TargetInfo::from_str(triple)?)),
None => None,
};
let features = if matches.is_present("features") {
Some(
matches
.values_of("features")
.unwrap()
.map(|s| s.to_string())
.collect(),
)
} else {
None
};
let cargo_settings = CargoSettings::load(¤t_dir)?;
let package = match cargo_settings.package {
Some(package_info) => package_info,
None => bail!("No 'package' info found in 'Cargo.toml'"),
};
let workspace_dir = Settings::get_workspace_dir(¤t_dir);
let target_dir = Settings::get_target_dir(&workspace_dir, &target, is_release, &build_artifact);
let bundle_settings = if let Some(bundle_settings) = package
.metadata
.as_ref()
.and_then(|metadata| metadata.bundle.as_ref())
{
bundle_settings.clone()
} else {
bail!("No [package.metadata.bundle] section in Cargo.toml");
};
let (bundle_settings, binary_name) = match build_artifact {
BuildArtifact::Main => (bundle_settings, package.name.clone()),
BuildArtifact::Bin(ref name) => (
bundle_settings_from_table(&bundle_settings.bin, "bin", name)?,
name.clone(),
),
BuildArtifact::Example(ref name) => (
bundle_settings_from_table(&bundle_settings.example, "example", name)?,
name.clone(),
),
};
let binary_name = if cfg!(windows) {
format!("{}.{}", &binary_name, "exe")
} else {
binary_name
};
let binary_path = target_dir.join(&binary_name);
Ok(Settings {
package,
package_type,
target,
features,
build_artifact,
is_release,
project_out_directory: target_dir,
binary_path,
binary_name,
bundle_settings,
})
}
/*
The target_dir where binaries will be compiled to by cargo can vary:
- this directory is a member of a workspace project
- overridden by CARGO_TARGET_DIR environment variable
- specified in build.target-dir configuration key
- if the build is a 'release' or 'debug' build
This function determines where 'target' dir is and suffixes it with 'release' or 'debug'
to determine where the compiled binary will be located.
*/
fn get_target_dir(
project_root_dir: &PathBuf,
target: &Option<(String, TargetInfo)>,
is_release: bool,
build_artifact: &BuildArtifact,
) -> PathBuf {
let mut path = project_root_dir.join("target");
if let &Some((ref triple, _)) = target {
path.push(triple);
}
path.push(if is_release { "release" } else { "debug" });
if let &BuildArtifact::Example(_) = build_artifact {
path.push("examples");
}
path
}
/*
The specification of the Cargo.toml Manifest that covers the "workspace" section is here:
https://doc.rust-lang.org/cargo/reference/manifest.html#the-workspace-section
Determining if the current project folder is part of a workspace:
- Walk up the file system, looking for a Cargo.toml file.
- Stop at the first one found.
- If one is found before reaching "/" then this folder belongs to that parent workspace
*/
pub fn get_workspace_dir(current_dir: &PathBuf) -> PathBuf {
let mut dir = current_dir.clone();
while dir.pop() {
let set = CargoSettings::load(&dir);
if set.is_ok() {
return dir;
}
}
// Nothing found walking up the file system, return the starting directory
current_dir.clone()
}
/// Returns the directory where the bundle should be placed.
pub fn project_out_directory(&self) -> &Path {
&self.project_out_directory
}
/// Returns the architecture for the binary being bundled (e.g. "arm" or
/// "x86" or "x86_64").
pub fn binary_arch(&self) -> &str {
if let Some((_, ref info)) = self.target {
info.target_arch()
} else {
std::env::consts::ARCH
}
}
/// Returns the file name of the binary being bundled.
pub fn binary_name(&self) -> &str {
&self.binary_name
}
/// Returns the path to the binary being bundled.
pub fn binary_path(&self) -> &Path {
&self.binary_path
}
/// If a specific package type was specified by the command-line, returns
/// that package type; otherwise, if a target triple was specified by the
/// command-line, returns the native package type(s) for that target;
/// otherwise, returns the native package type(s) for the host platform.
/// Fails if the host/target's native package type is not supported.
pub fn package_types(&self) -> crate::Result<Vec<PackageType>> {
if let Some(package_type) = self.package_type {
Ok(vec![package_type])
} else {
let target_os = if let Some((_, ref info)) = self.target {
info.target_os()
} else {
std::env::consts::OS
};
match target_os {
"macos" => Ok(vec![PackageType::OsxBundle]),
"ios" => Ok(vec![PackageType::IosBundle]),
"linux" => Ok(vec![PackageType::Deb]), // TODO: Do Rpm too, once it's implemented.
"windows" => Ok(vec![PackageType::WindowsMsi]),
os => bail!("Native {} bundles not yet supported.", os),
}
}
}
/// If the bundle is being cross-compiled, returns the target triple string
/// (e.g. `"x86_64-apple-darwin"`). If the bundle is targeting the host
/// environment, returns `None`.
pub fn target_triple(&self) -> Option<&str> {
match self.target {
Some((ref triple, _)) => Some(triple.as_str()),
None => None,
}
}
/// Returns the features that is being built.
pub fn build_features(&self) -> Option<Vec<String>> {
self.features.to_owned()
}
/// Returns the artifact that is being bundled.
pub fn build_artifact(&self) -> &BuildArtifact {
&self.build_artifact
}
/// Returns true if the bundle is being compiled in release mode, false if
/// it's being compiled in debug mode.
pub fn is_release_build(&self) -> bool {
self.is_release
}
pub fn bundle_name(&self) -> &str {
self
.bundle_settings
.name
.as_ref()
.unwrap_or(&self.package.name)
}
pub fn bundle_identifier(&self) -> &str {
self
.bundle_settings
.identifier
.as_ref()
.map(String::as_str)
.unwrap_or("")
}
/// Returns an iterator over the icon files to be used for this bundle.
pub fn icon_files(&self) -> ResourcePaths<'_> {
match self.bundle_settings.icon {
Some(ref paths) => ResourcePaths::new(paths.as_slice(), false),
None => ResourcePaths::new(&[], false),
}
}
/// Returns an iterator over the resource files to be included in this
/// bundle.
pub fn resource_files(&self) -> ResourcePaths<'_> {
match self.bundle_settings.resources {
Some(ref paths) => ResourcePaths::new(paths.as_slice(), true),
None => ResourcePaths::new(&[], true),
}
}
pub fn version_string(&self) -> &str {
self
.bundle_settings
.version
.as_ref()
.unwrap_or(&self.package.version)
}
pub fn copyright_string(&self) -> Option<&str> {
self.bundle_settings.copyright.as_ref().map(String::as_str)
}
pub fn author_names(&self) -> &[String] {
match self.package.authors {
Some(ref names) => names.as_slice(),
None => &[],
}
}
pub fn authors_comma_separated(&self) -> Option<String> {
let names = self.author_names();
if names.is_empty() {
None
} else {
Some(names.join(", "))
}
}
pub fn homepage_url(&self) -> &str {
&self
.package
.homepage
.as_ref()
.map(String::as_str)
.unwrap_or("")
}
pub fn app_category(&self) -> Option<AppCategory> {
self.bundle_settings.category
}
pub fn short_description(&self) -> &str {
self
.bundle_settings
.short_description
.as_ref()
.unwrap_or(&self.package.description)
}
pub fn long_description(&self) -> Option<&str> {
self
.bundle_settings
.long_description
.as_ref()
.map(String::as_str)
}
pub fn debian_dependencies(&self) -> &[String] {
match self.bundle_settings.deb_depends {
Some(ref dependencies) => dependencies.as_slice(),
None => &[],
}
}
pub fn osx_frameworks(&self) -> &[String] {
match self.bundle_settings.osx_frameworks {
Some(ref frameworks) => frameworks.as_slice(),
None => &[],
}
}
pub fn osx_minimum_system_version(&self) -> Option<&str> {
self
.bundle_settings
.osx_minimum_system_version
.as_ref()
.map(String::as_str)
}
}
/// Extracts the per-binary (or per-example) `BundleSettings` from the given
/// optional table of `[package.metadata.bundle]`.
///
/// Fails with a descriptive error when the table or the named entry is
/// missing.
fn bundle_settings_from_table(
  opt_map: &Option<HashMap<String, BundleSettings>>,
  map_name: &str,
  bundle_name: &str,
) -> crate::Result<BundleSettings> {
  match opt_map.as_ref().and_then(|map| map.get(bundle_name)) {
    Some(settings) => Ok(settings.clone()),
    None => bail!(
      "No [package.metadata.bundle.{}.{}] section in Cargo.toml",
      map_name,
      bundle_name
    ),
  }
}
/// Iterator over the files matched by a list of glob patterns, optionally
/// recursing into matched directories.
pub struct ResourcePaths<'a> {
  // Patterns not yet expanded.
  pattern_iter: std::slice::Iter<'a, String>,
  // Expansion of the current pattern, if one is in progress.
  glob_iter: Option<glob::Paths>,
  // Recursive walk of the current matched directory, if one is in progress.
  walk_iter: Option<walkdir::IntoIter>,
  // Whether matched directories may be walked (true for resources) or are an
  // error (false for icons).
  allow_walk: bool,
}
impl<'a> ResourcePaths<'a> {
  /// Creates a path iterator over `patterns`.  When `allow_walk` is true,
  /// directories matched by a pattern are traversed recursively.
  fn new(patterns: &'a [String], allow_walk: bool) -> ResourcePaths<'a> {
    let pattern_iter = patterns.iter();
    ResourcePaths {
      pattern_iter,
      glob_iter: None,
      walk_iter: None,
      allow_walk,
    }
  }
}
impl<'a> Iterator for ResourcePaths<'a> {
  type Item = crate::Result<PathBuf>;
  /// Yields the next matched file path (never a directory).
  ///
  /// State machine: drain the current directory walk (if any), then the
  /// current glob expansion (if any), then start globbing the next pattern;
  /// `None` once all patterns are exhausted.
  fn next(&mut self) -> Option<crate::Result<PathBuf>> {
    loop {
      if let Some(ref mut walk_entries) = self.walk_iter {
        if let Some(entry) = walk_entries.next() {
          let entry = match entry {
            Ok(entry) => entry,
            Err(error) => return Some(Err(crate::Error::from(error))),
          };
          let path = entry.path();
          // Directories are traversed implicitly; only files are yielded.
          if path.is_dir() {
            continue;
          }
          return Some(Ok(path.to_path_buf()));
        }
      }
      // The current walk (if any) is exhausted.
      self.walk_iter = None;
      if let Some(ref mut glob_paths) = self.glob_iter {
        if let Some(glob_result) = glob_paths.next() {
          let path = match glob_result {
            Ok(path) => path,
            Err(error) => return Some(Err(crate::Error::from(error))),
          };
          if path.is_dir() {
            if self.allow_walk {
              // Recurse into the matched directory.
              let walk = walkdir::WalkDir::new(path);
              self.walk_iter = Some(walk.into_iter());
              continue;
            } else {
              // Walking is disallowed (icon mode): a directory is an error.
              let msg = format!("{:?} is a directory", path);
              return Some(Err(crate::Error::from(msg)));
            }
          }
          return Some(Ok(path));
        }
      }
      // The current glob (if any) is exhausted; move to the next pattern.
      self.glob_iter = None;
      if let Some(pattern) = self.pattern_iter.next() {
        let glob = match glob::glob(pattern) {
          Ok(glob) => glob,
          Err(error) => return Some(Err(crate::Error::from(error))),
        };
        self.glob_iter = Some(glob);
        continue;
      }
      return None;
    }
  }
}
#[cfg(test)]
mod tests {
  use super::{AppCategory, BundleSettings, CargoSettings};
  use toml;
  /// A representative `Cargo.toml` parses into the expected `[package]` data
  /// and `[package.metadata.bundle]` settings.
  #[test]
  fn parse_cargo_toml() {
    let toml_str = "\
      [package]\n\
      name = \"example\"\n\
      version = \"0.1.0\"\n\
      authors = [\"Jane Doe\"]\n\
      license = \"MIT\"\n\
      description = \"An example application.\"\n\
      build = \"build.rs\"\n\
      \n\
      [package.metadata.bundle]\n\
      name = \"Example Application\"\n\
      identifier = \"com.example.app\"\n\
      resources = [\"data\", \"foo/bar\"]\n\
      category = \"Puzzle Game\"\n\
      long_description = \"\"\"\n\
      This is an example of a\n\
      simple application.\n\
      \"\"\"\n\
      \n\
      [dependencies]\n\
      rand = \"0.4\"\n";
    let cargo_settings: CargoSettings = toml::from_str(toml_str).unwrap();
    let package = cargo_settings.package.unwrap();
    assert_eq!(package.name, "example");
    assert_eq!(package.version, "0.1.0");
    assert_eq!(package.description, "An example application.");
    assert_eq!(package.homepage, None);
    assert_eq!(package.authors, Some(vec!["Jane Doe".to_string()]));
    assert!(package.metadata.is_some());
    let metadata = package.metadata.as_ref().unwrap();
    assert!(metadata.bundle.is_some());
    let bundle = metadata.bundle.as_ref().unwrap();
    assert_eq!(bundle.name, Some("Example Application".to_string()));
    assert_eq!(bundle.identifier, Some("com.example.app".to_string()));
    assert_eq!(bundle.icon, None);
    assert_eq!(bundle.version, None);
    assert_eq!(
      bundle.resources,
      Some(vec!["data".to_string(), "foo/bar".to_string()])
    );
    assert_eq!(bundle.category, Some(AppCategory::PuzzleGame));
    assert_eq!(
      bundle.long_description,
      Some(
        "This is an example of a\n\
         simple application.\n"
          .to_string()
      )
    );
  }
  /// Per-binary and per-example bundle tables
  /// (`[package.metadata.bundle.bin.*]` / `...example.*`) deserialize into
  /// the `bin` and `example` maps of `BundleSettings`.
  #[test]
  fn parse_bin_and_example_bundles() {
    let toml_str = "\
      [package]\n\
      name = \"example\"\n\
      version = \"0.1.0\"\n\
      description = \"An example application.\"\n\
      \n\
      [package.metadata.bundle.bin.foo]\n\
      name = \"Foo App\"\n\
      \n\
      [package.metadata.bundle.bin.bar]\n\
      name = \"Bar App\"\n\
      \n\
      [package.metadata.bundle.example.baz]\n\
      name = \"Baz Example\"\n\
      \n\
      [[bin]]\n\
      name = \"foo\"\n
      \n\
      [[bin]]\n\
      name = \"bar\"\n
      \n\
      [[example]]\n\
      name = \"baz\"\n";
    let cargo_settings: CargoSettings = toml::from_str(toml_str).unwrap();
    assert!(cargo_settings.package.is_some());
    let package = cargo_settings.package.as_ref().unwrap();
    assert!(package.metadata.is_some());
    let metadata = package.metadata.as_ref().unwrap();
    assert!(metadata.bundle.is_some());
    let bundle = metadata.bundle.as_ref().unwrap();
    assert!(bundle.example.is_some());
    let bins = bundle.bin.as_ref().unwrap();
    assert!(bins.contains_key("foo"));
    let foo: &BundleSettings = bins.get("foo").unwrap();
    assert_eq!(foo.name, Some("Foo App".to_string()));
    assert!(bins.contains_key("bar"));
    let bar: &BundleSettings = bins.get("bar").unwrap();
    assert_eq!(bar.name, Some("Bar App".to_string()));
    let examples = bundle.example.as_ref().unwrap();
    assert!(examples.contains_key("baz"));
    let baz: &BundleSettings = examples.get("baz").unwrap();
    assert_eq!(baz.name, Some("Baz Example".to_string()));
  }
}
| 29.761171 | 103 | 0.606472 |
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::fmt;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// Unary operators supported by the SQL dialects.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum UnaryOperator {
    Plus,
    Minus,
    Not,
    /// Bitwise Not, e.g. `~9` (PostgreSQL-specific)
    PGBitwiseNot,
    /// Square root, e.g. `|/9` (PostgreSQL-specific)
    PGSquareRoot,
    /// Cube root, e.g. `||/27` (PostgreSQL-specific)
    PGCubeRoot,
    /// Factorial, e.g. `9!` (PostgreSQL-specific)
    PGPostfixFactorial,
    /// Factorial, e.g. `!!9` (PostgreSQL-specific)
    PGPrefixFactorial,
    /// Absolute value, e.g. `@ -9` (PostgreSQL-specific)
    PGAbs,
}

impl fmt::Display for UnaryOperator {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Resolve the SQL token for this operator, then emit it.
        let token = match self {
            Self::Plus => "+",
            Self::Minus => "-",
            Self::Not => "NOT",
            Self::PGBitwiseNot => "~",
            Self::PGSquareRoot => "|/",
            Self::PGCubeRoot => "||/",
            Self::PGPostfixFactorial => "!",
            Self::PGPrefixFactorial => "!!",
            Self::PGAbs => "@",
        };
        f.write_str(token)
    }
}
/// Binary operators supported by the SQL dialects.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum BinaryOperator {
    Plus,
    Minus,
    Multiply,
    Divide,
    Modulo,
    StringConcat,
    Gt,
    Lt,
    GtEq,
    LtEq,
    Spaceship,
    Eq,
    NotEq,
    And,
    Or,
    Xor,
    Like,
    NotLike,
    ILike,
    NotILike,
    BitwiseOr,
    BitwiseAnd,
    BitwiseXor,
    PGBitwiseXor,
    PGBitwiseShiftLeft,
    PGBitwiseShiftRight,
    PGRegexMatch,
    PGRegexIMatch,
    PGRegexNotMatch,
    PGRegexNotIMatch,
    HyphenRight,
    HyphenRightRight,
    Question,
    AtRight,
    AtLeft,
    DoubleAmpersand,
}

impl fmt::Display for BinaryOperator {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Resolve the SQL token for this operator, then emit it.
        let token = match self {
            Self::Plus => "+",
            Self::Minus => "-",
            Self::Multiply => "*",
            Self::Divide => "/",
            Self::Modulo => "%",
            Self::StringConcat => "||",
            Self::Gt => ">",
            Self::Lt => "<",
            Self::GtEq => ">=",
            Self::LtEq => "<=",
            Self::Spaceship => "<=>",
            Self::Eq => "=",
            Self::NotEq => "<>",
            Self::And => "AND",
            Self::Or => "OR",
            Self::Xor => "XOR",
            Self::Like => "LIKE",
            Self::NotLike => "NOT LIKE",
            Self::ILike => "ILIKE",
            Self::NotILike => "NOT ILIKE",
            Self::BitwiseOr => "|",
            Self::BitwiseAnd => "&",
            Self::BitwiseXor => "^",
            Self::PGBitwiseXor => "#",
            Self::PGBitwiseShiftLeft => "<<",
            Self::PGBitwiseShiftRight => ">>",
            Self::PGRegexMatch => "~",
            Self::PGRegexIMatch => "~*",
            Self::PGRegexNotMatch => "!~",
            Self::PGRegexNotIMatch => "!~*",
            Self::HyphenRight => "->",
            Self::HyphenRightRight => "->>",
            Self::Question => "?",
            Self::AtRight => "@>",
            Self::AtLeft => "@<",
            Self::DoubleAmpersand => "&&",
        };
        f.write_str(token)
    }
}
| 31.021583 | 75 | 0.57282 |
//! Endpoints for serving static contents on the file system.
use {
crate::{
action::{
ActionContext, //
EndpointAction,
Preflight,
PreflightContext,
},
endpoint::{Endpoint, IsEndpoint},
error::{self, Error},
output::fs::{NamedFile, OpenNamedFile},
},
futures::Poll,
std::path::PathBuf,
};
/// Create an endpoint which serves a specified file on the file system.
#[inline]
pub fn file(path: impl Into<PathBuf>) -> File {
    let path = path.into();
    File { path }
}
/// Endpoint created by [`file`]; serves exactly one file.
#[allow(missing_docs)]
#[derive(Debug)]
pub struct File {
    // Path of the single file to serve; cloned into each per-request action.
    path: PathBuf,
}
mod file {
    use super::*;
    use futures::Future as _Future;
    use std::marker::PhantomData;
    impl IsEndpoint for File {}
    impl<Bd> Endpoint<Bd> for File {
        type Output = (NamedFile,);
        type Action = FileAction<Bd>;
        // A fresh action per request; the file open is started lazily on the
        // first call to `poll_action`.
        fn action(&self) -> Self::Action {
            FileAction {
                path: self.path.clone(),
                opening: None,
                _marker: PhantomData,
            }
        }
    }
    #[allow(missing_debug_implementations)]
    pub struct FileAction<Bd> {
        path: PathBuf,
        // In-flight open, created on the first poll.
        opening: Option<OpenNamedFile>,
        // Ties the `Bd` parameter to the action without owning a body value.
        _marker: PhantomData<fn(Bd)>,
    }
    impl<Bd> EndpointAction<Bd> for FileAction<Bd> {
        type Output = (NamedFile,);
        fn poll_action(&mut self, _: &mut ActionContext<'_, Bd>) -> Poll<Self::Output, Error> {
            // First iteration starts the open; the loop then re-enters the
            // `Some` branch, so it runs at most twice per call.
            loop {
                if let Some(ref mut opening) = self.opening {
                    return opening.poll().map(|x| x.map(|x| (x,))).map_err(Into::into);
                }
                self.opening = Some(NamedFile::open(self.path.clone()));
            }
        }
    }
}
/// Create an endpoint which serves files in the specified directory.
#[inline]
pub fn dir(root: impl Into<PathBuf>) -> Dir {
    let root = root.into();
    Dir { root }
}
/// Endpoint created by [`dir`]; serves files beneath one root directory.
#[allow(missing_docs)]
#[derive(Debug)]
pub struct Dir {
    // Root directory; the request path is joined onto this in `preflight`.
    root: PathBuf,
}
mod dir {
    use super::*;
    use futures::Future as _Future;
    impl IsEndpoint for Dir {}
    impl<Bd> Endpoint<Bd> for Dir {
        type Output = (NamedFile,);
        type Action = DirAction;
        fn action(&self) -> Self::Action {
            DirAction {
                root: self.root.clone(),
                state: State::Init,
            }
        }
    }
    #[allow(missing_debug_implementations)]
    pub struct DirAction {
        root: PathBuf,
        state: State,
    }
    // Two-phase state machine: `preflight` resolves the request path and
    // starts the open; `poll_action` drives the pending open to completion.
    enum State {
        Init,
        Opening(OpenNamedFile),
    }
    impl<Bd> EndpointAction<Bd> for DirAction {
        type Output = (NamedFile,);
        fn preflight(
            &mut self,
            cx: &mut PreflightContext<'_>,
        ) -> Result<Preflight<Self::Output>, Error> {
            // Percent-decode the unmatched remainder of the request path.
            let path = cx
                .cursor()
                .remaining_path()
                .percent_decode()
                .map(|path| PathBuf::from(path.into_owned()));
            // Consume all remaining segments so routing treats them as matched.
            let _ = cx.cursor().count();
            // A decode failure is the client's fault -> 400.
            let path = path.map_err(error::bad_request)?;
            // NOTE(review): the decoded path is joined onto `root` without
            // rejecting `..` components — looks like a path-traversal risk;
            // confirm whether a higher layer normalizes the request path.
            let mut path = self.root.join(path);
            if path.is_dir() {
                path = path.join("index.html");
            }
            self.state = State::Opening(NamedFile::open(path));
            Ok(Preflight::Incomplete)
        }
        fn poll_action(&mut self, _: &mut ActionContext<'_, Bd>) -> Poll<Self::Output, Error> {
            match self.state {
                // `preflight` always runs first and moves the state to
                // `Opening`, so `Init` is unreachable here.
                State::Init => unreachable!(),
                State::Opening(ref mut f) => f.poll().map(|x| x.map(|x| (x,))).map_err(Into::into),
            }
        }
    }
}
11fffefe0a9d6a56c324e2beba266430cb0ef815 | 7,300 | #[cfg(feature = "cc")]
use cc::Build;
use std::env::var;
use std::io::Write;
/// The directory for out-of-line ("outline") libraries.
///
/// Holds both the per-arch `.s` sources and, under `<profile>/`, the
/// checked-in pre-built `lib*.a` archives.
const OUTLINE_PATH: &str = "src/imp/linux_raw/arch/outline";
/// Build-script entry point: probe the compiler, inspect the target, and
/// choose between the `libc` and `linux_raw` backends by emitting
/// `cargo:rustc-cfg` flags (plus `asm`/`naked_functions` or the outline
/// assembly link step for the raw backend).
fn main() {
    // Don't rerun this on changes other than build.rs, as we only depend on
    // the rustc version.
    println!("cargo:rerun-if-changed=build.rs");
    use_feature_or_nothing("rustc_attrs");
    // Features only used in no-std configurations.
    #[cfg(not(feature = "std"))]
    {
        use_feature_or_nothing("const_raw_ptr_deref");
        use_feature_or_nothing("core_c_str");
        use_feature_or_nothing("alloc_c_string");
    }
    // Gather target information.
    let arch = var("CARGO_CFG_TARGET_ARCH").unwrap();
    let asm_name = format!("{}/{}.s", OUTLINE_PATH, arch);
    let asm_name_present = std::fs::metadata(&asm_name).is_ok();
    let os_name = var("CARGO_CFG_TARGET_OS").unwrap();
    let pointer_width = var("CARGO_CFG_TARGET_POINTER_WIDTH").unwrap();
    let endian = var("CARGO_CFG_TARGET_ENDIAN").unwrap();
    // Check for special target variants.
    let is_x32 = arch == "x86_64" && pointer_width == "32";
    let is_arm64_ilp32 = arch == "aarch64" && pointer_width == "32";
    let is_powerpc64be = arch == "powerpc64" && endian == "big";
    let is_mipseb = arch == "mips" && endian == "big";
    let is_mips64eb = arch == "mips64" && endian == "big";
    let is_unsupported_abi = is_x32 || is_arm64_ilp32 || is_powerpc64be || is_mipseb || is_mips64eb;
    // Check for `--features=use-libc`. This allows crate users to enable the
    // libc backend.
    let feature_use_libc = var("CARGO_FEATURE_USE_LIBC").is_ok();
    // Check for `--features=rustc-dep-of-std`. This is used when rustix is
    // being used to build std, in which case `can_compile` doesn't work
    // because `core` isn't available yet, but also, we can assume we have a
    // recent compiler.
    let feature_rustc_dep_of_std = var("CARGO_FEATURE_RUSTC_DEP_OF_STD").is_ok();
    // Check for `RUSTFLAGS=--cfg=rustix_use_libc`. This allows end users to
    // enable the libc backend even if rustix is depended on transitively.
    let cfg_use_libc = var("CARGO_CFG_RUSTIX_USE_LIBC").is_ok();
    // Check for eg. `RUSTFLAGS=--cfg=rustix_use_experimental_asm`. This is a
    // rustc flag rather than a cargo feature flag because it's experimental
    // and not something we want accidentally enabled via --all-features.
    let rustix_use_experimental_asm = var("CARGO_CFG_RUSTIX_USE_EXPERIMENTAL_ASM").is_ok();
    // Miri doesn't support inline asm, and has builtin support for recognizing
    // libc FFI calls, so if we're running under miri, use the libc backend.
    let miri = var("CARGO_CFG_MIRI").is_ok();
    // If the libc backend is requested, or if we're not on a platform for
    // which we have linux_raw support, use the libc backend.
    //
    // For now Android uses the libc backend; in theory it could use the
    // linux_raw backend, but to do that we'll need to figure out how to
    // install the toolchain for it.
    if feature_use_libc
        || cfg_use_libc
        || os_name != "linux"
        || !asm_name_present
        || is_unsupported_abi
        || miri
    {
        // Use the libc backend.
        use_feature("libc");
    } else {
        // Use the linux_raw backend.
        use_feature("linux_raw");
        use_feature_or_nothing("core_intrinsics");
        // Use inline asm if we have it, or outline asm otherwise. On PowerPC
        // and MIPS, Rust's inline asm is considered experimental, so only use
        // it if `--cfg=rustix_use_experimental_asm` is given.
        if (feature_rustc_dep_of_std || can_compile("use std::arch::asm;"))
            && (arch != "x86" || has_feature("naked_functions"))
            && ((arch != "powerpc64" && arch != "mips" && arch != "mips64")
                || rustix_use_experimental_asm)
        {
            use_feature("asm");
            if arch == "x86" {
                // 32-bit x86 syscalls need naked functions for the vsyscall path.
                use_feature("naked_functions");
            }
            if rustix_use_experimental_asm {
                use_feature("asm_experimental_arch");
            }
        } else {
            // No usable inline asm: link the pre-assembled syscall stubs.
            link_in_librustix_outline(&arch, &asm_name);
        }
    }
    println!("cargo:rerun-if-env-changed=CARGO_CFG_RUSTIX_USE_EXPERIMENTAL_ASM");
}
/// Link in the desired version of librustix_outline_{arch}.a, containing the
/// outline assembly code for making syscalls.
///
/// `arch` is the cargo target arch name; `asm_name` is the path of the
/// corresponding `.s` source under [`OUTLINE_PATH`].
fn link_in_librustix_outline(arch: &str, asm_name: &str) {
    let name = format!("rustix_outline_{}", arch);
    let profile = var("PROFILE").unwrap();
    // `to` is the pre-built archive checked into the repository.
    let to = format!("{}/{}/lib{}.a", OUTLINE_PATH, profile, name);
    println!("cargo:rerun-if-changed={}", to);
    // If "cc" is not enabled, use a pre-built library.
    #[cfg(not(feature = "cc"))]
    {
        let _ = asm_name;
        println!("cargo:rustc-link-search={}/{}", OUTLINE_PATH, profile);
        println!("cargo:rustc-link-lib=static={}", name);
    }
    // If "cc" is enabled, build the library from source, update the pre-built
    // version, and assert that the pre-built version is checked in.
    #[cfg(feature = "cc")]
    {
        let out_dir = var("OUT_DIR").unwrap();
        Build::new().file(&asm_name).compile(&name);
        println!("cargo:rerun-if-changed={}", asm_name);
        // Only maintain the checked-in copy when building from a git checkout.
        if std::fs::metadata(".git").is_ok() {
            let from = format!("{}/lib{}.a", out_dir, name);
            let prev_metadata = std::fs::metadata(&to);
            std::fs::copy(&from, &to).unwrap();
            assert!(
                prev_metadata.is_ok(),
                "{} didn't previously exist; please inspect the new file and `git add` it",
                to
            );
            // Fail the build if the regenerated archive differs from the
            // committed one, so the repo copy never goes stale silently.
            assert!(
                std::process::Command::new("git")
                    .arg("diff")
                    .arg("--quiet")
                    .arg(&to)
                    .status()
                    .unwrap()
                    .success(),
                "{} changed; please inspect the change and `git commit` it",
                to
            );
        }
    }
}
/// Emit a `--cfg` for `feature` only when the current rustc supports it;
/// otherwise do nothing.
fn use_feature_or_nothing(feature: &str) {
    if !has_feature(feature) {
        return;
    }
    use_feature(feature);
}
/// Emit a `cargo:rustc-cfg` directive so crate sources can test `#[cfg(feature_name)]`.
fn use_feature(feature: &str) {
    let directive = format!("cargo:rustc-cfg={}", feature);
    println!("{}", directive);
}
/// Test whether the rustc at `var("RUSTC")` supports the given feature.
fn has_feature(feature: &str) -> bool {
    // Probe by compiling a crate that does nothing but enable the feature.
    let probe = format!("#![allow(stable_features)]\n#![feature({})]", feature);
    can_compile(&probe)
}
/// Test whether the rustc at `var("RUSTC")` can compile the given code.
fn can_compile(code: &str) -> bool {
    use std::process::{Command, Stdio};

    let rustc = var("RUSTC").unwrap();
    let out_dir = var("OUT_DIR").unwrap();

    let mut compiler = Command::new(rustc)
        .arg("--crate-type=rlib") // Don't require `main`.
        .arg("--emit=metadata") // Do as little as possible but still parse.
        .arg("--out-dir")
        .arg(out_dir) // Put the output somewhere inconsequential.
        .arg("-") // Read the source from stdin.
        .stdin(Stdio::piped())
        .stderr(Stdio::null()) // Expected failures shouldn't spam the build log.
        .spawn()
        .unwrap();

    {
        // Dropping the handle at the end of this scope closes the pipe, so
        // rustc sees EOF after the probe source.
        let mut stdin = compiler.stdin.take().unwrap();
        writeln!(stdin, "{}", code).unwrap();
    }

    compiler.wait().unwrap().success()
}
| 38.020833 | 100 | 0.603836 |
ff16dc4e4ba1b4a517727930faf7ef961beb819f | 144,461 |
mod create_optional;
pub use self::create_optional::CreateOptional;
mod create_response;
pub use self::create_response::CreateResponse;
mod delete_optional;
pub use self::delete_optional::DeleteOptional;
mod delete_response;
pub use self::delete_response::DeleteResponse;
mod list;
pub use self::list::List;
mod list_optional;
pub use self::list_optional::ListOptional;
mod list_response;
pub use self::list_response::ListResponse;
mod patch_optional;
pub use self::patch_optional::PatchOptional;
mod patch_response;
pub use self::patch_response::PatchResponse;
mod replace_optional;
pub use self::replace_optional::ReplaceOptional;
mod replace_response;
pub use self::replace_response::ReplaceResponse;
mod watch_optional;
pub use self::watch_optional::WatchOptional;
mod watch_response;
pub use self::watch_response::WatchResponse;
pub mod api;
pub mod apiextensions_apiserver;
pub mod apimachinery;
pub mod kube_aggregator;
// Generated from operation getAPIVersions
/// get available API versions
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAPIVersionsResponse`]`>` constructor, or [`GetAPIVersionsResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_api_versions(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAPIVersionsResponse>), crate::RequestError> {
    // A plain GET with an empty body; the caller drives response parsing.
    let request = http::Request::get("/apis/".to_owned());
    match request.body(Vec::new()) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}

/// Use `<GetAPIVersionsResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_api_versions`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAPIVersionsResponse {
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroupList),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for GetAPIVersionsResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200 OK: the buffer must deserialize into the expected type.
            return match serde_json::from_slice(buf) {
                Ok(value) => Ok((GetAPIVersionsResponse::Ok(value), buf.len())),
                Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
                Err(err) => Err(crate::ResponseError::Json(err)),
            };
        }
        // Any other status: accept an empty body, otherwise try to parse
        // arbitrary JSON, consuming the buffer only on success.
        let (result, read) = if buf.is_empty() {
            (Ok(None), 0)
        } else {
            match serde_json::from_slice(buf) {
                Ok(value) => (Ok(Some(value)), buf.len()),
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => (Err(err), 0),
            }
        };
        Ok((GetAPIVersionsResponse::Other(result), read))
    }
}
// Generated from operation getAdmissionregistrationAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAdmissionregistrationAPIGroupResponse`]`>` constructor, or [`GetAdmissionregistrationAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_admissionregistration_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAdmissionregistrationAPIGroupResponse>), crate::RequestError> {
    // A plain GET with an empty body; the caller drives response parsing.
    let request = http::Request::get("/apis/admissionregistration.k8s.io/".to_owned());
    match request.body(Vec::new()) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}

/// Use `<GetAdmissionregistrationAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_admissionregistration_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAdmissionregistrationAPIGroupResponse {
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for GetAdmissionregistrationAPIGroupResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200 OK: the buffer must deserialize into the expected type.
            return match serde_json::from_slice(buf) {
                Ok(value) => Ok((GetAdmissionregistrationAPIGroupResponse::Ok(value), buf.len())),
                Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
                Err(err) => Err(crate::ResponseError::Json(err)),
            };
        }
        // Any other status: accept an empty body, otherwise try to parse
        // arbitrary JSON, consuming the buffer only on success.
        let (result, read) = if buf.is_empty() {
            (Ok(None), 0)
        } else {
            match serde_json::from_slice(buf) {
                Ok(value) => (Ok(Some(value)), buf.len()),
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => (Err(err), 0),
            }
        };
        Ok((GetAdmissionregistrationAPIGroupResponse::Other(result), read))
    }
}
// Generated from operation getAdmissionregistrationV1alpha1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAdmissionregistrationV1alpha1APIResourcesResponse`]`>` constructor, or [`GetAdmissionregistrationV1alpha1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_admissionregistration_v1alpha1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAdmissionregistrationV1alpha1APIResourcesResponse>), crate::RequestError> {
    // A plain GET with an empty body; the caller drives response parsing.
    let request = http::Request::get("/apis/admissionregistration.k8s.io/v1alpha1/".to_owned());
    match request.body(Vec::new()) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}

/// Use `<GetAdmissionregistrationV1alpha1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_admissionregistration_v1alpha1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAdmissionregistrationV1alpha1APIResourcesResponse {
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for GetAdmissionregistrationV1alpha1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200 OK: the buffer must deserialize into the expected type.
            return match serde_json::from_slice(buf) {
                Ok(value) => Ok((GetAdmissionregistrationV1alpha1APIResourcesResponse::Ok(value), buf.len())),
                Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
                Err(err) => Err(crate::ResponseError::Json(err)),
            };
        }
        // Any other status: accept an empty body, otherwise try to parse
        // arbitrary JSON, consuming the buffer only on success.
        let (result, read) = if buf.is_empty() {
            (Ok(None), 0)
        } else {
            match serde_json::from_slice(buf) {
                Ok(value) => (Ok(Some(value)), buf.len()),
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => (Err(err), 0),
            }
        };
        Ok((GetAdmissionregistrationV1alpha1APIResourcesResponse::Other(result), read))
    }
}
// Generated from operation getAdmissionregistrationV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAdmissionregistrationV1beta1APIResourcesResponse`]`>` constructor, or [`GetAdmissionregistrationV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_admissionregistration_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAdmissionregistrationV1beta1APIResourcesResponse>), crate::RequestError> {
    // A plain GET with an empty body; the caller drives response parsing.
    let request = http::Request::get("/apis/admissionregistration.k8s.io/v1beta1/".to_owned());
    match request.body(Vec::new()) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}

/// Use `<GetAdmissionregistrationV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_admissionregistration_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAdmissionregistrationV1beta1APIResourcesResponse {
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for GetAdmissionregistrationV1beta1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200 OK: the buffer must deserialize into the expected type.
            return match serde_json::from_slice(buf) {
                Ok(value) => Ok((GetAdmissionregistrationV1beta1APIResourcesResponse::Ok(value), buf.len())),
                Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
                Err(err) => Err(crate::ResponseError::Json(err)),
            };
        }
        // Any other status: accept an empty body, otherwise try to parse
        // arbitrary JSON, consuming the buffer only on success.
        let (result, read) = if buf.is_empty() {
            (Ok(None), 0)
        } else {
            match serde_json::from_slice(buf) {
                Ok(value) => (Ok(Some(value)), buf.len()),
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => (Err(err), 0),
            }
        };
        Ok((GetAdmissionregistrationV1beta1APIResourcesResponse::Other(result), read))
    }
}
// Generated from operation getApiextensionsAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetApiextensionsAPIGroupResponse`]`>` constructor, or [`GetApiextensionsAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_apiextensions_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetApiextensionsAPIGroupResponse>), crate::RequestError> {
    // A plain GET with an empty body; the caller drives response parsing.
    let request = http::Request::get("/apis/apiextensions.k8s.io/".to_owned());
    match request.body(Vec::new()) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}

/// Use `<GetApiextensionsAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_apiextensions_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetApiextensionsAPIGroupResponse {
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for GetApiextensionsAPIGroupResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200 OK: the buffer must deserialize into the expected type.
            return match serde_json::from_slice(buf) {
                Ok(value) => Ok((GetApiextensionsAPIGroupResponse::Ok(value), buf.len())),
                Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
                Err(err) => Err(crate::ResponseError::Json(err)),
            };
        }
        // Any other status: accept an empty body, otherwise try to parse
        // arbitrary JSON, consuming the buffer only on success.
        let (result, read) = if buf.is_empty() {
            (Ok(None), 0)
        } else {
            match serde_json::from_slice(buf) {
                Ok(value) => (Ok(Some(value)), buf.len()),
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => (Err(err), 0),
            }
        };
        Ok((GetApiextensionsAPIGroupResponse::Other(result), read))
    }
}
// Generated from operation getApiextensionsV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetApiextensionsV1beta1APIResourcesResponse`]`>` constructor, or [`GetApiextensionsV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_apiextensions_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetApiextensionsV1beta1APIResourcesResponse>), crate::RequestError> {
    // A plain GET with an empty body; the caller drives response parsing.
    let request = http::Request::get("/apis/apiextensions.k8s.io/v1beta1/".to_owned());
    match request.body(Vec::new()) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}

/// Use `<GetApiextensionsV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_apiextensions_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetApiextensionsV1beta1APIResourcesResponse {
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for GetApiextensionsV1beta1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200 OK: the buffer must deserialize into the expected type.
            return match serde_json::from_slice(buf) {
                Ok(value) => Ok((GetApiextensionsV1beta1APIResourcesResponse::Ok(value), buf.len())),
                Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
                Err(err) => Err(crate::ResponseError::Json(err)),
            };
        }
        // Any other status: accept an empty body, otherwise try to parse
        // arbitrary JSON, consuming the buffer only on success.
        let (result, read) = if buf.is_empty() {
            (Ok(None), 0)
        } else {
            match serde_json::from_slice(buf) {
                Ok(value) => (Ok(Some(value)), buf.len()),
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => (Err(err), 0),
            }
        };
        Ok((GetApiextensionsV1beta1APIResourcesResponse::Other(result), read))
    }
}
// Generated from operation getApiregistrationAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetApiregistrationAPIGroupResponse`]`>` constructor, or [`GetApiregistrationAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_apiregistration_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetApiregistrationAPIGroupResponse>), crate::RequestError> {
    // A plain GET with an empty body; the caller drives response parsing.
    let request = http::Request::get("/apis/apiregistration.k8s.io/".to_owned());
    match request.body(Vec::new()) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}

/// Use `<GetApiregistrationAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_apiregistration_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetApiregistrationAPIGroupResponse {
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for GetApiregistrationAPIGroupResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200 OK: the buffer must deserialize into the expected type.
            return match serde_json::from_slice(buf) {
                Ok(value) => Ok((GetApiregistrationAPIGroupResponse::Ok(value), buf.len())),
                Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
                Err(err) => Err(crate::ResponseError::Json(err)),
            };
        }
        // Any other status: accept an empty body, otherwise try to parse
        // arbitrary JSON, consuming the buffer only on success.
        let (result, read) = if buf.is_empty() {
            (Ok(None), 0)
        } else {
            match serde_json::from_slice(buf) {
                Ok(value) => (Ok(Some(value)), buf.len()),
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => (Err(err), 0),
            }
        };
        Ok((GetApiregistrationAPIGroupResponse::Other(result), read))
    }
}
// Generated from operation getApiregistrationV1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetApiregistrationV1APIResourcesResponse`]`>` constructor, or [`GetApiregistrationV1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_apiregistration_v1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetApiregistrationV1APIResourcesResponse>), crate::RequestError> {
    // A plain GET with an empty body; the caller drives response parsing.
    let request = http::Request::get("/apis/apiregistration.k8s.io/v1/".to_owned());
    match request.body(Vec::new()) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}

/// Use `<GetApiregistrationV1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_apiregistration_v1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetApiregistrationV1APIResourcesResponse {
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for GetApiregistrationV1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200 OK: the buffer must deserialize into the expected type.
            return match serde_json::from_slice(buf) {
                Ok(value) => Ok((GetApiregistrationV1APIResourcesResponse::Ok(value), buf.len())),
                Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
                Err(err) => Err(crate::ResponseError::Json(err)),
            };
        }
        // Any other status: accept an empty body, otherwise try to parse
        // arbitrary JSON, consuming the buffer only on success.
        let (result, read) = if buf.is_empty() {
            (Ok(None), 0)
        } else {
            match serde_json::from_slice(buf) {
                Ok(value) => (Ok(Some(value)), buf.len()),
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => (Err(err), 0),
            }
        };
        Ok((GetApiregistrationV1APIResourcesResponse::Other(result), read))
    }
}
// Generated from operation getApiregistrationV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetApiregistrationV1beta1APIResourcesResponse`]`>` constructor, or [`GetApiregistrationV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_apiregistration_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetApiregistrationV1beta1APIResourcesResponse>), crate::RequestError> {
    // A plain GET with an empty body; the caller drives response parsing.
    let request = http::Request::get("/apis/apiregistration.k8s.io/v1beta1/".to_owned());
    match request.body(Vec::new()) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}

/// Use `<GetApiregistrationV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_apiregistration_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetApiregistrationV1beta1APIResourcesResponse {
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for GetApiregistrationV1beta1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200 OK: the buffer must deserialize into the expected type.
            return match serde_json::from_slice(buf) {
                Ok(value) => Ok((GetApiregistrationV1beta1APIResourcesResponse::Ok(value), buf.len())),
                Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
                Err(err) => Err(crate::ResponseError::Json(err)),
            };
        }
        // Any other status: accept an empty body, otherwise try to parse
        // arbitrary JSON, consuming the buffer only on success.
        let (result, read) = if buf.is_empty() {
            (Ok(None), 0)
        } else {
            match serde_json::from_slice(buf) {
                Ok(value) => (Ok(Some(value)), buf.len()),
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => (Err(err), 0),
            }
        };
        Ok((GetApiregistrationV1beta1APIResourcesResponse::Other(result), read))
    }
}
// Generated from operation getAppsAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAppsAPIGroupResponse`]`>` constructor, or [`GetAppsAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_apps_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAppsAPIGroupResponse>), crate::RequestError> {
    // A plain GET with an empty body; the caller drives response parsing.
    let request = http::Request::get("/apis/apps/".to_owned());
    match request.body(Vec::new()) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}

/// Use `<GetAppsAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_apps_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAppsAPIGroupResponse {
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for GetAppsAPIGroupResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200 OK: the buffer must deserialize into the expected type.
            return match serde_json::from_slice(buf) {
                Ok(value) => Ok((GetAppsAPIGroupResponse::Ok(value), buf.len())),
                Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
                Err(err) => Err(crate::ResponseError::Json(err)),
            };
        }
        // Any other status: accept an empty body, otherwise try to parse
        // arbitrary JSON, consuming the buffer only on success.
        let (result, read) = if buf.is_empty() {
            (Ok(None), 0)
        } else {
            match serde_json::from_slice(buf) {
                Ok(value) => (Ok(Some(value)), buf.len()),
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => (Err(err), 0),
            }
        };
        Ok((GetAppsAPIGroupResponse::Other(result), read))
    }
}
// Generated from operation getAppsV1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAppsV1APIResourcesResponse`]`>` constructor, or [`GetAppsV1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_apps_v1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAppsV1APIResourcesResponse>), crate::RequestError> {
    // A plain GET with an empty body; the caller drives response parsing.
    let request = http::Request::get("/apis/apps/v1/".to_owned());
    match request.body(Vec::new()) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}

/// Use `<GetAppsV1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_apps_v1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAppsV1APIResourcesResponse {
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}

#[cfg(feature = "api")]
impl crate::Response for GetAppsV1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200 OK: the buffer must deserialize into the expected type.
            return match serde_json::from_slice(buf) {
                Ok(value) => Ok((GetAppsV1APIResourcesResponse::Ok(value), buf.len())),
                Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
                Err(err) => Err(crate::ResponseError::Json(err)),
            };
        }
        // Any other status: accept an empty body, otherwise try to parse
        // arbitrary JSON, consuming the buffer only on success.
        let (result, read) = if buf.is_empty() {
            (Ok(None), 0)
        } else {
            match serde_json::from_slice(buf) {
                Ok(value) => (Ok(Some(value)), buf.len()),
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => (Err(err), 0),
            }
        };
        Ok((GetAppsV1APIResourcesResponse::Other(result), read))
    }
}
// Generated from operation getAppsV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAppsV1beta1APIResourcesResponse`]`>` constructor, or [`GetAppsV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_apps_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAppsV1beta1APIResourcesResponse>), crate::RequestError> {
    // Discovery call: GET the group-version root with an empty body.
    let path = "/apis/apps/v1beta1/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetAppsV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_apps_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAppsV1beta1APIResourcesResponse {
    /// HTTP 200: the body parsed as an `APIResourceList`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetAppsV1beta1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetAppsV1beta1APIResourcesResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetAppsV1beta1APIResourcesResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetAppsV1beta1APIResourcesResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetAppsV1beta1APIResourcesResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getAppsV1beta2APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAppsV1beta2APIResourcesResponse`]`>` constructor, or [`GetAppsV1beta2APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_apps_v1beta2_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAppsV1beta2APIResourcesResponse>), crate::RequestError> {
    // Discovery call: GET the group-version root with an empty body.
    let path = "/apis/apps/v1beta2/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetAppsV1beta2APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_apps_v1beta2_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAppsV1beta2APIResourcesResponse {
    /// HTTP 200: the body parsed as an `APIResourceList`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetAppsV1beta2APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetAppsV1beta2APIResourcesResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetAppsV1beta2APIResourcesResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetAppsV1beta2APIResourcesResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetAppsV1beta2APIResourcesResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getAuditregistrationAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAuditregistrationAPIGroupResponse`]`>` constructor, or [`GetAuditregistrationAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_auditregistration_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAuditregistrationAPIGroupResponse>), crate::RequestError> {
    // Discovery call: GET the API group root with an empty body.
    let path = "/apis/auditregistration.k8s.io/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetAuditregistrationAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_auditregistration_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAuditregistrationAPIGroupResponse {
    /// HTTP 200: the body parsed as an `APIGroup`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetAuditregistrationAPIGroupResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetAuditregistrationAPIGroupResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetAuditregistrationAPIGroupResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetAuditregistrationAPIGroupResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetAuditregistrationAPIGroupResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getAuditregistrationV1alpha1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAuditregistrationV1alpha1APIResourcesResponse`]`>` constructor, or [`GetAuditregistrationV1alpha1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_auditregistration_v1alpha1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAuditregistrationV1alpha1APIResourcesResponse>), crate::RequestError> {
    // Discovery call: GET the group-version root with an empty body.
    let path = "/apis/auditregistration.k8s.io/v1alpha1/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetAuditregistrationV1alpha1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_auditregistration_v1alpha1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAuditregistrationV1alpha1APIResourcesResponse {
    /// HTTP 200: the body parsed as an `APIResourceList`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetAuditregistrationV1alpha1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetAuditregistrationV1alpha1APIResourcesResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetAuditregistrationV1alpha1APIResourcesResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetAuditregistrationV1alpha1APIResourcesResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetAuditregistrationV1alpha1APIResourcesResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getAuthenticationAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAuthenticationAPIGroupResponse`]`>` constructor, or [`GetAuthenticationAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_authentication_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAuthenticationAPIGroupResponse>), crate::RequestError> {
    // Discovery call: GET the API group root with an empty body.
    let path = "/apis/authentication.k8s.io/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetAuthenticationAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_authentication_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAuthenticationAPIGroupResponse {
    /// HTTP 200: the body parsed as an `APIGroup`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetAuthenticationAPIGroupResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetAuthenticationAPIGroupResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetAuthenticationAPIGroupResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetAuthenticationAPIGroupResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetAuthenticationAPIGroupResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getAuthenticationV1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAuthenticationV1APIResourcesResponse`]`>` constructor, or [`GetAuthenticationV1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_authentication_v1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAuthenticationV1APIResourcesResponse>), crate::RequestError> {
    // Discovery call: GET the group-version root with an empty body.
    let path = "/apis/authentication.k8s.io/v1/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetAuthenticationV1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_authentication_v1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAuthenticationV1APIResourcesResponse {
    /// HTTP 200: the body parsed as an `APIResourceList`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetAuthenticationV1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetAuthenticationV1APIResourcesResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetAuthenticationV1APIResourcesResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetAuthenticationV1APIResourcesResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetAuthenticationV1APIResourcesResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getAuthenticationV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAuthenticationV1beta1APIResourcesResponse`]`>` constructor, or [`GetAuthenticationV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_authentication_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAuthenticationV1beta1APIResourcesResponse>), crate::RequestError> {
    // Discovery call: GET the group-version root with an empty body.
    let path = "/apis/authentication.k8s.io/v1beta1/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetAuthenticationV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_authentication_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAuthenticationV1beta1APIResourcesResponse {
    /// HTTP 200: the body parsed as an `APIResourceList`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetAuthenticationV1beta1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetAuthenticationV1beta1APIResourcesResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetAuthenticationV1beta1APIResourcesResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetAuthenticationV1beta1APIResourcesResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetAuthenticationV1beta1APIResourcesResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getAuthorizationAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAuthorizationAPIGroupResponse`]`>` constructor, or [`GetAuthorizationAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_authorization_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAuthorizationAPIGroupResponse>), crate::RequestError> {
    // Discovery call: GET the API group root with an empty body.
    let path = "/apis/authorization.k8s.io/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetAuthorizationAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_authorization_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAuthorizationAPIGroupResponse {
    /// HTTP 200: the body parsed as an `APIGroup`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetAuthorizationAPIGroupResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetAuthorizationAPIGroupResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetAuthorizationAPIGroupResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetAuthorizationAPIGroupResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetAuthorizationAPIGroupResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getAuthorizationV1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAuthorizationV1APIResourcesResponse`]`>` constructor, or [`GetAuthorizationV1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_authorization_v1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAuthorizationV1APIResourcesResponse>), crate::RequestError> {
    // Discovery call: GET the group-version root with an empty body.
    let path = "/apis/authorization.k8s.io/v1/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetAuthorizationV1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_authorization_v1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAuthorizationV1APIResourcesResponse {
    /// HTTP 200: the body parsed as an `APIResourceList`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetAuthorizationV1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetAuthorizationV1APIResourcesResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetAuthorizationV1APIResourcesResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetAuthorizationV1APIResourcesResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetAuthorizationV1APIResourcesResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getAuthorizationV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAuthorizationV1beta1APIResourcesResponse`]`>` constructor, or [`GetAuthorizationV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_authorization_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAuthorizationV1beta1APIResourcesResponse>), crate::RequestError> {
    // Discovery call: GET the group-version root with an empty body.
    let path = "/apis/authorization.k8s.io/v1beta1/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetAuthorizationV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_authorization_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAuthorizationV1beta1APIResourcesResponse {
    /// HTTP 200: the body parsed as an `APIResourceList`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetAuthorizationV1beta1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetAuthorizationV1beta1APIResourcesResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetAuthorizationV1beta1APIResourcesResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetAuthorizationV1beta1APIResourcesResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetAuthorizationV1beta1APIResourcesResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getAutoscalingAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAutoscalingAPIGroupResponse`]`>` constructor, or [`GetAutoscalingAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_autoscaling_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAutoscalingAPIGroupResponse>), crate::RequestError> {
    // Discovery call: GET the API group root with an empty body.
    let path = "/apis/autoscaling/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetAutoscalingAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_autoscaling_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAutoscalingAPIGroupResponse {
    /// HTTP 200: the body parsed as an `APIGroup`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetAutoscalingAPIGroupResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetAutoscalingAPIGroupResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetAutoscalingAPIGroupResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetAutoscalingAPIGroupResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetAutoscalingAPIGroupResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getAutoscalingV1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAutoscalingV1APIResourcesResponse`]`>` constructor, or [`GetAutoscalingV1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_autoscaling_v1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAutoscalingV1APIResourcesResponse>), crate::RequestError> {
    // Discovery call: GET the group-version root with an empty body.
    let path = "/apis/autoscaling/v1/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetAutoscalingV1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_autoscaling_v1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAutoscalingV1APIResourcesResponse {
    /// HTTP 200: the body parsed as an `APIResourceList`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetAutoscalingV1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetAutoscalingV1APIResourcesResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetAutoscalingV1APIResourcesResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetAutoscalingV1APIResourcesResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetAutoscalingV1APIResourcesResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getAutoscalingV2beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAutoscalingV2beta1APIResourcesResponse`]`>` constructor, or [`GetAutoscalingV2beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_autoscaling_v2beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAutoscalingV2beta1APIResourcesResponse>), crate::RequestError> {
    // Discovery call: GET the group-version root with an empty body.
    let path = "/apis/autoscaling/v2beta1/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetAutoscalingV2beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_autoscaling_v2beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAutoscalingV2beta1APIResourcesResponse {
    /// HTTP 200: the body parsed as an `APIResourceList`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetAutoscalingV2beta1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetAutoscalingV2beta1APIResourcesResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetAutoscalingV2beta1APIResourcesResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetAutoscalingV2beta1APIResourcesResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetAutoscalingV2beta1APIResourcesResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getAutoscalingV2beta2APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetAutoscalingV2beta2APIResourcesResponse`]`>` constructor, or [`GetAutoscalingV2beta2APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_autoscaling_v2beta2_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetAutoscalingV2beta2APIResourcesResponse>), crate::RequestError> {
    // Discovery call: GET the group-version root with an empty body.
    let path = "/apis/autoscaling/v2beta2/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetAutoscalingV2beta2APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_autoscaling_v2beta2_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetAutoscalingV2beta2APIResourcesResponse {
    /// HTTP 200: the body parsed as an `APIResourceList`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetAutoscalingV2beta2APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetAutoscalingV2beta2APIResourcesResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetAutoscalingV2beta2APIResourcesResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetAutoscalingV2beta2APIResourcesResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetAutoscalingV2beta2APIResourcesResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getBatchAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetBatchAPIGroupResponse`]`>` constructor, or [`GetBatchAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_batch_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetBatchAPIGroupResponse>), crate::RequestError> {
    // Discovery call: GET the API group root with an empty body.
    let path = "/apis/batch/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetBatchAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_batch_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetBatchAPIGroupResponse {
    /// HTTP 200: the body parsed as an `APIGroup`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetBatchAPIGroupResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetBatchAPIGroupResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetBatchAPIGroupResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetBatchAPIGroupResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetBatchAPIGroupResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getBatchV1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetBatchV1APIResourcesResponse`]`>` constructor, or [`GetBatchV1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_batch_v1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetBatchV1APIResourcesResponse>), crate::RequestError> {
    // Discovery call: GET the group-version root with an empty body.
    let path = "/apis/batch/v1/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetBatchV1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_batch_v1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetBatchV1APIResourcesResponse {
    /// HTTP 200: the body parsed as an `APIResourceList`.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the body parsed as arbitrary JSON, if present.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetBatchV1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the buffer must deserialize into the expected typed body.
            let value = match serde_json::from_slice(buf) {
                Ok(value) => value,
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((GetBatchV1APIResourcesResponse::Ok(value), buf.len()));
        }
        // Any other status: expose the raw JSON body (if any). A truncated body
        // asks for more data; a malformed one is reported inside `Other` rather
        // than failing the parse outright.
        if buf.is_empty() {
            return Ok((GetBatchV1APIResourcesResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((GetBatchV1APIResourcesResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((GetBatchV1APIResourcesResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation getBatchV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetBatchV1beta1APIResourcesResponse`]`>` constructor, or [`GetBatchV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_batch_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetBatchV1beta1APIResourcesResponse>), crate::RequestError> {
    // Discovery call: GET the group-version root with an empty body.
    let path = "/apis/batch/v1beta1/".to_owned();
    match http::Request::get(path).body(Vec::new()) {
        Ok(req) => Ok((req, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetBatchV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_batch_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetBatchV1beta1APIResourcesResponse {
Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetBatchV1beta1APIResourcesResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((GetBatchV1beta1APIResourcesResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((GetBatchV1beta1APIResourcesResponse::Other(result), read))
},
}
}
}
// Generated from operation getBatchV2alpha1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetBatchV2alpha1APIResourcesResponse`]`>` constructor, or [`GetBatchV2alpha1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_batch_v2alpha1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetBatchV2alpha1APIResourcesResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/apis/batch/v2alpha1/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetBatchV2alpha1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_batch_v2alpha1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetBatchV2alpha1APIResourcesResponse {
    /// 200 OK: the deserialized resource list.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetBatchV2alpha1APIResourcesResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetBatchV2alpha1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetBatchV2alpha1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getCertificatesAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetCertificatesAPIGroupResponse`]`>` constructor, or [`GetCertificatesAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_certificates_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetCertificatesAPIGroupResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/apis/certificates.k8s.io/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetCertificatesAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_certificates_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetCertificatesAPIGroupResponse {
    /// 200 OK: the deserialized API group.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetCertificatesAPIGroupResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetCertificatesAPIGroupResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetCertificatesAPIGroupResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getCertificatesV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetCertificatesV1beta1APIResourcesResponse`]`>` constructor, or [`GetCertificatesV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_certificates_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetCertificatesV1beta1APIResourcesResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/apis/certificates.k8s.io/v1beta1/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetCertificatesV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_certificates_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetCertificatesV1beta1APIResourcesResponse {
    /// 200 OK: the deserialized resource list.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetCertificatesV1beta1APIResourcesResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetCertificatesV1beta1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetCertificatesV1beta1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getCodeVersion
/// get the code version
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetCodeVersionResponse`]`>` constructor, or [`GetCodeVersionResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_code_version(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetCodeVersionResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/version/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetCodeVersionResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_code_version`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetCodeVersionResponse {
    /// 200 OK: the deserialized version info.
    Ok(crate::apimachinery::pkg::version::Info),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetCodeVersionResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetCodeVersionResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetCodeVersionResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getCoordinationAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetCoordinationAPIGroupResponse`]`>` constructor, or [`GetCoordinationAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_coordination_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetCoordinationAPIGroupResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/apis/coordination.k8s.io/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetCoordinationAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_coordination_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetCoordinationAPIGroupResponse {
    /// 200 OK: the deserialized API group.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetCoordinationAPIGroupResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetCoordinationAPIGroupResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetCoordinationAPIGroupResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getCoordinationV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetCoordinationV1beta1APIResourcesResponse`]`>` constructor, or [`GetCoordinationV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_coordination_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetCoordinationV1beta1APIResourcesResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/apis/coordination.k8s.io/v1beta1/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetCoordinationV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_coordination_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetCoordinationV1beta1APIResourcesResponse {
    /// 200 OK: the deserialized resource list.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetCoordinationV1beta1APIResourcesResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetCoordinationV1beta1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetCoordinationV1beta1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getCoreAPIVersions
/// get available API versions
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetCoreAPIVersionsResponse`]`>` constructor, or [`GetCoreAPIVersionsResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_core_api_versions(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetCoreAPIVersionsResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/api/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetCoreAPIVersionsResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_core_api_versions`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetCoreAPIVersionsResponse {
    /// 200 OK: the deserialized API versions.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIVersions),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetCoreAPIVersionsResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetCoreAPIVersionsResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetCoreAPIVersionsResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getCoreV1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetCoreV1APIResourcesResponse`]`>` constructor, or [`GetCoreV1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_core_v1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetCoreV1APIResourcesResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/api/v1/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetCoreV1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_core_v1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetCoreV1APIResourcesResponse {
    /// 200 OK: the deserialized resource list.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetCoreV1APIResourcesResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetCoreV1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetCoreV1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getEventsAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetEventsAPIGroupResponse`]`>` constructor, or [`GetEventsAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_events_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetEventsAPIGroupResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/apis/events.k8s.io/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetEventsAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_events_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetEventsAPIGroupResponse {
    /// 200 OK: the deserialized API group.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetEventsAPIGroupResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetEventsAPIGroupResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetEventsAPIGroupResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getEventsV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetEventsV1beta1APIResourcesResponse`]`>` constructor, or [`GetEventsV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_events_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetEventsV1beta1APIResourcesResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/apis/events.k8s.io/v1beta1/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetEventsV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_events_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetEventsV1beta1APIResourcesResponse {
    /// 200 OK: the deserialized resource list.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetEventsV1beta1APIResourcesResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetEventsV1beta1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetEventsV1beta1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getExtensionsAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetExtensionsAPIGroupResponse`]`>` constructor, or [`GetExtensionsAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_extensions_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetExtensionsAPIGroupResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/apis/extensions/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetExtensionsAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_extensions_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetExtensionsAPIGroupResponse {
    /// 200 OK: the deserialized API group.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetExtensionsAPIGroupResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetExtensionsAPIGroupResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetExtensionsAPIGroupResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getExtensionsV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetExtensionsV1beta1APIResourcesResponse`]`>` constructor, or [`GetExtensionsV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_extensions_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetExtensionsV1beta1APIResourcesResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/apis/extensions/v1beta1/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetExtensionsV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_extensions_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetExtensionsV1beta1APIResourcesResponse {
    /// 200 OK: the deserialized resource list.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetExtensionsV1beta1APIResourcesResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetExtensionsV1beta1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetExtensionsV1beta1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getNetworkingAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetNetworkingAPIGroupResponse`]`>` constructor, or [`GetNetworkingAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_networking_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetNetworkingAPIGroupResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/apis/networking.k8s.io/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetNetworkingAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_networking_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetNetworkingAPIGroupResponse {
    /// 200 OK: the deserialized API group.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetNetworkingAPIGroupResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetNetworkingAPIGroupResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetNetworkingAPIGroupResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getNetworkingV1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetNetworkingV1APIResourcesResponse`]`>` constructor, or [`GetNetworkingV1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_networking_v1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetNetworkingV1APIResourcesResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/apis/networking.k8s.io/v1/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetNetworkingV1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_networking_v1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetNetworkingV1APIResourcesResponse {
    /// 200 OK: the deserialized resource list.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetNetworkingV1APIResourcesResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetNetworkingV1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetNetworkingV1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getPolicyAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetPolicyAPIGroupResponse`]`>` constructor, or [`GetPolicyAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_policy_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetPolicyAPIGroupResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/apis/policy/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetPolicyAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_policy_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetPolicyAPIGroupResponse {
    /// 200 OK: the deserialized API group.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetPolicyAPIGroupResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetPolicyAPIGroupResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetPolicyAPIGroupResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getPolicyV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetPolicyV1beta1APIResourcesResponse`]`>` constructor, or [`GetPolicyV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_policy_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetPolicyV1beta1APIResourcesResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/apis/policy/v1beta1/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetPolicyV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_policy_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetPolicyV1beta1APIResourcesResponse {
    /// 200 OK: the deserialized resource list.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetPolicyV1beta1APIResourcesResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetPolicyV1beta1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetPolicyV1beta1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getRbacAuthorizationAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetRbacAuthorizationAPIGroupResponse`]`>` constructor, or [`GetRbacAuthorizationAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_rbac_authorization_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetRbacAuthorizationAPIGroupResponse>), crate::RequestError> {
    // Fixed-path GET with an empty body; the only failure mode is request construction.
    let __url = "/apis/rbac.authorization.k8s.io/".to_owned();
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetRbacAuthorizationAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_rbac_authorization_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetRbacAuthorizationAPIGroupResponse {
    /// 200 OK: the deserialized API group.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the raw JSON body, if present and parseable.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetRbacAuthorizationAPIGroupResponse {
    // Incremental parser: `buf` may hold only a prefix of the full body. A JSON
    // EOF error maps to `NeedMoreData`; the returned `usize` is the number of
    // bytes of `buf` consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetRbacAuthorizationAPIGroupResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetRbacAuthorizationAPIGroupResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getRbacAuthorizationV1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetRbacAuthorizationV1APIResourcesResponse`]`>` constructor, or [`GetRbacAuthorizationV1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_rbac_authorization_v1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetRbacAuthorizationV1APIResourcesResponse>), crate::RequestError> {
    let __url = "/apis/rbac.authorization.k8s.io/v1/".to_owned();
    // GET request with an empty body; any error from the http request builder surfaces as RequestError::Http.
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetRbacAuthorizationV1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_rbac_authorization_v1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetRbacAuthorizationV1APIResourcesResponse {
    /// 200 OK: the list of API resources served by this group/version.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw response body, parsed as JSON when non-empty.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetRbacAuthorizationV1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // Deserialize the whole buffer; an EOF parse error means the response is
                // incomplete, so ask the caller for more bytes via NeedMoreData.
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetRbacAuthorizationV1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                // Unexpected status: keep the raw JSON value if the body is non-empty;
                // an empty buffer consumes 0 bytes and yields Ok(None).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetRbacAuthorizationV1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getRbacAuthorizationV1alpha1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetRbacAuthorizationV1alpha1APIResourcesResponse`]`>` constructor, or [`GetRbacAuthorizationV1alpha1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_rbac_authorization_v1alpha1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetRbacAuthorizationV1alpha1APIResourcesResponse>), crate::RequestError> {
    let __url = "/apis/rbac.authorization.k8s.io/v1alpha1/".to_owned();
    // GET request with an empty body; any error from the http request builder surfaces as RequestError::Http.
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetRbacAuthorizationV1alpha1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_rbac_authorization_v1alpha1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetRbacAuthorizationV1alpha1APIResourcesResponse {
    /// 200 OK: the list of API resources served by this group/version.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw response body, parsed as JSON when non-empty.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetRbacAuthorizationV1alpha1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // Deserialize the whole buffer; an EOF parse error means the response is
                // incomplete, so ask the caller for more bytes via NeedMoreData.
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetRbacAuthorizationV1alpha1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                // Unexpected status: keep the raw JSON value if the body is non-empty;
                // an empty buffer consumes 0 bytes and yields Ok(None).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetRbacAuthorizationV1alpha1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getRbacAuthorizationV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetRbacAuthorizationV1beta1APIResourcesResponse`]`>` constructor, or [`GetRbacAuthorizationV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_rbac_authorization_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetRbacAuthorizationV1beta1APIResourcesResponse>), crate::RequestError> {
    let __url = "/apis/rbac.authorization.k8s.io/v1beta1/".to_owned();
    // GET request with an empty body; any error from the http request builder surfaces as RequestError::Http.
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetRbacAuthorizationV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_rbac_authorization_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetRbacAuthorizationV1beta1APIResourcesResponse {
    /// 200 OK: the list of API resources served by this group/version.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw response body, parsed as JSON when non-empty.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetRbacAuthorizationV1beta1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // Deserialize the whole buffer; an EOF parse error means the response is
                // incomplete, so ask the caller for more bytes via NeedMoreData.
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetRbacAuthorizationV1beta1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                // Unexpected status: keep the raw JSON value if the body is non-empty;
                // an empty buffer consumes 0 bytes and yields Ok(None).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetRbacAuthorizationV1beta1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getSchedulingAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetSchedulingAPIGroupResponse`]`>` constructor, or [`GetSchedulingAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_scheduling_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetSchedulingAPIGroupResponse>), crate::RequestError> {
    let __url = "/apis/scheduling.k8s.io/".to_owned();
    // GET request with an empty body; any error from the http request builder surfaces as RequestError::Http.
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetSchedulingAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_scheduling_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetSchedulingAPIGroupResponse {
    /// 200 OK: the APIGroup description for this API group.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the raw response body, parsed as JSON when non-empty.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetSchedulingAPIGroupResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // Deserialize the whole buffer; an EOF parse error means the response is
                // incomplete, so ask the caller for more bytes via NeedMoreData.
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetSchedulingAPIGroupResponse::Ok(result), buf.len()))
            },
            _ => {
                // Unexpected status: keep the raw JSON value if the body is non-empty;
                // an empty buffer consumes 0 bytes and yields Ok(None).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetSchedulingAPIGroupResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getSchedulingV1alpha1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetSchedulingV1alpha1APIResourcesResponse`]`>` constructor, or [`GetSchedulingV1alpha1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_scheduling_v1alpha1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetSchedulingV1alpha1APIResourcesResponse>), crate::RequestError> {
    let __url = "/apis/scheduling.k8s.io/v1alpha1/".to_owned();
    // GET request with an empty body; any error from the http request builder surfaces as RequestError::Http.
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetSchedulingV1alpha1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_scheduling_v1alpha1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetSchedulingV1alpha1APIResourcesResponse {
    /// 200 OK: the list of API resources served by this group/version.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw response body, parsed as JSON when non-empty.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetSchedulingV1alpha1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // Deserialize the whole buffer; an EOF parse error means the response is
                // incomplete, so ask the caller for more bytes via NeedMoreData.
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetSchedulingV1alpha1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                // Unexpected status: keep the raw JSON value if the body is non-empty;
                // an empty buffer consumes 0 bytes and yields Ok(None).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetSchedulingV1alpha1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getSchedulingV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetSchedulingV1beta1APIResourcesResponse`]`>` constructor, or [`GetSchedulingV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_scheduling_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetSchedulingV1beta1APIResourcesResponse>), crate::RequestError> {
    let __url = "/apis/scheduling.k8s.io/v1beta1/".to_owned();
    // GET request with an empty body; any error from the http request builder surfaces as RequestError::Http.
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetSchedulingV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_scheduling_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetSchedulingV1beta1APIResourcesResponse {
    /// 200 OK: the list of API resources served by this group/version.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw response body, parsed as JSON when non-empty.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetSchedulingV1beta1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // Deserialize the whole buffer; an EOF parse error means the response is
                // incomplete, so ask the caller for more bytes via NeedMoreData.
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetSchedulingV1beta1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                // Unexpected status: keep the raw JSON value if the body is non-empty;
                // an empty buffer consumes 0 bytes and yields Ok(None).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetSchedulingV1beta1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getSettingsAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetSettingsAPIGroupResponse`]`>` constructor, or [`GetSettingsAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_settings_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetSettingsAPIGroupResponse>), crate::RequestError> {
    let __url = "/apis/settings.k8s.io/".to_owned();
    // GET request with an empty body; any error from the http request builder surfaces as RequestError::Http.
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetSettingsAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_settings_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetSettingsAPIGroupResponse {
    /// 200 OK: the APIGroup description for this API group.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the raw response body, parsed as JSON when non-empty.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetSettingsAPIGroupResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // Deserialize the whole buffer; an EOF parse error means the response is
                // incomplete, so ask the caller for more bytes via NeedMoreData.
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetSettingsAPIGroupResponse::Ok(result), buf.len()))
            },
            _ => {
                // Unexpected status: keep the raw JSON value if the body is non-empty;
                // an empty buffer consumes 0 bytes and yields Ok(None).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetSettingsAPIGroupResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getSettingsV1alpha1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetSettingsV1alpha1APIResourcesResponse`]`>` constructor, or [`GetSettingsV1alpha1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_settings_v1alpha1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetSettingsV1alpha1APIResourcesResponse>), crate::RequestError> {
    let __url = "/apis/settings.k8s.io/v1alpha1/".to_owned();
    // GET request with an empty body; any error from the http request builder surfaces as RequestError::Http.
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetSettingsV1alpha1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_settings_v1alpha1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetSettingsV1alpha1APIResourcesResponse {
    /// 200 OK: the list of API resources served by this group/version.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw response body, parsed as JSON when non-empty.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetSettingsV1alpha1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // Deserialize the whole buffer; an EOF parse error means the response is
                // incomplete, so ask the caller for more bytes via NeedMoreData.
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetSettingsV1alpha1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                // Unexpected status: keep the raw JSON value if the body is non-empty;
                // an empty buffer consumes 0 bytes and yields Ok(None).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetSettingsV1alpha1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getStorageAPIGroup
/// get information of a group
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetStorageAPIGroupResponse`]`>` constructor, or [`GetStorageAPIGroupResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_storage_api_group(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetStorageAPIGroupResponse>), crate::RequestError> {
    let __url = "/apis/storage.k8s.io/".to_owned();
    // GET request with an empty body; any error from the http request builder surfaces as RequestError::Http.
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetStorageAPIGroupResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_storage_api_group`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetStorageAPIGroupResponse {
    /// 200 OK: the APIGroup description for this API group.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIGroup),
    /// Any other status: the raw response body, parsed as JSON when non-empty.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetStorageAPIGroupResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // Deserialize the whole buffer; an EOF parse error means the response is
                // incomplete, so ask the caller for more bytes via NeedMoreData.
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetStorageAPIGroupResponse::Ok(result), buf.len()))
            },
            _ => {
                // Unexpected status: keep the raw JSON value if the body is non-empty;
                // an empty buffer consumes 0 bytes and yields Ok(None).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetStorageAPIGroupResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getStorageV1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetStorageV1APIResourcesResponse`]`>` constructor, or [`GetStorageV1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_storage_v1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetStorageV1APIResourcesResponse>), crate::RequestError> {
    let __url = "/apis/storage.k8s.io/v1/".to_owned();
    // GET request with an empty body; any error from the http request builder surfaces as RequestError::Http.
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetStorageV1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_storage_v1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetStorageV1APIResourcesResponse {
    /// 200 OK: the list of API resources served by this group/version.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw response body, parsed as JSON when non-empty.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetStorageV1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // Deserialize the whole buffer; an EOF parse error means the response is
                // incomplete, so ask the caller for more bytes via NeedMoreData.
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetStorageV1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                // Unexpected status: keep the raw JSON value if the body is non-empty;
                // an empty buffer consumes 0 bytes and yields Ok(None).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetStorageV1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getStorageV1alpha1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetStorageV1alpha1APIResourcesResponse`]`>` constructor, or [`GetStorageV1alpha1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_storage_v1alpha1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetStorageV1alpha1APIResourcesResponse>), crate::RequestError> {
    let __url = "/apis/storage.k8s.io/v1alpha1/".to_owned();
    // GET request with an empty body; any error from the http request builder surfaces as RequestError::Http.
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetStorageV1alpha1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_storage_v1alpha1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetStorageV1alpha1APIResourcesResponse {
    /// 200 OK: the list of API resources served by this group/version.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw response body, parsed as JSON when non-empty.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetStorageV1alpha1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // Deserialize the whole buffer; an EOF parse error means the response is
                // incomplete, so ask the caller for more bytes via NeedMoreData.
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetStorageV1alpha1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                // Unexpected status: keep the raw JSON value if the body is non-empty;
                // an empty buffer consumes 0 bytes and yields Ok(None).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetStorageV1alpha1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation getStorageV1beta1APIResources
/// get available resources
///
/// Use the returned [`crate::ResponseBody`]`<`[`GetStorageV1beta1APIResourcesResponse`]`>` constructor, or [`GetStorageV1beta1APIResourcesResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn get_storage_v1beta1_api_resources(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<GetStorageV1beta1APIResourcesResponse>), crate::RequestError> {
    let __url = "/apis/storage.k8s.io/v1beta1/".to_owned();
    // GET request with an empty body; any error from the http request builder surfaces as RequestError::Http.
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<GetStorageV1beta1APIResourcesResponse as Response>::try_from_parts` to parse the HTTP response body of [`get_storage_v1beta1_api_resources`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum GetStorageV1beta1APIResourcesResponse {
    /// 200 OK: the list of API resources served by this group/version.
    Ok(crate::apimachinery::pkg::apis::meta::v1::APIResourceList),
    /// Any other status: the raw response body, parsed as JSON when non-empty.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for GetStorageV1beta1APIResourcesResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // Deserialize the whole buffer; an EOF parse error means the response is
                // incomplete, so ask the caller for more bytes via NeedMoreData.
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((GetStorageV1beta1APIResourcesResponse::Ok(result), buf.len()))
            },
            _ => {
                // Unexpected status: keep the raw JSON value if the body is non-empty;
                // an empty buffer consumes 0 bytes and yields Ok(None).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((GetStorageV1beta1APIResourcesResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation logFileHandler
/// Use the returned [`crate::ResponseBody`]`<`[`LogFileHandlerResponse`]`>` constructor, or [`LogFileHandlerResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `logpath`
///
///     path to the log
#[cfg(feature = "api")]
pub fn log_file_handler(
    logpath: &str,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<LogFileHandlerResponse>), crate::RequestError> {
    // Percent-encode the caller-supplied path so it is safe to embed as a URL path segment.
    let __url = format!("/logs/{logpath}",
        logpath = crate::percent_encoding::percent_encode(logpath.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
    );
    // GET request with an empty body; any error from the http request builder surfaces as RequestError::Http.
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<LogFileHandlerResponse as Response>::try_from_parts` to parse the HTTP response body of [`log_file_handler`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum LogFileHandlerResponse {
    /// Any status: the raw response body, parsed as JSON when non-empty.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for LogFileHandlerResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            _ => {
                // This operation declares no typed responses, so every status is handled the
                // same way: keep the raw JSON value if the body is non-empty; an empty buffer
                // consumes 0 bytes and yields Ok(None).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((LogFileHandlerResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation logFileListHandler
/// Use the returned [`crate::ResponseBody`]`<`[`LogFileListHandlerResponse`]`>` constructor, or [`LogFileListHandlerResponse`] directly, to parse the HTTP response.
#[cfg(feature = "api")]
pub fn log_file_list_handler(
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<LogFileListHandlerResponse>), crate::RequestError> {
    let __url = "/logs/".to_owned();
    // GET request with an empty body; any error from the http request builder surfaces as RequestError::Http.
    let __request = http::Request::get(__url);
    let __body = vec![];
    match __request.body(__body) {
        Ok(request) => Ok((request, crate::ResponseBody::new)),
        Err(err) => Err(crate::RequestError::Http(err)),
    }
}
/// Use `<LogFileListHandlerResponse as Response>::try_from_parts` to parse the HTTP response body of [`log_file_list_handler`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum LogFileListHandlerResponse {
    /// Any status: the raw response body, parsed as JSON when non-empty.
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for LogFileListHandlerResponse {
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            _ => {
                // This operation declares no typed responses, so every status is handled the
                // same way: keep the raw JSON value if the body is non-empty; an empty buffer
                // consumes 0 bytes and yields Ok(None).
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((LogFileListHandlerResponse::Other(result), read))
            },
        }
    }
}
| 43.26475 | 217 | 0.580821 |
ed0974559246f53136a9ecbc8bcf9a1891a7008e | 12,756 | //! Module that contains SMTLib Backend Implementation.
//!
//! This backend outputs the constraints in standard smt-lib2 format. Hence,
//! any solver that supports this format maybe used to solve for constraints.
use std::process::{Child};
use std::collections::{HashMap};
use std::io::{Read, Write};
use regex::Regex;
use petgraph::graph::{Graph, NodeIndex};
use petgraph::EdgeDirection;
use petgraph::visit::EdgeRef;
use backends::backend::{Logic, SMTBackend, SMTNode, SMTResult};
use super::backend::SMTRes;
// Abort loudly when a fallible expression fails; successful results are discarded.
macro_rules! must {
    ($e: expr) => {
        if $e.is_err() {
            panic!("write failed")
        }
    }
}
/// Trait that needs to be implemented in order to support a new solver. `SMTProc` is short for
/// "SMT Process".
///
/// To support a new solver that accepts input in the standard SMTLIB2 format, it is sufficient to
/// implement this trait for the struct. This trait describes the methods needed to spawn, and
/// communicate (read / write) with, the solver.
///
/// `read` and `write` have default implementations and need to be changed only if the
/// mode of communication is different (other than process pipes), or if some custom functionality
/// is required for the specific solver.
pub trait SMTProc {
    /// Function to initialize the solver. This includes spawning a process and keeping the process
    /// pipe open for read and write. The function takes &mut self as an argument to allow
    /// configuration during initialization.
    fn init(&mut self);
    /// Return a mutable reference to the solver's child process.
    fn pipe<'a>(&'a mut self) -> &'a mut Child;
    /// Write `s` to the solver's stdin and flush it so the solver sees the command immediately.
    ///
    /// Uses `write_all` rather than `write`: `Write::write` is allowed to perform a partial
    /// write, which would silently drop the tail of an SMT command and corrupt the dialogue.
    fn write<T: AsRef<str>>(&mut self, s: T) -> Result<(), String> {
        // TODO: Propagate I/O errors through the Result instead of panicking.
        if let Some(ref mut stdin) = self.pipe().stdin.as_mut() {
            stdin.write_all(s.as_ref().as_bytes()).expect("Write to stdin failed");
            stdin.flush().expect("Failed to flush stdin");
        }
        Ok(())
    }
    /// Helper hook to insert a process-specific read if needed at places where buffering is an
    /// issue. The default implementation does nothing.
    fn proc_specific_read(&mut self) {
    }
    /// Read whatever is currently available on the solver's stdout, in 2048-byte chunks.
    ///
    /// XXX: This read may block indefinitely if there is nothing on the pipe to be
    /// read. To prevent this we need a timeout mechanism after which we should
    /// return with
    /// an error, such as: ErrTimeout.
    /// Another important point to note here is that, if the data available to read
    /// is exactly
    /// 2048 bytes, then this reading mechanism fails and will end up waiting to
    /// read more data
    /// (when none is available) indefinitely.
    fn read(&mut self) -> String {
        let mut bytes_read = [0; 2048];
        let mut s = String::new();
        let solver = self.pipe();
        if let Some(ref mut stdout) = solver.stdout.as_mut() {
            loop {
                let n = stdout.read(&mut bytes_read).unwrap();
                s = format!("{}{}",
                            s,
                            String::from_utf8(bytes_read[0..n].to_vec()).unwrap());
                // A short read means the pipe is (currently) drained.
                if n < 2048 {
                    break;
                }
            }
        }
        s
    }
    /// A specific read tailored to the output of z3's `(check-sat)` command.
    ///
    /// Reads byte-by-byte, skipping newlines, until the accumulated buffer ends with
    /// "sat" (which also matches the tail of "unsat").
    fn read_checksat_output(&mut self) -> String {
        // Buffer to read into
        let mut buf = String::new();
        // Read from z3's stdout
        if let Some(ref mut stdout) = self.pipe().stdout.as_mut() {
            loop {
                for (_,c) in stdout.bytes().enumerate() {
                    let chr = c.unwrap() as char;
                    if chr == '\n' {
                        continue
                    }
                    buf.push(chr);
                    if buf.ends_with("sat") {
                        return buf;
                    }
                }
            }
        }
        unreachable!()
    }
    /// A specific read tailored to the output of z3's `(get-model)` command.
    ///
    /// Reads byte-by-byte and returns once every opened parenthesis has been matched,
    /// i.e. when the first balanced s-expression is complete.
    fn read_getmodel_output(&mut self) -> String {
        // Buffer to read into
        let mut buf = String::new();
        // Read from z3's stdout
        if let Some(ref mut stdout) = self.pipe().stdout.as_mut() {
            // Count for paren matching (to detect end of output)
            let mut count = 0;
            loop {
                for (_,c) in stdout.bytes().enumerate() {
                    let chr = c.unwrap() as char;
                    // Hotfix to deal with newline remaining in the buffer
                    if chr=='\n' && count==0 {
                        continue;
                    }
                    buf.push(chr);
                    match chr {
                        '(' => { count+=1; },
                        ')' => { count-=1; },
                        _ => {}
                    }
                    if count==0 {
                        return buf;
                    }
                }
            }
        }
        unreachable!()
    }
}
/// Edge payload for the expression graph.
#[derive(Clone, Debug)]
pub enum EdgeData {
    /// Position of the target node in its parent's operand list; used to
    /// recover operand order when expanding assertions.
    EdgeOrder(usize),
}
/// SMT solver backend that talks to the solver in the SMT-LIB2 textual format.
///
/// Expressions are stored as a graph: nodes hold terms (`T::Fns`), edges carry
/// operand ordering (`EdgeData::EdgeOrder`).
#[derive(Clone, Debug)]
pub struct SMTLib2<T: Logic> {
    // Logic (e.g. QF_BV) to configure the solver with, if any.
    logic: Option<T>,
    // Expression graph: nodes are terms, edges record operand order.
    gr: Graph<T::Fns, EdgeData>,
    // Counter used to generate fresh names for anonymous variables.
    var_index: usize,
    // Variable name -> (node index, sort) lookup.
    var_map: HashMap<String, (NodeIndex, T::Sorts)>,
    // Reverse lookup: node index -> variable name.
    idx_map: HashMap<NodeIndex, String>,
}
impl<L: Logic> SMTLib2<L> {
    /// Create a new backend, optionally configured with a logic that will be
    /// sent to the solver via `set_logic`.
    pub fn new(logic: Option<L>) -> SMTLib2<L> {
        // Return the struct literal directly (the old needless binding was a
        // clippy::let_and_return).
        SMTLib2 {
            logic: logic,
            gr: Graph::new(),
            var_index: 0,
            var_map: HashMap::new(),
            idx_map: HashMap::new(),
        }
    }

    /// Return the term (function/variable/constant) stored at node `ni`.
    ///
    /// Panics if `ni` is not a valid node of the expression graph.
    pub fn get_node_info(&self, ni: NodeIndex) -> &L::Fns {
        self.gr.node_weight(ni).expect("Invalid node index")
    }

    /// Return the operand nodes of `ni` (its outgoing neighbors).
    ///
    /// NOTE(review): neighbor order here is whatever the graph yields, not
    /// operand order; use the `EdgeOrder` edge weights when order matters.
    pub fn get_operands(&self, ni: NodeIndex) -> Vec<NodeIndex> {
        self.gr.neighbors_directed(ni, EdgeDirection::Outgoing)
            .collect::<Vec<_>>()
    }

    /// Recursively build the SMT-LIB2 s-expression for the subtree rooted at
    /// `ni`, e.g. `(op a b)` for function nodes or just the term for leaves.
    pub fn expand_assertion(&self, ni: NodeIndex) -> String {
        // Gather (operand, position) pairs and restore operand order.
        let mut children = self.gr
                               .edges_directed(ni, EdgeDirection::Outgoing)
                               .map(|edge_ref| {
                                   let pos = match *edge_ref.weight() {
                                       EdgeData::EdgeOrder(i) => i,
                                   };
                                   (edge_ref.target(), pos)
                               })
                               .collect::<Vec<_>>();
        children.sort_by_key(|&(_, pos)| pos);

        let is_fn = self.gr[ni].is_fn();
        // Build into a single buffer instead of re-allocating via `format!`
        // for every operand (the old code was O(n^2) in output length).
        let mut assertion = String::new();
        if is_fn {
            assertion.push('(');
        }
        assertion.push_str(&self.gr[ni].to_string());
        for &(child, _) in &children {
            assertion.push(' ');
            assertion.push_str(&self.expand_assertion(child));
        }
        if is_fn {
            assertion.push(')');
        }
        assertion
    }

    /// Add a constant term to the expression graph and return its node index.
    pub fn new_const<T: Into<L::Fns>>(&mut self, cval: T) -> NodeIndex {
        self.gr.add_node(cval.into())
    }

    /// Generate the solver input: all `(declare-fun ...)` lines for free
    /// variables, followed by one `(assert ...)` per root boolean function
    /// node. When `debug` is true each emitted line is also echoed to stdout.
    pub fn generate_asserts(&self, debug: bool) -> String {
        let mut result = String::new();
        // Write out all variable declarations first.
        for (name, val) in &self.var_map {
            let ni = val.0;
            let ty = &val.1;
            if self.gr[ni].is_var() {
                let decl = format!("(declare-fun {} () {})\n", name, ty);
                if debug {
                    print!("{}", decl);
                }
                result.push_str(&decl);
            }
        }
        // Roots (nodes with no incoming edge) that are boolean function
        // applications become assertions.
        for idx in self.gr.node_indices() {
            let is_root = self.gr
                              .edges_directed(idx, EdgeDirection::Incoming)
                              .count() == 0;
            if is_root && self.gr[idx].is_fn() && self.gr[idx].is_bool() {
                let assertion = format!("(assert {})\n", self.expand_assertion(idx));
                if debug {
                    print!("{}", assertion);
                }
                result.push_str(&assertion);
            }
        }
        result
    }
}
// Parse a numeral in any of z3's output formats: hexadecimal (`#x...`),
// binary (`#b...`) or plain decimal. Panics when the token is none of these,
// mirroring the previous inline `.unwrap()` behaviour. Shared by `simplify`
// and `solve`, which used to duplicate this logic.
fn parse_smt2_numeral(val_str: &str) -> u64 {
    let parsed = if val_str.len() > 2 && &val_str[0..2] == "#x" {
        u64::from_str_radix(&val_str[2..], 16)
    } else if val_str.len() > 2 && &val_str[0..2] == "#b" {
        u64::from_str_radix(&val_str[2..], 2)
    } else {
        val_str.parse::<u64>()
    };
    parsed.expect("Failed to parse numeral in solver output")
}

impl<L: Logic> SMTBackend for SMTLib2<L> {
    type Idx = NodeIndex;
    type Logic = L;

    /// Declare a new free variable of sort `ty`. If `var_name` is `None`, a
    /// fresh name of the form `X_<n>` is generated.
    fn new_var<T, P>(&mut self, var_name: Option<T>, ty: P) -> Self::Idx
        where T: AsRef<str>,
              P: Into<<<Self as SMTBackend>::Logic as Logic>::Sorts>
    {
        let var_name = match var_name {
            Some(name) => name.as_ref().to_owned(),
            None => {
                // Only consume a fresh index when we actually have to invent a
                // name. The previous `unwrap_or(..)` evaluated its argument
                // eagerly, bumping the counter even for named variables.
                self.var_index += 1;
                format!("X_{}", self.var_index)
            }
        };
        let typ = ty.into();
        let idx = self.gr.add_node(Self::Logic::free_var(var_name.clone(), typ.clone()));
        self.var_map.insert(var_name.clone(), (idx, typ));
        self.idx_map.insert(idx, var_name);
        idx
    }

    /// Send `(set-logic ...)` to the solver, if a logic was configured.
    fn set_logic<S: SMTProc>(&mut self, smt_proc: &mut S) {
        // Borrow instead of the previous `unwrap().clone()`; only the Display
        // output of the logic is needed here.
        if let Some(ref logic) = self.logic {
            must!(smt_proc.write(format!("(set-logic {})\n", logic)));
        }
    }

    /// Add a function-application node with operands `ops`; operand order is
    /// recorded on the edges.
    fn assert<T: Into<L::Fns>>(&mut self, assert: T, ops: &[Self::Idx]) -> Self::Idx {
        // TODO: Check correctness like operator arity.
        let assertion = self.gr.add_node(assert.into());
        for (i, op) in ops.iter().enumerate() {
            self.gr.add_edge(assertion, *op, EdgeData::EdgeOrder(i));
        }
        assertion
    }

    /// Push the accumulated declarations/assertions plus `(check-sat)` to the
    /// solver and classify its answer as Sat, Unsat or Error.
    fn check_sat<S: SMTProc>(&mut self, smt_proc: &mut S, debug: bool) -> SMTRes {
        // TODO: surface these write errors instead of discarding them.
        let _ = smt_proc.write(self.generate_asserts(debug));
        let _ = smt_proc.write("(check-sat)\n".to_owned());
        let read = smt_proc.read_checksat_output();
        if read == "sat" {
            SMTRes::Sat(read, None)
        } else if read == "unsat" {
            SMTRes::Unsat(read, None)
        } else {
            SMTRes::Error(read, None)
        }
    }

    /// Ask the solver to `(simplify ...)` the expression rooted at `ni` and
    /// parse the numeric reply.
    fn simplify<S: SMTProc>(&mut self, smt_proc: &mut S, ni: Self::Idx) -> SMTResult<u64> {
        // TODO: surface this write error instead of discarding it.
        let _ = smt_proc.write(format!("(simplify {})\n", self.expand_assertion(ni)));
        smt_proc.proc_specific_read();
        let val_str = smt_proc.read();
        Ok(parse_smt2_numeral(&val_str))
    }

    // TODO: Return type information along with the value.
    /// Run `check_sat` and, when satisfiable, query `(get-model)` and parse
    /// the model into a map from variable node index to concrete value.
    fn solve<S: SMTProc>(&mut self, smt_proc: &mut S, debug: bool) -> (SMTResult<HashMap<Self::Idx, u64>>, SMTRes) {
        let mut result = HashMap::new();
        let check_sat = self.check_sat(smt_proc, debug);
        // Only query the model when the query was satisfiable.
        if let SMTRes::Sat(ref res, _) = check_sat {
            must!(smt_proc.write("(get-model)\n".to_owned()));
            // XXX: For some reason we need two reads here in order to get the
            // result from the SMT solver. Need to look into the reason for
            // this. This might stop working in the future.
            smt_proc.proc_specific_read();
            let read_result = smt_proc.read_getmodel_output();
            // Matches `(define-fun <var> () <sort> <value>)` entries; <value>
            // may be decimal, hex (#x..) or binary (#b..).
            let re = Regex::new(r"\s+\(define-fun (?P<var>[0-9a-zA-Z_]+) \(\) [(]?[ _a-zA-Z0-9]+[)]?\n\s+(?P<val>([0-9]+|#x[0-9a-f]+|#b[01]+))")
                         .unwrap();
            for caps in re.captures_iter(&read_result) {
                let val = parse_smt2_numeral(caps.name("val").unwrap().as_str());
                let vname = caps.name("var").unwrap().as_str();
                // NodeIndex is `Copy`; no clone needed.
                result.insert(self.var_map[vname].0, val);
            }
            return (Ok(result), SMTRes::Sat(res.clone(), Some(read_result)));
        }
        // Unsat/Error path: `check_sat` is still owned here, so no clone.
        (Ok(result), check_sat)
    }
}
| 36.973913 | 148 | 0.501411 |
28a6a4afd26f6aac1228380017714a5877f104b5 | 3,771 | //! By default, Tetra runs updates at a fixed tick rate, decoupled from the speed at which
//! the screen is rendering. This means your game's physics will behave consistently regardless
//! of how powerful the player's device is, which is nice! However, it does mean that if the
//! update and render rates don't line up nicely, your game might look a bit choppy.
//!
//! There are two ways of handling this:
//!
//! * Interpolation
//! * Store both the previous state and the current state, and render somewhere in between the two
//! * Pros: Accurate, no guesswork
//! * Cons: Complex, introduces a tiny bit of input lag as you're updating one frame ahead of rendering
//! * Extrapolation
//! * Store only the current state, and guess what the next state will look like
//! * Pros: Simple to implement, doesn't cause lag
//! * Cons: Looks weird when the extrapolated state doesn't end up matching reality
//!
//! The example below shows how to implement very naive forms of these techniques, with the
//! tick rate set extremely low for demonstration purposes.
//!
//! For more information, see these articles:
//!
//! * https://gafferongames.com/post/fix_your_timestep/
//! * http://gameprogrammingpatterns.com/game-loop.html
use tetra::graphics::{self, Color, Texture};
use tetra::math::Vec2;
use tetra::{Context, ContextBuilder, State};
/// Holds one sprite drawn three times, once per timestep-smoothing strategy,
/// so the visual difference between them is visible side by side.
struct GameState {
    texture: Texture,
    // Movement applied on every fixed update tick.
    velocity: Vec2<f32>,
    // No smoothing: drawn exactly where the last tick left it.
    position_none: Vec2<f32>,
    // Extrapolation: drawn ahead of the last tick using the velocity.
    position_ex: Vec2<f32>,
    // Interpolation: previous and current tick positions, blended when drawing.
    position_in_prev: Vec2<f32>,
    position_in_curr: Vec2<f32>,
}
impl GameState {
    /// Load the player sprite and stack the three demo positions vertically,
    /// all sharing the same horizontal velocity.
    fn new(ctx: &mut Context) -> tetra::Result<GameState> {
        let texture = Texture::new(ctx, "./examples/resources/player.png")?;
        let velocity = Vec2::new(16.0, 0.0);
        // The interpolated sprite starts with prev == curr.
        let start_in = Vec2::new(16.0, 48.0);

        Ok(GameState {
            texture,
            velocity,
            position_none: Vec2::new(16.0, 16.0),
            position_ex: Vec2::new(16.0, 32.0),
            position_in_prev: start_in,
            position_in_curr: start_in,
        })
    }
}
impl State for GameState {
    fn update(&mut self, _: &mut Context) -> tetra::Result {
        // The un-smoothed and extrapolated sprites just integrate velocity.
        self.position_none += self.velocity;
        self.position_ex += self.velocity;

        // The interpolated sprite also remembers where it was last tick, so
        // the renderer can blend between the two states; effectively the
        // simulation runs one tick ahead of what is displayed.
        self.position_in_prev = self.position_in_curr;
        self.position_in_curr += self.velocity;

        Ok(())
    }

    fn draw(&mut self, ctx: &mut Context, dt: f64) -> tetra::Result {
        graphics::clear(ctx, Color::rgb(0.769, 0.812, 0.631));

        // `dt` is the blend factor between ticks: 0.0 means an update just
        // ran, values near 1.0 mean the next update is imminent.
        let blend = dt as f32;

        // 1. No compensation - visibly choppy at a low tick rate.
        graphics::draw(ctx, &self.texture, self.position_none);

        // 2. Extrapolation - project forward along the current velocity.
        let guessed = self.position_ex + self.velocity * blend;
        graphics::draw(ctx, &self.texture, guessed);

        // 3. Interpolation - blend between the two stored tick states.
        let blended = Vec2::lerp(self.position_in_prev, self.position_in_curr, blend);
        graphics::draw(ctx, &self.texture, blended);

        Ok(())
    }
}
// Entry point: run a small window with a deliberately low tick rate
// (5 updates/second) so the difference between the three strategies in
// `GameState::draw` is easy to see.
fn main() -> tetra::Result {
    ContextBuilder::new("Interpolation", 640, 480)
        .tick_rate(5.0)
        .quit_on_escape(true)
        .build()?
        .run(GameState::new)
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.