use legion::prelude::*;
use std::{convert::TryInto, sync::Arc};
use crate::{
graphics::{
pipeline_manager::PipelineManager,
resources::GPUResourceManager,
shadows::{OmniShadowManager, ShadowCamera},
CommandBufferQueue, CommandQueueItem, pipelines::{PointLight, DirectionalLight, MAX_LIGHTS, LightingUniform}, lighting::cluster::{FROXELS_Y, FROXELS_X, FAR_PLANE_DISTANCE, FROXELS_Z},
},
scene::components,
};
use nalgebra_glm::Vec4;
pub fn create() -> Box<dyn Schedulable> {
SystemBuilder::new("shadows")
.read_resource::<Arc<GPUResourceManager>>()
.write_resource::<crate::core::PerformanceMetrics>()
.read_resource::<Arc<wgpu::Device>>()
.write_resource::<ShadowCamera>()
.write_resource::<CommandBufferQueue>()
.read_resource::<Arc<GPUResourceManager>>()
.read_resource::<PipelineManager>()
.write_resource::<OmniShadowManager>()
.with_query(<(Write<components::PointLightData>, Read<components::Transform>)>::query())
.with_query(<(Read<components::Mesh>, Read<components::Transform>)>::query())
.with_query(<(Read<components::CameraData>, )>::query())
.with_query(<(Read<components::DirectionalLightData>,)>::query())
.build(
|_,
mut world,
(resource_manager, perf_metrics, device, shadow_camera, command_buffer_queue, gpu_resource_manager, pipeline_manager, omni_shadow_manager),
(point_light_query, transform_mesh_query, camera_query, directional_light_query)| {
// Get camera for update_globals function.
let (cam_pos, camera_view) = {
let filtered_camera_data: Vec<_> = camera_query
.iter(&world)
.filter(|(camera,)| camera.active)
.collect();
let camera_data: Option<&(
legion::borrow::Ref<'_, crate::scene::components::camera_data::CameraData>,
)> = filtered_camera_data.first();
// No camera no shadows
if camera_data.is_none() {
return;
}
let camera = &camera_data.as_ref().unwrap().0;
(camera.position, camera.view)
};
// Create shadow encoder
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("shadow"),
});
let shadow_sort_time = std::time::Instant::now();
let point_lights = {
let mut point_lights = point_light_query.iter_mut(world)
.filter(|(light, _)| light.shadow)
.collect::<Vec<_>>();
// First sort point lights by range / distance.
point_lights.sort_by(|(light_a, transform_a), (light_b, transform_b)| {
let distance_a = nalgebra_glm::distance2(&transform_a.position, &cam_pos);
let distance_b = nalgebra_glm::distance2(&transform_b.position, &cam_pos);
let importance_a = light_a.attenuation / distance_a;
let importance_b = light_b.attenuation / distance_b;
importance_b.partial_cmp(&importance_a).unwrap()
});
let mut total = 0;
let mut i = 0;
let mut j = 0;
// Allocate shadow maps
point_lights.iter_mut().for_each(|(light, transform)| {
light.shadow_texture_id = (j, i);
i += 1;
// TODO: Uh we could do better..
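// j selects a shadow-atlas tier and i a slot within it; the per-tier
// capacities below (3, 10, 42, 170) are assumed to mirror the atlas
// layout that OmniShadowManager allocates.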
if i >= 3 && j == 0 {
i = 0;
j += 1;
}
if i >= 10 && j == 1 {
i = 0;
j += 1;
}
if i >= 42 && j == 2 {
i = 0;
j += 1;
}
if i >= 170 && j == 3 {
i = 0;
j += 1;
}
});
// Sort point lights by distance and age.
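// Weighting importance by (age + 1) lets stale casters rise in the
// ordering, so lights beyond the per-frame budget still get refreshed
// round-robin over subsequent frames.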
point_lights.sort_by(|(light_a, transform_a), (light_b, transform_b)| {
let distance_a = nalgebra_glm::distance2(&transform_a.position, &cam_pos);
let distance_b = nalgebra_glm::distance2(&transform_b.position, &cam_pos);
let importance_a = light_a.attenuation / distance_a * (light_a.age + 1) as f32;
let importance_b = light_b.attenuation / distance_b * (light_b.age + 1) as f32;
importance_b.partial_cmp(&importance_a).unwrap()
});
// Reset the age of lights shadowed this frame; age the rest.
for (light, _) in point_lights.iter_mut() {
if total < omni_shadow_manager.max_casters_per_frame {
light.age = 0;
} else {
light.age += 1;
}
total += 1;
}
// Collect data to pass to shadow renderer.
point_lights.iter().map(|(light, transform)| {
(light.attenuation, transform.position.clone(), light.shadow_texture_id)
})
.collect::<Vec<_>>()
};
perf_metrics.insert("shadow light sort", std::time::Instant::now().duration_since(shadow_sort_time));
let shadow_time = std::time::Instant::now();
omni_shadow_manager.update(
point_lights,
pipeline_manager,
gpu_resource_manager.clone(),
&mut encoder,
shadow_camera,
transform_mesh_query,
world,
);
perf_metrics.insert("shadow generation", std::time::Instant::now().duration_since(shadow_time));
// ******************************************************************************
// This section is where we upload our lighting uniforms to the GPU
// ******************************************************************************
if directional_light_query.iter(&world).count() > 0 || point_light_query.iter_mut(&mut world).count() > 0 {
let mut directional_light_data_vec: Vec<DirectionalLight> = directional_light_query
.iter(&world)
.map(|(data,)| DirectionalLight {
direction: Vec4::new(
data.direction.x,
data.direction.y,
data.direction.z,
0.0,
),
color: Vec4::new(data.color.x, data.color.y, data.color.z, data.intensity),
})
.collect();
// TODO: Use some sort of distance calculation to get the closest lights.
let mut point_light_data_vec: Vec<PointLight> = point_light_query
.iter_mut(&mut world)
.map(|(data, transform)| {
let position = Vec4::new(
transform.position.x,
transform.position.y,
transform.position.z,
1.0,
);
PointLight {
attenuation: Vec4::new(data.attenuation, if data.shadow { 1.0 } else { 0.0 }, data.shadow_texture_id.0 as f32, data.shadow_texture_id.1 as f32),
color: Vec4::new(data.color.x, data.color.y, data.color.z, data.intensity),
position,
view_position: camera_view * position,
shadow_matrix: nalgebra_glm::perspective_fov_lh_no(
90f32.to_radians(),
512.0,
512.0,
0.1,
data.attenuation,
),
..Default::default()
}
})
.collect();
let total_dir_lights = directional_light_data_vec.len() as u32;
let total_point_lights = point_light_data_vec.len() as u32;
// Fill in missing data if we don't have it.
point_light_data_vec.resize_with(MAX_LIGHTS, || PointLight::default());
directional_light_data_vec
.resize_with(4, || DirectionalLight::default());
let light_uniform = LightingUniform {
cluster_count: [FROXELS_X, FROXELS_Y, FROXELS_Z, 0],
light_num: Vec4::new(
total_dir_lights as f32,
total_point_lights as f32,
0.0,
FAR_PLANE_DISTANCE,
),
directional_lights: directional_light_data_vec
.as_slice()
.try_into()
.unwrap(),
point_lights: point_light_data_vec.as_slice().try_into().unwrap(),
};
let lighting_buffer = device.create_buffer_with_data(
bytemuck::bytes_of(&light_uniform),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(
&lighting_buffer,
0,
&resource_manager.global_lighting_buffer,
0,
std::mem::size_of::<LightingUniform>() as u64,
);
}
command_buffer_queue
.push(CommandQueueItem {
buffer: encoder.finish(),
name: "shadow".to_string(),
})
.unwrap();
},
)
}
// Copyright (c) 2016-2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use base64::DecodeError;
use rusoto_ecr::GetAuthorizationTokenError;
use std::process::ExitStatus;
use std::result;
use std::string::FromUtf8Error;
use failure;
pub type Result<T> = result::Result<T, failure::Error>;
#[derive(Debug, Fail)]
pub enum Error {
#[fail(display = "{}", _0)]
Base64DecodeError(DecodeError),
#[fail(display = "Docker build failed with exit code: {}", _0)]
BuildFailed(ExitStatus),
#[fail(
display = "Could not determine Docker image ID for image: {}",
_0
)]
DockerImageIdNotFound(String),
#[fail(
display = "Switch to Windows containers to export Docker images on Windows. \
Current Docker Server OS is set to: {}",
_0
)]
DockerNotInWindowsMode(String),
#[fail(display = "Invalid registry type: {}", _0)]
InvalidRegistryType(String),
#[fail(display = "{}", _0)]
InvalidToken(FromUtf8Error),
#[fail(display = "Docker login failed with exit code: {}", _0)]
LoginFailed(ExitStatus),
#[fail(display = "Docker logout failed with exit code: {}", _0)]
LogoutFailed(ExitStatus),
#[fail(display = "No ECR Tokens returned")]
NoECRTokensReturned,
#[fail(display = "{}", _0)]
TokenFetchFailed(GetAuthorizationTokenError),
#[fail(
display = "A primary service package could not be determined from: {:?}. \
At least one package with a run hook must be provided.",
_0
)]
PrimaryServicePackageNotFound(Vec<String>),
#[fail(display = "Docker image push failed with exit code: {}", _0)]
PushImageFailed(ExitStatus),
#[fail(
display = "Removing Docker local images failed with exit code: {}",
_0
)]
RemoveImageFailed(ExitStatus),
}
use anyhow::{bail, Result};
use std::sync::Arc;
use tangram_app_context::Context;
use tangram_app_core::{
error::{bad_request, not_found, redirect_to_login, service_unavailable},
path_components,
user::{authorize_user, authorize_user_for_model},
};
use tangram_id::Id;
#[derive(serde::Deserialize)]
struct Action {
identifier: String,
}
pub async fn post(request: &mut http::Request<hyper::Body>) -> Result<http::Response<hyper::Body>> {
let context = Arc::clone(request.extensions().get::<Arc<Context>>().unwrap());
let app = &context.app;
let model_id = if let ["repos", _, "models", model_id, "production_predictions", ""] =
path_components(request).as_slice()
{
model_id.to_owned()
} else {
bail!("unexpected path");
};
let mut db = match app.begin_transaction().await {
Ok(db) => db,
Err(_) => return Ok(service_unavailable()),
};
let user = match authorize_user(request, &mut db, app.options().auth_enabled()).await? {
Ok(user) => user,
Err(_) => return Ok(redirect_to_login()),
};
let model_id: Id = match model_id.parse() {
Ok(model_id) => model_id,
Err(_) => return Ok(bad_request()),
};
if !authorize_user_for_model(&mut db, &user, model_id).await? {
return Ok(not_found());
}
let data = match hyper::body::to_bytes(request.body_mut()).await {
Ok(data) => data,
Err(_) => return Ok(bad_request()),
};
let Action { identifier } = match serde_urlencoded::from_bytes(&data) {
Ok(data) => data,
Err(_) => return Ok(bad_request()),
};
// Redirect.
let path = format!("predictions/{}", identifier);
let response = http::Response::builder()
.status(http::StatusCode::SEE_OTHER)
.header(http::header::LOCATION, path)
.body(hyper::Body::empty())
.unwrap();
app.commit_transaction(db).await?;
Ok(response)
}
#[macro_use]
extern crate quote;
extern crate proc_macro;
extern crate syn;
#[macro_use]
extern crate failure;
extern crate heck;
use failure::Error;
use heck::ShoutySnakeCase;
use proc_macro::TokenStream;
use quote::quote;
use syn::Ident;
#[proc_macro_derive(Singleton)]
pub fn singleton_derive(input: TokenStream) -> TokenStream {
// Construct a string representation of the type definition
let s = input.to_string();
// Parse the string representation
let ast = syn::parse_derive_input(&s).unwrap();
// Build the impl
let gen = impl_singleton(&ast);
// Return the generated impl
gen.parse().unwrap()
}
fn impl_singleton(ast: &syn::DeriveInput) -> quote::Tokens {
let name = &ast.ident;
let static_var_name = format!("{}_SINGLETON", name.as_ref().to_shouty_snake_case());
let static_var_ident = Ident::from(static_var_name);
quote! {
static #static_var_ident: singletonum::OnceCell<#name> = singletonum::OnceCell::INIT;
impl singletonum::Singleton for #name {
fn get_instance(init: &Self::Init) -> &'static Self {
#static_var_ident.get_or_init(|| Self::init(init))
}
}
}
}
//! Various utilities for cratesfyi
pub(crate) use self::copy::copy_doc_dir;
pub use self::github_updater::github_updater;
pub use self::release_activity_updater::update_release_activity;
pub use self::daemon::start_daemon;
pub(crate) use self::rustc_version::parse_rustc_version;
pub(crate) use self::html::extract_head_and_body;
pub use self::queue::add_crate_to_queue;
pub(crate) use self::cargo_metadata::{CargoMetadata, Package as MetadataPackage};
#[cfg(test)]
pub(crate) use self::cargo_metadata::{Dependency, Target};
mod cargo_metadata;
mod github_updater;
mod copy;
mod release_activity_updater;
mod daemon;
mod pubsubhubbub;
mod rustc_version;
mod html;
mod queue;
#![feature(proc_macro_hygiene)]
use ink_lang2 as ink;
#[ink::contract(version = "0.1.0")]
mod noop {
#[ink(storage)]
struct Noop {}
impl Noop {
#[ink(constructor)]
fn new(&mut self) {
let _ = &self.__env;
}
#[ink(message)]
fn noop(&self) {
let _ = &self.__storage;
}
}
}
fn main() {}
use super::board::{Coordinate, GamePiece, Move, PieceColor};
pub struct GameEngine {
board: [[Option<GamePiece>; 8]; 8],
current_turn: PieceColor,
move_count: u32,
}
pub struct MoveResult {
pub mv: Move,
pub crowned: bool,
}
impl GameEngine {
pub fn new() -> GameEngine {
let mut engine = GameEngine {
board: [[None; 8]; 8],
current_turn: PieceColor::Black,
move_count: 0,
};
engine.initialize_pieces();
engine
}
pub fn initialize_pieces(&mut self) {
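// Standard checkers layout: white fills the dark squares of rows 0-2,
// black the dark squares of rows 5-7; rows 3 and 4 start empty.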
[1, 3, 5, 7, 0, 2, 4, 6, 1, 3, 5, 7]
.iter()
.zip([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2].iter())
.map(|(a, b)| (*a as usize, *b as usize))
.for_each(|(x, y)| {
self.board[x][y] = Some(GamePiece::new(PieceColor::White));
});
[0, 2, 4, 6, 1, 3, 5, 7, 0, 2, 4, 6]
.iter()
.zip([5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7].iter())
.map(|(a, b)| (*a as usize, *b as usize))
.for_each(|(x, y)| {
self.board[x][y] = Some(GamePiece::new(PieceColor::Black));
});
}
pub fn move_piece(&mut self, mv: &Move) -> Result<MoveResult, ()> {
let legal_moves = self.legal_moves();
if !legal_moves.contains(mv) {
return Err(());
}
let Coordinate(fx, fy) = mv.from;
let Coordinate(tx, ty) = mv.to;
let piece = self.board[fx][fy].unwrap();
let midpiece_coordinate = self.midpiece_coordinate(fx, fy, tx, ty);
if let Some(Coordinate(x, y)) = midpiece_coordinate {
self.board[x][y] = None; // remove the jumped piece
}
// Move piece from source to dest
self.board[tx][ty] = Some(piece);
self.board[fx][fy] = None;
let crowned = if self.should_crown(piece, mv.to) {
self.crown_piece(mv.to);
true
} else {
false
};
self.advance_turn();
Ok(MoveResult {
mv: mv.clone(),
crowned: crowned,
})
}
pub fn get_piece(&self, coord: Coordinate) -> Result<Option<GamePiece>, ()> {
let Coordinate(x, y) = coord;
if x <= 7 && y <= 7 {
Ok(self.board[x][y])
} else {
Err(())
}
}
pub fn current_turn(&self) -> PieceColor {
self.current_turn
}
fn advance_turn(&mut self) {
if self.current_turn == PieceColor::Black {
self.current_turn = PieceColor::White
} else {
self.current_turn = PieceColor::Black
}
self.move_count += 1;
}
// Black pieces in row 0 or White pieces in row 7 are crowned
fn should_crown(&self, piece: GamePiece, coord: Coordinate) -> bool {
let Coordinate(_x, y) = coord;
(y == 0 && piece.color == PieceColor::Black) || (y == 7 && piece.color == PieceColor::White)
}
fn crown_piece(&mut self, coord: Coordinate) -> bool {
let Coordinate(x, y) = coord;
if let Some(piece) = self.board[x][y] {
self.board[x][y] = Some(GamePiece::crowned(piece));
true
} else {
false
}
}
pub fn is_crowned(&self, coord: Coordinate) -> bool {
let Coordinate(x, y) = coord;
match self.board[x][y] {
Some(piece) => piece.crowned,
None => false,
}
}
pub fn move_count(&self) -> u32 {
self.move_count
}
fn legal_moves(&self) -> Vec<Move> {
let mut moves: Vec<Move> = Vec::new();
for col in 0..8 {
for row in 0..8 {
if let Some(piece) = self.board[col][row] {
if piece.color == self.current_turn {
let loc = Coordinate(col, row);
let mut vmoves = self.valid_moves_from(loc);
moves.append(&mut vmoves);
}
}
}
}
moves
}
fn valid_moves_from(&self, loc: Coordinate) -> Vec<Move> {
let Coordinate(x, y) = loc;
if let Some(p) = self.board[x][y] {
let mut jumps = loc
.jump_targets_from()
.filter(|t| self.valid_jump(&p, &loc, &t))
.map(|ref t| Move {
from: loc.clone(),
to: t.clone(),
})
.collect::<Vec<Move>>();
let mut moves = loc
.move_targets_from()
.filter(|t| self.valid_move(&p, &loc, &t))
.map(|ref t| Move {
from: loc.clone(),
to: t.clone(),
})
.collect::<Vec<Move>>();
jumps.append(&mut moves);
jumps
} else {
Vec::new()
}
}
fn midpiece_coordinate(&self, x: usize, y: usize, tx: usize, ty: usize) -> Option<Coordinate> {
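// A jump moves two squares diagonally; the captured piece sits on the
// square halfway between source and destination, if such a square exists.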
if tx == x + 2 && ty == y + 2 {
Some(Coordinate(x + 1, y + 1))
} else if x >= 2 && y >= 2 && tx == x - 2 && ty == y - 2 {
Some(Coordinate(x - 1, y - 1))
} else if x >= 2 && tx == x - 2 && ty == y + 2 {
Some(Coordinate(x - 1, y + 1))
} else if y >= 2 && tx == x + 2 && ty == y - 2 {
Some(Coordinate(x + 1, y - 1))
} else {
None
}
}
fn midpiece(&self, x: usize, y: usize, tx: usize, ty: usize) -> Option<GamePiece> {
match self.midpiece_coordinate(x, y, tx, ty) {
Some(Coordinate(x, y)) => self.board[x][y],
None => None,
}
}
fn valid_jump(&self, p: &GamePiece, from: &Coordinate, to: &Coordinate) -> bool {
if !to.on_board() || !from.on_board() {
false
} else {
let Coordinate(x, y) = *from;
let Coordinate(tx, ty) = *to;
let midpiece = self.midpiece(x, y, tx, ty);
match midpiece {
Some(mp) if mp.color != p.color => true,
_ => false,
}
}
}
fn valid_move(&self, p: &GamePiece, from: &Coordinate, to: &Coordinate) -> bool {
if !to.on_board() || !from.on_board() {
false
} else {
let Coordinate(tx, ty) = *to;
if let Some(_piece) = self.board[tx][ty] {
false
} else {
let Coordinate(_fx, fy) = *from;
let mut valid = false;
if ty > fy && p.color == PieceColor::White {
// white moves down
valid = true;
}
if ty < fy && p.color == PieceColor::Black {
// black moves up
valid = true;
}
if ty > fy && p.color == PieceColor::Black && p.crowned {
// crowned black mv down
valid = true;
}
if ty < fy && p.color == PieceColor::White && p.crowned {
// crowned white mv up
valid = true;
}
valid
}
}
}
}
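// A minimal usage sketch for the engine above (illustrative only, not part
// of the original module); Black moves first and moves up the board:
//
// let mut engine = GameEngine::new();
// let mv = Move { from: Coordinate(0, 5), to: Coordinate(1, 4) };
// let result = engine.move_piece(&mv).unwrap();
// assert!(!result.crowned);
// assert_eq!(engine.current_turn(), PieceColor::White);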
use std::prelude::v1::*;
use bigint::H256;
use {Change, TrieMut, DatabaseHandle, get, insert, delete};
pub trait ItemCounter {
fn increase(&mut self, key: H256) -> usize;
fn decrease(&mut self, key: H256) -> usize;
}
pub trait DatabaseMut {
fn get(&self, key: H256) -> &[u8];
fn set(&mut self, key: H256, value: Option<&[u8]>);
}
impl<'a, D: DatabaseMut> DatabaseHandle for &'a D {
fn get(&self, key: H256) -> &[u8] {
DatabaseMut::get(*self, key)
}
}
pub struct TrieCollection<D: DatabaseMut, C: ItemCounter> {
database: D,
counter: C,
}
impl<D: DatabaseMut, C: ItemCounter> TrieCollection<D, C> {
pub fn new(database: D, counter: C) -> Self {
Self { database, counter }
}
pub fn trie_for<'a>(&'a self, root: H256) -> DatabaseTrieMut<'a, D> {
DatabaseTrieMut {
database: &self.database,
change: Change::default(),
root: root
}
}
pub fn apply<'a>(&'a mut self, trie: DatabaseTrieMut<'a, D>) {
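// Commit a trie's staged change set: newly referenced nodes are written
// and their refcount increased; de-referenced nodes are decremented and
// physically deleted once the count reaches zero.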
for (key, value) in trie.change.adds {
self.database.set(key, Some(&value));
self.counter.increase(key);
}
for key in trie.change.removes {
let r = self.counter.decrease(key);
if r == 0 {
self.database.set(key, None);
}
}
}
}
pub struct DatabaseTrieMut<'a, D: DatabaseMut + 'a> {
database: &'a D,
change: Change,
root: H256,
}
impl<'a, D: DatabaseMut> TrieMut for DatabaseTrieMut<'a, D> {
fn root(&self) -> H256 {
self.root
}
fn insert(&mut self, key: &[u8], value: &[u8]) {
let (new_root, change) = insert(self.root, &self.database, key, value);
self.change.merge(&change);
self.root = new_root;
}
fn delete(&mut self, key: &[u8]) {
let (new_root, change) = delete(self.root, &self.database, key);
self.change.merge(&change);
self.root = new_root;
}
fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
get(self.root, &self.database, key).map(|v| v.into())
}
}
//! Low-Power Timer wakeup.
#![no_main]
#![no_std]
extern crate panic_semihosting;
use cortex_m::{asm, peripheral::NVIC};
use cortex_m_rt::entry;
use nb::block;
use stm32l0xx_hal::{
exti::{DirectLine, Exti},
gpio::{gpiob::PB, Output, PushPull},
lptim::{self, ClockSrc, LpTimer},
pac,
prelude::*,
pwr::{self, PWR},
rcc,
};
#[entry]
fn main() -> ! {
let cp = pac::CorePeripherals::take().unwrap();
let dp = pac::Peripherals::take().unwrap();
let mut scb = cp.SCB;
let mut rcc = dp.RCC.freeze(rcc::Config::msi(rcc::MSIRange::Range0));
let mut exti = Exti::new(dp.EXTI);
let mut pwr = PWR::new(dp.PWR, &mut rcc);
let gpiob = dp.GPIOB.split(&mut rcc);
let mut led = gpiob.pb2.into_push_pull_output().downgrade();
let mut lptim = LpTimer::init_periodic(dp.LPTIM, &mut pwr, &mut rcc, ClockSrc::Lse);
let exti_line = DirectLine::Lptim1;
lptim.enable_interrupts(lptim::Interrupts {
autoreload_match: true,
..lptim::Interrupts::default()
});
exti.listen_direct(exti_line);
// Blink twice to signal the start of the program
blink(&mut led);
blink(&mut led);
// 1 second of regular run mode
lptim.start(1.hz());
block!(lptim.wait()).unwrap();
Exti::unpend(exti_line);
NVIC::unpend(pac::Interrupt::LPTIM1);
blink(&mut led);
// 1 second of low-power run mode
pwr.enter_low_power_run_mode(rcc.clocks);
block!(lptim.wait()).unwrap();
pwr.exit_low_power_run_mode();
Exti::unpend(exti_line);
NVIC::unpend(pac::Interrupt::LPTIM1);
blink(&mut led);
// 1 second of sleep mode
exti.wait_for_irq(exti_line, pwr.sleep_mode(&mut scb));
lptim.wait().unwrap(); // returns immediately; we just got the interrupt
Exti::unpend(exti_line);
NVIC::unpend(pac::Interrupt::LPTIM1);
blink(&mut led);
// 1 second of low-power sleep mode
exti.wait_for_irq(exti_line, pwr.low_power_sleep_mode(&mut scb, &mut rcc));
lptim.wait().unwrap(); // returns immediately; we just got the interrupt
Exti::unpend(exti_line);
NVIC::unpend(pac::Interrupt::LPTIM1);
blink(&mut led);
// 1 second of stop mode
exti.wait_for_irq(
exti_line,
pwr.stop_mode(
&mut scb,
&mut rcc,
pwr::StopModeConfig {
ultra_low_power: true,
},
),
);
lptim.wait().unwrap(); // returns immediately; we just got the interrupt
blink(&mut led);
// 1 second of standby mode
NVIC::unpend(pac::Interrupt::LPTIM1);
exti.wait_for_irq(exti_line, pwr.standby_mode(&mut scb));
// The microcontroller resets after leaving standby mode. We should never
// reach this point.
loop {
blink(&mut led);
}
}
fn blink(led: &mut PB<Output<PushPull>>) {
led.set_high().unwrap();
delay();
led.set_low().unwrap();
delay();
}
fn delay() {
// We can't use `Delay`, as that requires a frequency of at least one MHz.
// Given our clock selection, the following loop should give us a nice delay
// when compiled in release mode.
for _ in 0..1_000 {
asm::nop()
}
}
table! {
users (id) {
id -> Integer,
ws_id -> Integer,
uuid -> Text,
admin -> Bool,
login -> Text,
passw -> Nullable<Text>,
points -> Integer,
connected -> Bool,
playing -> Bool,
}
}
table! {
game_in_progress (id) {
id -> Integer,
id_player1 -> Integer,
id_player2 -> Integer,
serialize_grid -> Nullable<Text>,
}
}
// Copyright (c) 2016-2021 Memgraph Ltd. [https://memgraph.com]
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use c_str_macro::c_str;
use serial_test::serial;
use std::ffi::{CStr, CString};
use std::ptr::null_mut;
use super::*;
use crate::memgraph::Memgraph;
use crate::mgp::mock_ffi::*;
use crate::{mock_mgp_once, with_dummy};
#[test]
#[serial]
fn test_id() {
mock_mgp_once!(mgp_vertex_get_id_context, |_, vertex_id_ptr| unsafe {
(*vertex_id_ptr).as_int = 72;
mgp_error::MGP_ERROR_NO_ERROR
});
with_dummy!(Vertex, |vertex: &Vertex| {
assert_eq!(vertex.id(), 72);
});
}
#[test]
#[serial]
fn test_labels_count() {
mock_mgp_once!(mgp_vertex_labels_count_context, |_, labels_count| unsafe {
(*labels_count) = 2;
mgp_error::MGP_ERROR_NO_ERROR
});
with_dummy!(Vertex, |vertex: &Vertex| {
assert_eq!(vertex.labels_count().unwrap(), 2);
});
}
#[test]
#[serial]
fn test_has_label() {
mock_mgp_once!(
mgp_vertex_has_label_context,
|vertex, label, result| unsafe {
assert_eq!(vertex, null_mut());
assert_eq!(CStr::from_ptr(label.name), c_str!("labela"));
(*result) = 1;
mgp_error::MGP_ERROR_NO_ERROR
}
);
with_dummy!(Vertex, |vertex: &Vertex| {
assert_eq!(vertex.has_label(c_str!("labela")).unwrap(), true);
});
}
#[test]
#[serial]
fn test_label_at() {
let test_label = CString::new("test");
mock_mgp_once!(
mgp_vertex_label_at_context,
move |vertex, _, result| unsafe {
assert_eq!(vertex, null_mut());
(*result).name = test_label.as_ref().unwrap().as_ptr();
mgp_error::MGP_ERROR_NO_ERROR
}
);
with_dummy!(Vertex, |vertex: &Vertex| {
assert_eq!(vertex.label_at(5).unwrap(), CString::new("test").unwrap());
});
}
#[test]
#[serial]
fn test_property() {
mock_mgp_once!(
mgp_vertex_get_property_context,
move |vertex, prop_name, memory, _| {
assert_eq!(vertex, null_mut());
assert_eq!(prop_name, c_str!("test").as_ptr());
assert_eq!(memory, null_mut());
mgp_error::MGP_ERROR_UNABLE_TO_ALLOCATE
}
);
with_dummy!(Vertex, |vertex: &Vertex| {
assert_eq!(
vertex.property(c_str!("test")).err().unwrap(),
Error::UnableToGetVertexProperty
);
});
}
#[test]
#[serial]
fn test_properties() {
mock_mgp_once!(mgp_vertex_iter_properties_context, |_, _, _| {
mgp_error::MGP_ERROR_UNABLE_TO_ALLOCATE
});
with_dummy!(Vertex, |vertex: &Vertex| {
let iter = vertex.properties();
assert!(iter.is_err());
});
}
#[test]
#[serial]
fn test_in_edges() {
mock_mgp_once!(mgp_vertex_iter_in_edges_context, |_, _, _| {
mgp_error::MGP_ERROR_UNABLE_TO_ALLOCATE
});
with_dummy!(Vertex, |vertex: &Vertex| {
let iter = vertex.in_edges();
assert!(iter.is_err());
});
}
#[test]
#[serial]
fn test_out_edges() {
mock_mgp_once!(mgp_vertex_iter_out_edges_context, |_, _, _| {
mgp_error::MGP_ERROR_UNABLE_TO_ALLOCATE
});
with_dummy!(Vertex, |vertex: &Vertex| {
let iter = vertex.out_edges();
assert!(iter.is_err());
});
}
use ethereum_types::{Address, Public, H256, U256};
pub fn clean_0x(s: &str) -> &str {
if s.starts_with("0x") {
&s[2..]
} else {
s
}
}
pub fn string_2_u256(value: String) -> U256 {
let v = Box::leak(value.into_boxed_str());
let v = clean_0x(v);
U256::from(v)
}
pub fn string_2_h256(value: String) -> H256 {
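// Hex input shorter than 64 nibbles is left-padded with zeros, so e.g.
// "0xabc" parses as 0x0000...0abc.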
let v = Box::leak(value.into_boxed_str());
let v = clean_0x(v);
if v.len() < 64 {
let mut s = String::from("0").repeat(64 - v.len());
s.push_str(v);
let s: &'static str = Box::leak(s.into_boxed_str());
return H256::from(s);
}
H256::from(v)
}
pub fn string_2_bytes(value: String) -> Vec<u8> {
let v = Box::leak(value.into_boxed_str());
let v = clean_0x(v);
hex::decode(v).unwrap()
}
pub fn string_2_address(value: String) -> Address {
if value.is_empty() {
return Address::zero();
}
let v = Box::leak(value.into_boxed_str());
let v = clean_0x(v);
Address::from(v)
}
pub fn public_2_address(public: &Public) -> Address {
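// Ethereum-style derivation: the address is the low 20 bytes of
// keccak256(public key).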
let hash = tiny_keccak::keccak256(&public.0);
let mut result = Address::default();
result.copy_from_slice(&hash[12..]);
result
}
pub fn secret_2_address(secret: &str) -> Address {
let a = hex::decode(clean_0x(secret)).unwrap();
let secret_key = secp256k1::SecretKey::parse_slice(a.as_slice()).unwrap();
let public_key = secp256k1::PublicKey::from_secret_key(&secret_key);
let serialized = public_key.serialize();
let mut public = Public::default();
public.copy_from_slice(&serialized[1..65]);
public_2_address(&public)
}
/// Unescape special character sequences into their literal equivalent
///
/// For example `\n` becomes a real new line character
///
/// Expects utf escapes to be in the format `\uXXXX` where `X` are hex digits
///
/// This version creates a new String, use `unescape_str_into` to use an existing String
#[inline]
pub fn unescape_str (source: &str) -> String {
let mut result = String::new();
unescape_str_into(source, &mut result);
result
}
/// Unescape special character sequences into their literal equivalent
///
/// For example `\n` becomes a real new line character
///
/// Expects utf escapes to be in the format `\uXXXX` where `X` are hex digits
///
/// This version copies onto the end of an existing String, use `unescape_str` to use a new String
///
/// Note that if the last char of the String is an unaccompanied backslash `\`,
/// this is considered an invalid escape sequence and it is simply discarded
pub fn unescape_str_into (source: &str, dest: &mut String) {
dest.reserve(source.len());
let mut chars = source.chars();
while let Some(ch) = chars.next() {
dest.push(
if ch != '\\' {
ch
} else {
match chars.next() {
Some('u') => {
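// Fold the next four hex digits into a code point. Like the rest of
// this parser, it panics on malformed input (a non-hex digit or an
// invalid code point such as a surrogate).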
let value = chars.by_ref().take(4).fold(0, |acc, c| acc * 16 + c.to_digit(16).unwrap());
std::char::from_u32(value).unwrap()
}
Some('b') => '\x08',
Some('f') => '\x0c',
Some('n') => '\n',
Some('r') => '\r',
Some('t') => '\t',
Some(ch) => ch,
None => return
}
}
)
}
}
/// Unescape special character sequences into their serialization-safe equivalent
///
/// For example `\n` becomes two characters, `\` followed by `n`
///
/// Utf escapes to be in the format `\uXXXX` where `X` are hex digits
///
/// This version creates a new String, use `escape_str_into` to use an existing String
#[inline]
pub fn escape_str (source: &str) -> String {
let mut result = String::new();
escape_str_into(source, &mut result);
result
}
/// Unescape special character sequences into their serialization-safe equivalent
///
/// For example `\n` becomes two characters, `\` followed by `n`
///
/// Utf escapes to be in the format `\uXXXX` where `X` are hex digits
///
/// This version copies onto the end of an existing String, use `escape_str` to use a new String
pub fn escape_str_into (source: &str, dest: &mut String) {
dest.reserve(source.len());
for ch in source.chars() {
match ch {
'\\' => dest.push_str("\\\\"),
'\x08' => dest.push_str("\\b"),
'\x0c' => dest.push_str("\\f"),
'\'' => dest.push_str("\\'"),
'"' => dest.push_str("\\\""),
'\n' => dest.push_str("\\n"),
'\r' => dest.push_str("\\r"),
'\t' => dest.push_str("\\t"),
'\x7f' ..= std::char::MAX => {
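// Only the low 16 bits are emitted as `\uXXXX`, so this (and the
// matching unescape above) only round-trips code points in the Basic
// Multilingual Plane.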
let mut esc = *b"\\u0000";
for hex_digit_idx in (0..4).rev() {
let digit = (((ch as u32) >> (hex_digit_idx * 4)) & 0xf) as u8;
esc[5 - hex_digit_idx] = if digit < 10 { b'0' + digit } else { b'a' + digit - 10 }
}
dest.push_str(unsafe { std::str::from_utf8_unchecked(&esc) });
},
_ => dest.push(ch)
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn unescape_ok () {
let result = unescape_str(r#"\\\"\u2764"#);
let expected = "\\\"\u{2764}";
println!("Got unescaped string: `{}`", result);
println!("Expected: `{}`", expected);
assert_eq!(expected, result);
}
#[test]
fn escape_ok () {
let result = escape_str("\\\"\u{2764}");
let expected = r#"\\\"\u2764"#;
println!("Got escaped string: `{}`", result);
println!("Expected: `{}`", expected);
assert_eq!(expected, result);
}
}
//! Provide `derive(Trace)` support for structures to implement
//! `gcmodule::Trace` interface.
//!
//! # Example
//!
//! ```
//! use gcmodule_derive::Trace;
//!
//! #[derive(Trace)]
//! struct S<T: gcmodule::Trace> {
//! a: String,
//! b: Option<T>,
//!
//! #[trace(skip)] // ignore this field for Trace.
//! c: MyType,
//! }
//!
//! struct MyType;
//! ```
extern crate proc_macro;
use proc_macro::TokenStream;
use quote::quote;
use quote::ToTokens;
use syn::Data;
#[proc_macro_derive(Trace, attributes(trace))]
pub fn gcmodule_trace_derive(input: TokenStream) -> TokenStream {
let input = syn::parse_macro_input!(input as syn::DeriveInput);
let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl();
let ident = input.ident;
let mut trace_fn_body = Vec::new();
let mut is_type_tracked_fn_body = Vec::new();
match input.data {
Data::Struct(data) => {
for (i, field) in data.fields.into_iter().enumerate() {
if field.attrs.into_iter().any(is_skipped) {
continue;
}
let trace_field = match field.ident {
Some(ident) => quote! { self.#ident.trace(tracer); },
None => {
let i = syn::Index::from(i);
quote! { self.#i.trace(tracer); }
}
};
trace_fn_body.push(trace_field);
let ty = field.ty;
is_type_tracked_fn_body.push(quote! {
if <#ty as _gcmodule::Trace>::is_type_tracked() {
return true;
}
});
}
}
Data::Enum(_) | Data::Union(_) => {
trace_fn_body.push(quote! {
compile_error!("enum or union are not supported");
});
}
};
let generated = quote! {
const _: () = {
extern crate gcmodule as _gcmodule;
impl #impl_generics _gcmodule::Trace for #ident #ty_generics #where_clause {
fn trace(&self, tracer: &mut _gcmodule::Tracer) {
#( #trace_fn_body )*
}
fn is_type_tracked() -> bool {
#( #is_type_tracked_fn_body )*
false
}
fn as_any(&self) -> Option<&dyn std::any::Any> {
Some(self)
}
}
};
};
generated.into()
}
fn is_skipped(attr: syn::Attribute) -> bool {
// check if `#[trace(skip)]` exists.
if attr.path.to_token_stream().to_string() == "trace" {
for token in attr.tokens {
if token.to_string() == "(skip)" {
return true;
}
}
}
false
}
//! Tests auto-converted from "sass-spec/spec/libsass-closed-issues/issue_1298.hrx"
#[allow(unused)]
fn runner() -> crate::TestRunner {
super::runner()
}
#[test]
fn test() {
assert_eq!(
runner().ok(
"@import url(//fonts.googleapis.com/css?family=Roboto:400,500,700,400italic);\
\nhtml {\
\n font-family: roboto, arial, helvetica, sans-serif;\
\n}\n"
),
"@import url(//fonts.googleapis.com/css?family=Roboto:400,500,700,400italic);\
\nhtml {\
\n font-family: roboto, arial, helvetica, sans-serif;\
\n}\n"
);
}
//! Schema specification for [OpenAPI 3.0.0](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md)
use itertools::Itertools;
use semver;
use serde::{Deserialize, Serialize};
use serde_json;
use std::collections::BTreeMap;
use std::fs::File;
use std::io::BufReader;
use std::path::{Path, PathBuf};
use url;
use url_serde;
use crate::{
v3_0::components::{Components, ObjectOrReference},
Error, Result, MINIMUM_OPENAPI30_VERSION,
};
impl Spec {
pub fn validate_version(&self) -> Result<semver::Version> {
let spec_version = &self.openapi;
let sem_ver = semver::Version::parse(spec_version)?;
let required_version = semver::VersionReq::parse(MINIMUM_OPENAPI30_VERSION).unwrap();
if required_version.matches(&sem_ver) {
Ok(sem_ver)
} else {
Err(Error::UnsupportedSpecFileVersion(sem_ver))?
}
}
pub fn collect_ref_schemas<'a>(
&'a mut self,
root: &'a Path,
) -> Result<BTreeMap<String, Schema>> {
// helper that loads a referenced schema file and deserializes it
fn read_schemas(ref_path: PathBuf) -> Option<BTreeMap<String, Schema>> {
// load the file and deserialize it into a map of named schemas
let file = File::open(&ref_path).unwrap();
let reader = BufReader::new(file);
let ext = ref_path.extension().unwrap().to_str().unwrap();
match ext {
"yaml" | "yml" => serde_yaml::from_reader(reader).ok(),
"json" => serde_json::from_reader(reader).ok(),
_ => None,
}
};
Ok(self
.collect_ref_paths()
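// A $ref such as "defs.yaml#/Foo" names a file plus a fragment; keep
// only the file part, and drop purely in-document refs ("#/...", which
// split to an empty file part).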
.flat_map(|path| path.split("#").take(1))
.unique()
.filter(|path| !path.is_empty())
.map(|path| root.join(path))
.filter_map(|path| read_schemas(path))
.flatten()
.collect())
}
pub fn collect_ref_paths<'a>(&'a self) -> Box<dyn Iterator<Item = &String> + 'a> {
// helper function to map ObjectOrReference schemas
fn map_obj_or_refence<'a>(
iter: impl Iterator<Item = &'a ObjectOrReference<Schema>>,
) -> impl Iterator<Item = &'a String> {
iter.map(|obj_or_ref| match obj_or_ref {
ObjectOrReference::Object(schema) => Some(schema),
_ => None,
})
.filter_map(|x| x)
.flat_map(|schema| schema.collect_ref_paths())
};
Box::new(
self.components.iter().flat_map(|components| {
components
.schemas
.iter()
.flat_map(|hashmap| map_obj_or_refence(hashmap.values()))
}), // .chain(self.paths.values().map(|p|{
// [p.get,p.put,p.post,p.delete,p.options,p.head,p.patch,p.trace].iter()
// .filter_map(Option::Some)
// .map(|o| {
// o.iter().map(|op| {
// op.request_body.iter().map(|obj_or_ref| match obj_or_ref {
// ObjectOrReference::Object(requestbody) => {
// requestbody.content.values().
// _ => None,
// })
// .filter_map(|r| r)
// })
// })
// })
)
}
}
/// top level document
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Spec {
/// This string MUST be the [semantic version number](https://semver.org/spec/v2.0.0.html)
/// of the
/// [OpenAPI Specification version](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#versions)
/// that the OpenAPI document uses. The `openapi` field SHOULD be used by tooling
/// specifications and clients to interpret the OpenAPI document. This is not related to
/// the API
/// [`info.version`](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#infoVersion)
/// string.
pub openapi: String,
/// Provides metadata about the API. The metadata MAY be used by tooling as required.
pub info: Info,
/// An array of Server Objects, which provide connectivity information to a target server.
/// If the `servers` property is not provided, or is an empty array, the default value would
/// be a
/// [Server Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#serverObject)
/// with a
/// [url](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#serverUrl)
/// value of `/`.
// FIXME: Provide a default value as specified in documentation instead of `None`.
#[serde(skip_serializing_if = "Option::is_none")]
pub servers: Option<Vec<Server>>,
/// Holds the relative paths to the individual endpoints and their operations. The path is
/// appended to the URL from the
/// [`Server Object`](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#serverObject)
/// in order to construct the full URL. The Paths MAY be empty, due to
/// [ACL constraints](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#securityFiltering).
pub paths: BTreeMap<String, PathItem>,
/// An element to hold various schemas for the specification.
#[serde(skip_serializing_if = "Option::is_none")]
pub components: Option<Components>,
// FIXME: Implement
// /// A declaration of which security mechanisms can be used across the API.
// /// The list of values includes alternative security requirement objects that can be used.
// /// Only one of the security requirement objects need to be satisfied to authorize a request.
// /// Individual operations can override this definition.
// #[serde(skip_serializing_if = "Option::is_none")]
// pub security: Option<SecurityRequirement>,
/// A list of tags used by the specification with additional metadata.
/// The order of the tags can be used to reflect on their order by the parsing tools.
/// Not all tags that are used by the
/// [Operation Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#operationObject)
/// must be declared. The tags that are not declared MAY be organized randomly or
/// based on the tools' logic. Each tag name in the list MUST be unique.
#[serde(skip_serializing_if = "Option::is_none")]
pub tags: Option<Vec<Tag>>,
/// Additional external documentation.
#[serde(skip_serializing_if = "Option::is_none", rename = "externalDocs")]
pub external_docs: Option<ExternalDoc>,
// TODO: Add "Specification Extensions" https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#specificationExtensions}
}
/// General information about the API.
///
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#infoObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
// #[serde(rename_all = "lowercase")]
pub struct Info {
/// The title of the application.
pub title: String,
/// A short description of the application. CommonMark syntax MAY be used for rich text representation.
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// A URL to the Terms of Service for the API. MUST be in the format of a URL.
#[serde(rename = "termsOfService", skip_serializing_if = "Option::is_none")]
pub terms_of_service: Option<Url>,
/// The version of the OpenAPI document (which is distinct from the [OpenAPI Specification
/// version](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#oasVersion)
/// or the API implementation version).
pub version: String,
/// The contact information for the exposed API.
#[serde(skip_serializing_if = "Option::is_none")]
pub contact: Option<Contact>,
/// The license information for the exposed API.
#[serde(skip_serializing_if = "Option::is_none")]
pub license: Option<License>,
}
/// Wrapper around `url::Url` to fix a serde issue
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct Url(#[serde(with = "url_serde")] url::Url);
impl Url {
pub fn parse<S: AsRef<str>>(input: S) -> std::result::Result<Url, url::ParseError> {
url::Url::parse(input.as_ref()).map(Url)
}
}
/// Contact information for the exposed API.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#contactObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Contact {
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub url: Option<Url>,
// TODO: Make sure the email is a valid email
#[serde(skip_serializing_if = "Option::is_none")]
pub email: Option<String>,
// TODO: Add "Specification Extensions" https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#specificationExtensions
}
/// License information for the exposed API.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#licenseObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct License {
/// The license name used for the API.
pub name: String,
/// A URL to the license used for the API.
#[serde(skip_serializing_if = "Option::is_none")]
pub url: Option<Url>,
// TODO: Add "Specification Extensions" https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#specificationExtensions}
}
/// An object representing a Server.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#serverObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Server {
/// A URL to the target host. This URL supports Server Variables and MAY be relative, to
/// indicate that the host location is relative to the location where the OpenAPI document
/// is being served. Variable substitutions will be made when a variable is named
/// in {brackets}.
pub url: String,
/// An optional string describing the host designated by the URL. CommonMark syntax MAY be used for rich text representation.
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// A map between a variable name and its value. The value is used for substitution in
/// the server's URL template.
#[serde(skip_serializing_if = "Option::is_none")]
pub variables: Option<BTreeMap<String, ServerVariable>>,
}
/// An object representing a Server Variable for server URL template substitution.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#serverVariableObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct ServerVariable {
/// The default value to use for substitution, and to send, if an alternate value is not
/// supplied. Unlike the Schema Object's default, this value MUST be provided by the consumer.
pub default: String,
/// An enumeration of string values to be used if the substitution options are from a limited
/// set.
#[serde(rename = "enum", skip_serializing_if = "Option::is_none")]
pub substitutions_enum: Option<Vec<String>>,
/// An optional description for the server variable. [CommonMark] syntax MAY be used for rich
/// text representation.
///
/// [CommonMark]: https://spec.commonmark.org/
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
/// Describes the operations available on a single path.
///
/// A Path Item MAY be empty, due to [ACL
/// constraints](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#securityFiltering).
/// The path itself is still exposed to the documentation viewer but they will not know which
/// operations and parameters are available.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct PathItem {
/// Allows for an external definition of this path item. The referenced structure MUST be
/// in the format of a
/// [Path Item Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#pathItemObject).
/// If there are conflicts between the referenced definition and this Path Item's definition,
/// the behavior is undefined.
// FIXME: Should this ref be moved to an enum?
#[serde(skip_serializing_if = "Option::is_none", rename = "$ref")]
pub reference: Option<String>,
/// An optional, string summary, intended to apply to all operations in this path.
#[serde(skip_serializing_if = "Option::is_none")]
pub summary: Option<String>,
/// An optional, string description, intended to apply to all operations in this path.
/// [CommonMark syntax](http://spec.commonmark.org/) MAY be used for rich text representation.
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// A definition of a GET operation on this path.
#[serde(skip_serializing_if = "Option::is_none")]
pub get: Option<Operation>,
/// A definition of a PUT operation on this path.
#[serde(skip_serializing_if = "Option::is_none")]
pub put: Option<Operation>,
/// A definition of a POST operation on this path.
#[serde(skip_serializing_if = "Option::is_none")]
pub post: Option<Operation>,
/// A definition of a DELETE operation on this path.
#[serde(skip_serializing_if = "Option::is_none")]
pub delete: Option<Operation>,
/// A definition of a OPTIONS operation on this path.
#[serde(skip_serializing_if = "Option::is_none")]
pub options: Option<Operation>,
/// A definition of a HEAD operation on this path.
#[serde(skip_serializing_if = "Option::is_none")]
pub head: Option<Operation>,
/// A definition of a PATCH operation on this path.
#[serde(skip_serializing_if = "Option::is_none")]
pub patch: Option<Operation>,
/// A definition of a TRACE operation on this path.
#[serde(skip_serializing_if = "Option::is_none")]
pub trace: Option<Operation>,
/// An alternative `server` array to service all operations in this path.
#[serde(skip_serializing_if = "Option::is_none")]
pub servers: Option<Vec<Server>>,
/// A list of parameters that are applicable for all the operations described under this
/// path. These parameters can be overridden at the operation level, but cannot be removed
/// there. The list MUST NOT include duplicated parameters. A unique parameter is defined by
/// a combination of a
/// [name](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterName)
/// and
/// [location](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterIn).
/// The list can use the
/// [Reference Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#referenceObject)
/// to link to parameters that are defined at the
/// [OpenAPI Object's components/parameters](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#componentsParameters).
#[serde(skip_serializing_if = "Option::is_none")]
pub parameters: Option<Vec<ObjectOrReference<Parameter>>>,
// TODO: Add "Specification Extensions" https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#specificationExtensions}
}
/// Describes a single API operation on a path.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#operationObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
// #[serde(rename_all = "lowercase")]
pub struct Operation {
/// A list of tags for API documentation control. Tags can be used for logical grouping of
/// operations by resources or any other qualifier.
#[serde(skip_serializing_if = "Option::is_none")]
pub tags: Option<Vec<String>>,
/// A short summary of what the operation does.
#[serde(skip_serializing_if = "Option::is_none")]
pub summary: Option<String>,
/// A verbose explanation of the operation behavior.
/// [CommonMark syntax](http://spec.commonmark.org/) MAY be used for rich text representation.
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// Additional external documentation for this operation.
#[serde(skip_serializing_if = "Option::is_none", rename = "externalDocs")]
pub external_docs: Option<ExternalDoc>,
/// Unique string used to identify the operation. The id MUST be unique among all operations
/// described in the API. Tools and libraries MAY use the operationId to uniquely identify an
/// operation, therefore, it is RECOMMENDED to follow common programming naming conventions.
#[serde(skip_serializing_if = "Option::is_none", rename = "operationId")]
pub operation_id: Option<String>,
/// A list of parameters that are applicable for this operation. If a parameter is already
/// defined at the
/// [Path Item](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#pathItemParameters),
/// the new definition will override it but can never remove it. The list MUST NOT
/// include duplicated parameters. A unique parameter is defined by a combination of a
/// [name](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterName)
/// and
/// [location](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterIn).
/// The list can use the
/// [Reference Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#referenceObject)
/// to link to parameters that are defined at the
/// [OpenAPI Object's components/parameters](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#componentsParameters).
#[serde(skip_serializing_if = "Option::is_none")]
pub parameters: Option<Vec<ObjectOrReference<Parameter>>>,
/// The request body applicable for this operation. The requestBody is only supported in HTTP methods where the HTTP 1.1 specification RFC7231 has explicitly defined semantics for request bodies. In other cases where the HTTP spec is vague, requestBody SHALL be ignored by consumers.
#[serde(skip_serializing_if = "Option::is_none", rename = "requestBody")]
pub request_body: Option<ObjectOrReference<RequestBody>>,
/// The list of possible responses as they are returned from executing this operation.
///
/// A container for the expected responses of an operation. The container maps a HTTP
/// response code to the expected response.
///
/// The documentation is not necessarily expected to cover all possible HTTP response codes
/// because they may not be known in advance. However, documentation is expected to cover
/// a successful operation response and any known errors.
///
/// The `default` MAY be used as a default response object for all HTTP codes that are not
/// covered individually by the specification.
///
/// The `Responses Object` MUST contain at least one response code, and it SHOULD be the
/// response for a successful operation call.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#responsesObject>.
pub responses: BTreeMap<String, Response>,
/// A map of possible out-of band callbacks related to the parent operation. The key is
/// a unique identifier for the Callback Object. Each value in the map is a
/// [Callback Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#callbackObject)
/// that describes a request that may be initiated by the API provider and the
/// expected responses. The key value used to identify the callback object is
/// an expression, evaluated at runtime, that identifies a URL to use for the
/// callback operation.
#[serde(skip_serializing_if = "Option::is_none")]
pub callbacks: Option<BTreeMap<String, Callback>>,
/// Declares this operation to be deprecated. Consumers SHOULD refrain from usage
/// of the declared operation. Default value is `false`.
#[serde(skip_serializing_if = "Option::is_none")]
pub deprecated: Option<bool>,
// FIXME: Implement
// /// A declaration of which security mechanisms can be used for this operation. The list of
// /// values includes alternative security requirement objects that can be used. Only one
// /// of the security requirement objects need to be satisfied to authorize a request.
// /// This definition overrides any declared top-level
// /// [`security`](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#oasSecurity).
// /// To remove a top-level security declaration, an empty array can be used.
// pub security: Option<SecurityRequirement>,
/// An alternative `server` array to service this operation. If an alternative `server`
/// object is specified at the Path Item Object or Root level, it will be overridden by
/// this value.
#[serde(skip_serializing_if = "Option::is_none")]
pub servers: Option<Vec<Server>>,
}
// FIXME: Verify against OpenAPI 3.0
/// Describes a single operation parameter.
/// A unique parameter is defined by a combination of a
/// [name](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterName)
/// and [location](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterIn).
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Parameter {
/// The name of the parameter.
pub name: String,
/// values depend on parameter type
/// may be `header`, `query`, 'path`, `formData`
#[serde(rename = "in")]
pub location: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub required: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub schema: Option<Schema>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "uniqueItems")]
pub unique_items: Option<bool>,
/// string, number, boolean, integer, array, file ( only for formData )
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "type")]
pub param_type: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub format: Option<String>,
/// A brief description of the parameter. This could contain examples
/// of use. GitHub Flavored Markdown is allowed.
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
// collectionFormat: ???
// default: ???
// maximum ?
// exclusiveMaximum ??
// minimum ??
// exclusiveMinimum ??
// maxLength ??
// minLength ??
// pattern ??
// maxItems ??
// minItems ??
// enum ??
// multipleOf ??
// allowEmptyValue ( for query / body params )
/// Describes how the parameter value will be serialized depending on the type of the parameter
/// value. Default values (based on value of in): for `query` - `form`; for `path` - `simple`; for
/// `header` - `simple`; for cookie - `form`.
#[serde(skip_serializing_if = "Option::is_none")]
style: Option<ParameterStyle>,
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "camelCase")]
enum ParameterStyle {
Form,
Simple,
}
// FIXME: Verify against OpenAPI 3.0
/// The Schema Object allows the definition of input and output data types.
/// These types can be objects, but also primitives and arrays.
/// This object is an extended subset of the
/// [JSON Schema Specification Wright Draft 00](http://json-schema.org/).
/// For more information about the properties, see
/// [JSON Schema Core](https://tools.ietf.org/html/draft-wright-json-schema-00) and
/// [JSON Schema Validation](https://tools.ietf.org/html/draft-wright-json-schema-validation-00).
/// Unless stated otherwise, the property definitions follow the JSON Schema.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#schemaObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Schema {
/// [JSON reference](https://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03)
/// path to another definition
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "$ref")]
pub ref_path: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "type")]
pub schema_type: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub format: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "enum")]
pub enum_values: Option<Vec<String>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub required: Option<Vec<String>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub items: Option<Box<Schema>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<BTreeMap<String, Schema>>,
#[serde(skip_serializing_if = "Option::is_none", rename = "readOnly")]
pub read_only: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub nullable: Option<bool>,
// FIXME: Why can this be a "boolean" (as per the spec)? It doesn't make sense. Here it's not.
/// Value can be boolean or object. Inline or referenced schema MUST be of a
/// [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#schemaObject)
/// and not a standard JSON Schema.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#properties>.
#[serde(
skip_serializing_if = "Option::is_none",
rename = "additionalProperties"
)]
pub additional_properties: Option<ObjectOrReference<Box<Schema>>>,
/// A free-form property to include an example of an instance for this schema.
/// To represent examples that cannot be naturally represented in JSON or YAML,
/// a string value can be used to contain the example with escaping where necessary.
/// NOTE: According to [spec], _Primitive data types in the OAS are based on the
/// types supported by the JSON Schema Specification Wright Draft 00._
    /// This suggests using
    /// [`serde_json::Value`](https://docs.serde.rs/serde_json/value/enum.Value.html). See the [spec](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#data-types).
#[serde(skip_serializing_if = "Option::is_none")]
pub example: Option<serde_json::value::Value>,
#[serde(skip_serializing_if = "Option::is_none")]
pub title: Option<String>,
// The following properties are taken directly from the JSON Schema definition and
// follow the same specifications:
// multipleOf
// maximum
// exclusiveMaximum
// minimum
// exclusiveMinimum
// maxLength
// minLength
// pattern (This string SHOULD be a valid regular expression, according to the ECMA 262 regular expression dialect)
// maxItems
// minItems
// uniqueItems
// maxProperties
// minProperties
// required
// enum
// The following properties are taken from the JSON Schema definition but their
// definitions were adjusted to the OpenAPI Specification.
// - type - Value MUST be a string. Multiple types via an array are not supported.
// - allOf - Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.
// - oneOf - Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.
// - anyOf - Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.
// - not - Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.
// - items - Value MUST be an object and not an array. Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema. `items` MUST be present if the `type` is `array`.
// - properties - Property definitions MUST be a [Schema Object](#schemaObject) and not a standard JSON Schema (inline or referenced).
// - additionalProperties - Value can be boolean or object. Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.
// - description - [CommonMark syntax](http://spec.commonmark.org/) MAY be used for rich text representation.
// - format - See [Data Type Formats](#dataTypeFormat) for further details. While relying on JSON Schema's defined formats, the OAS offers a few additional predefined formats.
// - default - The default value represents what would be assumed by the consumer of the input as the value of the schema if one is not provided. Unlike JSON Schema, the value MUST conform to the defined type for the Schema Object defined at the same level. For example, if `type` is `string`, then `default` can be `"foo"` but cannot be `1`.
/// The default value represents what would be assumed by the consumer of the input as the value
/// of the schema if one is not provided. Unlike JSON Schema, the value MUST conform to the
/// defined type for the Schema Object defined at the same level. For example, if type is
/// `string`, then `default` can be `"foo"` but cannot be `1`.
#[serde(skip_serializing_if = "Option::is_none")]
pub default: Option<serde_json::Value>,
#[serde(skip_serializing_if = "Option::is_none")]
pub minimum: Option<serde_json::Value>,
/// Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard
/// JSON Schema.
#[serde(rename = "allOf", skip_serializing_if = "Option::is_none")]
pub all_of: Option<Vec<ObjectOrReference<Schema>>>,
}
impl Schema {
/// Returns all ref_paths
fn collect_ref_paths<'a>(&'a self) -> Box<dyn Iterator<Item = &String> + 'a> {
Box::new(
            self.ref_path
                .iter()
.chain(self.properties.iter().flat_map(|hashmap| {
hashmap
.values()
.flat_map(|schema| schema.collect_ref_paths())
}))
.chain(
self.items
.iter()
.flat_map(|schema| schema.collect_ref_paths()),
)
.chain(
self.additional_properties
.iter()
                    .filter_map(|obj_or_ref| match obj_or_ref {
                        ObjectOrReference::Object(schema) => Some(schema),
                        _ => None,
                    })
.flat_map(|schema| schema.collect_ref_paths()),
),
)
}
}
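// Sketch of how `collect_ref_paths` walks nested schemas; the `$ref` value is
// made up for illustration.
#[cfg(test)]
mod ref_path_example {
    use super::*;

    #[test]
    fn collects_refs_from_nested_properties() {
        let inner = Schema {
            ref_path: Some("#/components/schemas/Pet".to_string()),
            ..Schema::default()
        };
        let mut props = BTreeMap::new();
        props.insert("pet".to_string(), inner);
        let outer = Schema {
            properties: Some(props),
            ..Schema::default()
        };
        let expected = "#/components/schemas/Pet".to_string();
        let refs: Vec<&String> = outer.collect_ref_paths().collect();
        assert_eq!(refs, vec![&expected]);
    }
}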
/// Describes a single response from an API Operation, including design-time, static `links`
/// to operations based on the response.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#responseObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Response {
/// A short description of the response.
/// [CommonMark syntax](http://spec.commonmark.org/) MAY be used for rich text representation.
pub description: Option<String>,
/// Maps a header name to its definition.
/// [RFC7230](https://tools.ietf.org/html/rfc7230#page-22) states header names are case
/// insensitive. If a response header is defined with the name `"Content-Type"`, it SHALL
/// be ignored.
#[serde(skip_serializing_if = "Option::is_none")]
pub headers: Option<BTreeMap<String, ObjectOrReference<Header>>>,
/// A map containing descriptions of potential response payloads. The key is a media type
/// or [media type range](https://tools.ietf.org/html/rfc7231#appendix-D) and the value
/// describes it. For responses that match multiple keys, only the most specific key is
/// applicable. e.g. text/plain overrides text/*
#[serde(skip_serializing_if = "Option::is_none")]
pub content: Option<BTreeMap<String, MediaType>>,
/// A map of operations links that can be followed from the response. The key of the map
/// is a short name for the link, following the naming constraints of the names for
/// [Component Objects](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#componentsObject).
#[serde(skip_serializing_if = "Option::is_none")]
pub links: Option<BTreeMap<String, ObjectOrReference<Link>>>,
// TODO: Add "Specification Extensions" https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#specificationExtensions}
}
/// The Header Object follows the structure of the
/// [Parameter Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterObject)
/// with the following changes:
/// 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map.
/// 1. `in` MUST NOT be specified, it is implicitly in `header`.
/// 1. All traits that are affected by the location MUST be applicable to a location of
/// `header` (for example, [`style`](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterStyle)).
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#headerObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Header {
// FIXME: Is the third change properly implemented?
// FIXME: Merge `ObjectOrReference<Header>::Reference` and `ParameterOrRef::Reference`
#[serde(skip_serializing_if = "Option::is_none")]
pub required: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub schema: Option<Schema>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "uniqueItems")]
pub unique_items: Option<bool>,
/// string, number, boolean, integer, array, file ( only for formData )
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "type")]
pub param_type: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub format: Option<String>,
/// A brief description of the parameter. This could contain examples
/// of use. GitHub Flavored Markdown is allowed.
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
// collectionFormat: ???
// default: ???
// maximum ?
// exclusiveMaximum ??
// minimum ??
// exclusiveMinimum ??
// maxLength ??
// minLength ??
// pattern ??
// maxItems ??
// minItems ??
// enum ??
// multipleOf ??
// allowEmptyValue ( for query / body params )
}
/// Describes a single request body.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#requestBodyObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct RequestBody {
/// A brief description of the request body. This could contain examples of use.
/// [CommonMark syntax](http://spec.commonmark.org/) MAY be used for rich text representation.
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// The content of the request body. The key is a media type or
/// [media type range](https://tools.ietf.org/html/rfc7231#appendix-D) and the
/// value describes it. For requests that match multiple keys, only the most specific key
/// is applicable. e.g. text/plain overrides text/*
pub content: BTreeMap<String, MediaType>,
#[serde(skip_serializing_if = "Option::is_none")]
pub required: Option<bool>,
}
/// The Link object represents a possible design-time link for a response.
///
/// The presence of a link does not guarantee the caller's ability to successfully invoke it,
/// rather it provides a known relationship and traversal mechanism between responses and
/// other operations.
///
/// Unlike _dynamic_ links (i.e. links provided *in* the response payload), the OAS linking
/// mechanism does not require link information in the runtime response.
///
/// For computing links, and providing instructions to execute them, a
/// [runtime expression](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#runtimeExpression)
/// is used for accessing values in an operation and using them as parameters while invoking
/// the linked operation.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#linkObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(untagged)]
pub enum Link {
/// A relative or absolute reference to an OAS operation. This field is mutually exclusive
/// of the `operationId` field, and MUST point to an
/// [Operation Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#operationObject).
/// Relative `operationRef` values MAY be used to locate an existing
/// [Operation Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#operationObject)
/// in the OpenAPI definition.
Ref {
#[serde(rename = "operationRef")]
operation_ref: String,
// FIXME: Implement
// /// A map representing parameters to pass to an operation as specified with `operationId`
// /// or identified via `operationRef`. The key is the parameter name to be used, whereas
// /// the value can be a constant or an expression to be evaluated and passed to the
// /// linked operation. The parameter name can be qualified using the
// /// [parameter location](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterIn)
// /// `[{in}.]{name}` for operations that use the same parameter name in different
// /// locations (e.g. path.id).
// parameters: BTreeMap<String, Any | {expression}>,
#[serde(skip_serializing_if = "Option::is_none")]
parameters: Option<BTreeMap<String, String>>,
// FIXME: Implement
// /// A literal value or
// /// [{expression}](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#runtimeExpression)
// /// to use as a request body when calling the target operation.
// #[serde(rename = "requestBody")]
// request_body: Any | {expression}
/// A description of the link. [CommonMark syntax](http://spec.commonmark.org/) MAY be
/// used for rich text representation.
#[serde(skip_serializing_if = "Option::is_none")]
description: Option<String>,
/// A server object to be used by the target operation.
#[serde(skip_serializing_if = "Option::is_none")]
server: Option<Server>,
// TODO: Add "Specification Extensions" https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#specificationExtension
},
/// The name of an _existing_, resolvable OAS operation, as defined with a unique
/// `operationId`. This field is mutually exclusive of the `operationRef` field.
Id {
#[serde(rename = "operationId")]
operation_id: String,
// FIXME: Implement
// /// A map representing parameters to pass to an operation as specified with `operationId`
// /// or identified via `operationRef`. The key is the parameter name to be used, whereas
// /// the value can be a constant or an expression to be evaluated and passed to the
// /// linked operation. The parameter name can be qualified using the
// /// [parameter location](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterIn)
// /// `[{in}.]{name}` for operations that use the same parameter name in different
// /// locations (e.g. path.id).
// parameters: BTreeMap<String, Any | {expression}>,
#[serde(skip_serializing_if = "Option::is_none")]
parameters: Option<BTreeMap<String, String>>,
// FIXME: Implement
// /// A literal value or
// /// [{expression}](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#runtimeExpression)
// /// to use as a request body when calling the target operation.
// #[serde(rename = "requestBody")]
// request_body: Any | {expression}
/// A description of the link. [CommonMark syntax](http://spec.commonmark.org/) MAY be
/// used for rich text representation.
#[serde(skip_serializing_if = "Option::is_none")]
description: Option<String>,
/// A server object to be used by the target operation.
#[serde(skip_serializing_if = "Option::is_none")]
server: Option<Server>,
// TODO: Add "Specification Extensions" https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#specificationExtension
},
}
/// Each Media Type Object provides schema and examples for the media type identified by its key.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#media-type-object>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct MediaType {
/// The schema defining the type used for the request body.
#[serde(skip_serializing_if = "Option::is_none")]
pub schema: Option<ObjectOrReference<Schema>>,
/// Example of the media type.
#[serde(flatten, skip_serializing_if = "Option::is_none")]
pub examples: Option<MediaTypeExample>,
/// A map between a property name and its encoding information. The key, being the
/// property name, MUST exist in the schema as a property. The encoding object SHALL
/// only apply to `requestBody` objects when the media type is `multipart`
/// or `application/x-www-form-urlencoded`.
#[serde(skip_serializing_if = "Option::is_none")]
pub encoding: Option<BTreeMap<String, Encoding>>,
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(untagged)]
pub enum MediaTypeExample {
/// Example of the media type. The example object SHOULD be in the correct format as
/// specified by the media type. The `example` field is mutually exclusive of the
/// `examples` field. Furthermore, if referencing a `schema` which contains an example,
/// the `example` value SHALL override the example provided by the schema.
Example { example: serde_json::Value },
/// Examples of the media type. Each example object SHOULD match the media type and
/// specified schema if present. The `examples` field is mutually exclusive of
/// the `example` field. Furthermore, if referencing a `schema` which contains an
/// example, the `examples` value SHALL override the example provided by the schema.
Examples {
examples: BTreeMap<String, ObjectOrReference<Example>>,
},
}
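// Sketch: because `MediaTypeExample` is untagged, serde selects the variant by
// which key is present (`example` vs. `examples`). The value is illustrative.
#[cfg(test)]
mod media_type_example_deser {
    use super::*;

    #[test]
    fn picks_variant_by_key() {
        let single: MediaTypeExample = serde_json::from_str(r#"{ "example": 42 }"#).unwrap();
        assert!(matches!(single, MediaTypeExample::Example { .. }));
    }
}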
/// A single encoding definition applied to a single schema property.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Encoding {
/// The Content-Type for encoding a specific property. Default value depends on the
/// property type: for `string` with `format` being `binary` – `application/octet-stream`;
/// for other primitive types – `text/plain`; for `object` - `application/json`;
/// for `array` – the default is defined based on the inner type. The value can be a
/// specific media type (e.g. `application/json`), a wildcard media type
/// (e.g. `image/*`), or a comma-separated list of the two types.
#[serde(skip_serializing_if = "Option::is_none", rename = "contentType")]
pub content_type: Option<String>,
/// A map allowing additional information to be provided as headers, for example
/// `Content-Disposition`. `Content-Type` is described separately and SHALL be
/// ignored in this section. This property SHALL be ignored if the request body
/// media type is not a `multipart`.
#[serde(skip_serializing_if = "Option::is_none")]
pub headers: Option<BTreeMap<String, ObjectOrReference<Header>>>,
/// Describes how a specific property value will be serialized depending on its type.
/// See [Parameter Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterObject)
/// for details on the
/// [`style`](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterStyle)
/// property. The behavior follows the same values as `query` parameters, including
/// default values. This property SHALL be ignored if the request body media type
/// is not `application/x-www-form-urlencoded`.
#[serde(skip_serializing_if = "Option::is_none")]
pub style: Option<String>,
/// When this is true, property values of type `array` or `object` generate
/// separate parameters for each value of the array, or key-value-pair of the map.
/// For other types of properties this property has no effect. When
/// [`style`](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#encodingStyle)
/// is `form`, the default value is `true`. For all other styles, the default value
/// is `false`. This property SHALL be ignored if the request body media type is
/// not `application/x-www-form-urlencoded`.
#[serde(skip_serializing_if = "Option::is_none")]
pub explode: Option<bool>,
/// Determines whether the parameter value SHOULD allow reserved characters, as defined
/// by [RFC3986](https://tools.ietf.org/html/rfc3986#section-2.2) `:/?#[]@!$&'()*+,;=`
/// to be included without percent-encoding. The default value is `false`. This
/// property SHALL be ignored if the request body media type is
/// not `application/x-www-form-urlencoded`.
#[serde(skip_serializing_if = "Option::is_none", rename = "allowReserved")]
pub allow_reserved: Option<bool>,
}
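// Sketch of a typical encoding entry for a form/multipart field; the values are
// assumptions for illustration only.
#[cfg(test)]
mod encoding_deser_example {
    use super::*;

    #[test]
    fn deserializes_content_type_and_explode() {
        let enc: Encoding =
            serde_json::from_str(r#"{ "contentType": "application/json", "explode": true }"#).unwrap();
        assert_eq!(enc.content_type.as_deref(), Some("application/json"));
        assert_eq!(enc.explode, Some(true));
    }
}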
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#exampleObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Example {
/// Short description for the example.
#[serde(skip_serializing_if = "Option::is_none")]
pub summary: Option<String>,
/// Long description for the example.
/// [CommonMark syntax](http://spec.commonmark.org/) MAY be used for rich text representation.
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
// FIXME: Implement (merge with externalValue as enum)
/// Embedded literal example. The `value` field and `externalValue` field are mutually
    /// exclusive. To represent examples of media types that cannot be naturally represented
    /// in JSON or YAML, use a string value to contain the example, escaping where necessary.
#[serde(skip_serializing_if = "Option::is_none")]
pub value: Option<serde_json::Value>,
// FIXME: Implement (merge with value as enum)
// /// A URL that points to the literal example. This provides the capability to reference
// /// examples that cannot easily be included in JSON or YAML documents. The `value` field
// /// and `externalValue` field are mutually exclusive.
// #[serde(skip_serializing_if = "Option::is_none")]
// pub externalValue: Option<String>,
// TODO: Add "Specification Extensions" https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#specificationExtensions}
}
/// Defines a security scheme that can be used by the operations. Supported schemes are
/// HTTP authentication, an API key (either as a header or as a query parameter),
/// OAuth2's common flows (implicit, password, application and access code) as defined
/// in [RFC6749](https://tools.ietf.org/html/rfc6749), and
/// [OpenID Connect Discovery](https://tools.ietf.org/html/draft-ietf-oauth-discovery-06).
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#securitySchemeObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(tag = "type")]
pub enum SecurityScheme {
#[serde(rename = "apiKey")]
ApiKey {
name: String,
#[serde(rename = "in")]
location: String,
},
#[serde(rename = "http")]
Http {
scheme: String,
#[serde(rename = "bearerFormat")]
bearer_format: String,
},
#[serde(rename = "oauth2")]
OAuth2 { flows: Flows },
#[serde(rename = "openIdConnect")]
OpenIdConnect {
#[serde(rename = "openIdConnectUrl")]
open_id_connect_url: String,
},
}
/// Allows configuration of the supported OAuth Flows.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#oauth-flows-object>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct Flows {
#[serde(skip_serializing_if = "Option::is_none")]
pub implicit: Option<ImplicitFlow>,
#[serde(skip_serializing_if = "Option::is_none")]
pub password: Option<PasswordFlow>,
#[serde(skip_serializing_if = "Option::is_none")]
pub client_credentials: Option<ClientCredentialsFlow>,
#[serde(skip_serializing_if = "Option::is_none")]
pub authorization_code: Option<AuthorizationCodeFlow>,
}
/// Configuration details for an implicit OAuth Flow.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#oauth-flow-object>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct ImplicitFlow {
pub authorization_url: Url,
#[serde(skip_serializing_if = "Option::is_none")]
pub refresh_url: Option<Url>,
pub scopes: BTreeMap<String, String>,
}
/// Configuration details for a password OAuth Flow.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#oauth-flow-object>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct PasswordFlow {
token_url: Url,
#[serde(skip_serializing_if = "Option::is_none")]
pub refresh_url: Option<Url>,
pub scopes: BTreeMap<String, String>,
}
/// Configuration details for a client credentials OAuth Flow.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#oauth-flow-object>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct ClientCredentialsFlow {
token_url: Url,
#[serde(skip_serializing_if = "Option::is_none")]
pub refresh_url: Option<Url>,
pub scopes: BTreeMap<String, String>,
}
/// Configuration details for an authorization code OAuth Flow.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#oauth-flow-object>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct AuthorizationCodeFlow {
pub authorization_url: Url,
token_url: Url,
#[serde(skip_serializing_if = "Option::is_none")]
pub refresh_url: Option<Url>,
pub scopes: BTreeMap<String, String>,
}
// TODO: Implement
/// A map of possible out-of band callbacks related to the parent operation. Each value in
/// the map is a Path Item Object that describes a set of requests that may be initiated by
/// the API provider and the expected responses. The key value used to identify the callback
/// object is an expression, evaluated at runtime, that identifies a URL to use for the
/// callback operation.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#callbackObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Callback(
/// A Path Item Object used to define a callback request and expected responses.
serde_json::Value, // TODO: Add "Specification Extensions" https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#specificationExtensions}
);
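// Example (following the OpenAPI spec's conventions): a callback map key is a
// runtime expression such as `{$request.body#/callbackUrl}`, and its value is
// the Path Item describing the request the API provider will make to that URL.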
// FIXME: Implement
// /// Allows configuration of the supported OAuth Flows.
// /// https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#oauthFlowsObject
// #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
// pub struct OAuthFlows {
// }
/// Adds metadata to a single tag that is used by the
/// [Operation Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#operationObject).
/// It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#tagObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Tag {
/// The name of the tag.
pub name: String,
/// A short description for the tag.
/// [CommonMark syntax](http://spec.commonmark.org/) MAY be used for rich text representation.
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
// /// Additional external documentation for this tag.
// #[serde(skip_serializing_if = "Option::is_none")]
// pub external_docs: Option<Vec<ExternalDoc>>,
// TODO: Add "Specification Extensions" https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#specificationExtensions}
}
/// Allows referencing an external resource for extended documentation.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#externalDocumentationObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct ExternalDoc {
/// The URL for the target documentation.
pub url: Url,
/// A short description of the target documentation.
/// [CommonMark syntax](http://spec.commonmark.org/) MAY be used for rich text representation.
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
// TODO: Add "Specification Extensions" https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#specificationExtensions}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_security_scheme_oauth_deser() {
const IMPLICIT_OAUTH2_SAMPLE: &str = r#"{
"type": "oauth2",
"flows": {
"implicit": {
"authorizationUrl": "https://example.com/api/oauth/dialog",
"scopes": {
"write:pets": "modify pets in your account",
"read:pets": "read your pets"
}
},
"authorizationCode": {
"authorizationUrl": "https://example.com/api/oauth/dialog",
"tokenUrl": "https://example.com/api/oauth/token",
"scopes": {
"write:pets": "modify pets in your account",
"read:pets": "read your pets"
}
}
}
}"#;
        let obj: SecurityScheme = serde_json::from_str(IMPLICIT_OAUTH2_SAMPLE).unwrap();
match obj {
SecurityScheme::OAuth2 { flows } => {
assert!(flows.implicit.is_some());
let implicit = flows.implicit.unwrap();
assert_eq!(
implicit.authorization_url,
Url::parse("https://example.com/api/oauth/dialog").unwrap()
);
assert!(implicit.scopes.contains_key("write:pets"));
assert!(implicit.scopes.contains_key("read:pets"));
assert!(flows.authorization_code.is_some());
let auth_code = flows.authorization_code.unwrap();
assert_eq!(
auth_code.authorization_url,
Url::parse("https://example.com/api/oauth/dialog").unwrap()
);
assert_eq!(
auth_code.token_url,
Url::parse("https://example.com/api/oauth/token").unwrap()
);
assert!(implicit.scopes.contains_key("write:pets"));
assert!(implicit.scopes.contains_key("read:pets"));
}
            _ => panic!("wrong security scheme type"),
}
}
}
| 49.803665 | 346 | 0.676251 |
3a5edef480385951b88b88bd2bafb280059f7ae4 | 8,210 | use std::convert::TryInto;
use beserial::{Deserialize, Serialize};
use nimiq_hash::{Blake2bHash, Blake2bHasher, HashOutput, Hasher};
use nimiq_keys::{Address, KeyPair, PrivateKey};
use nimiq_primitives::account::AccountType;
use nimiq_primitives::networks::NetworkId;
use nimiq_transaction::account::htlc_contract::{AnyHash, HashAlgorithm, ProofType};
use nimiq_transaction::{SignatureProof, Transaction};
use nimiq_transaction_builder::{Recipient, TransactionBuilder};
#[test]
#[allow(unused_must_use)]
fn it_can_create_creation_transaction() {
let mut data: Vec<u8> = Vec::with_capacity(Address::SIZE * 2 + AnyHash::SIZE + 10);
let sender = Address::from([0u8; 20]);
let recipient = Address::from([0u8; 20]);
sender.serialize(&mut data);
recipient.serialize(&mut data);
HashAlgorithm::Blake2b.serialize(&mut data);
AnyHash::from([0u8; 32]).serialize(&mut data);
Serialize::serialize(&2u8, &mut data);
Serialize::serialize(&1000u64, &mut data);
let transaction = Transaction::new_contract_creation(
data,
sender.clone(),
AccountType::Basic,
AccountType::HTLC,
100.try_into().unwrap(),
0.try_into().unwrap(),
0,
NetworkId::Dummy,
);
let mut htlc_builder = Recipient::new_htlc_builder();
htlc_builder
.with_sender(sender.clone())
.with_recipient(recipient)
.with_blake2b_hash(Blake2bHash::from([0u8; 32]), 2)
.with_timeout(1000);
let mut builder = TransactionBuilder::new();
builder
.with_sender(sender)
.with_recipient(htlc_builder.generate().unwrap())
.with_value(100.try_into().unwrap())
.with_validity_start_height(0)
.with_network_id(NetworkId::Dummy);
let result = builder
.generate()
.expect("Builder should be able to create transaction");
let result = result.unwrap_basic();
assert_eq!(result.transaction, transaction);
}
fn prepare_outgoing_transaction() -> (
Transaction,
AnyHash,
AnyHash,
KeyPair,
SignatureProof,
KeyPair,
SignatureProof,
) {
let sender_priv_key: PrivateKey = Deserialize::deserialize_from_vec(
&hex::decode("9d5bd02379e7e45cf515c788048f5cf3c454ffabd3e83bd1d7667716c325c3c0").unwrap(),
)
.unwrap();
let recipient_priv_key: PrivateKey = Deserialize::deserialize_from_vec(
&hex::decode("bd1cfcd49a81048c8c8d22a25766bd01bfa0f6b2eb0030f65241189393af96a2").unwrap(),
)
.unwrap();
let sender_key_pair = KeyPair::from(sender_priv_key);
let recipient_key_pair = KeyPair::from(recipient_priv_key);
let pre_image = AnyHash::from([1u8; 32]);
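    // The hash root below is the pre-image hashed twice with Blake2b, i.e.
    // hash_root = Blake2b(Blake2b(pre_image)), matching the two-step hash chain
    // used in the creation test above.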
let hash_root = AnyHash::from(<[u8; 32]>::from(
Blake2bHasher::default().digest(
Blake2bHasher::default()
.digest(pre_image.as_bytes())
.as_bytes(),
),
));
let tx = Transaction::new_extended(
Address::from([0u8; 20]),
AccountType::HTLC,
Address::from([1u8; 20]),
AccountType::Basic,
1000.try_into().unwrap(),
0.try_into().unwrap(),
vec![],
1,
NetworkId::Dummy,
);
let sender_signature = sender_key_pair.sign(&tx.serialize_content()[..]);
let recipient_signature = recipient_key_pair.sign(&tx.serialize_content()[..]);
let sender_signature_proof = SignatureProof::from(sender_key_pair.public, sender_signature);
let recipient_signature_proof =
SignatureProof::from(recipient_key_pair.public, recipient_signature);
(
tx,
pre_image,
hash_root,
sender_key_pair,
sender_signature_proof,
recipient_key_pair,
recipient_signature_proof,
)
}
#[test]
#[allow(unused_must_use)]
fn it_can_create_regular_transfer() {
let (mut tx, pre_image, hash_root, _, _, recipient_key_pair, recipient_signature_proof) =
prepare_outgoing_transaction();
// regular: valid Blake-2b
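    // Proof layout, in serialization order: proof type, hash algorithm,
    // hash depth, hash root, pre-image, recipient signature proof.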
let mut proof =
Vec::with_capacity(3 + 2 * AnyHash::SIZE + recipient_signature_proof.serialized_size());
Serialize::serialize(&ProofType::RegularTransfer, &mut proof);
Serialize::serialize(&HashAlgorithm::Blake2b, &mut proof);
Serialize::serialize(&1u8, &mut proof);
Serialize::serialize(&hash_root, &mut proof);
Serialize::serialize(&pre_image, &mut proof);
Serialize::serialize(&recipient_signature_proof, &mut proof);
tx.proof = proof;
let mut builder = TransactionBuilder::new();
builder
.with_sender(Address::from([0u8; 20]))
.with_sender_type(AccountType::HTLC)
.with_recipient(Recipient::new_basic(Address::from([1u8; 20])))
.with_value(1000.try_into().unwrap())
.with_fee(0.try_into().unwrap())
.with_validity_start_height(1)
.with_network_id(NetworkId::Dummy);
let proof_builder = builder
.generate()
.expect("Builder should be able to create transaction");
let mut proof_builder = proof_builder.unwrap_htlc();
let proof = proof_builder.signature_with_key_pair(&recipient_key_pair);
proof_builder.regular_transfer(HashAlgorithm::Blake2b, pre_image, 1, hash_root, proof);
let tx2 = proof_builder
.generate()
.expect("Builder should be able to create proof");
assert_eq!(tx2, tx);
}
#[test]
#[allow(unused_must_use)]
fn it_can_create_early_resolve() {
let (
mut tx,
_,
_,
sender_key_pair,
sender_signature_proof,
recipient_key_pair,
recipient_signature_proof,
) = prepare_outgoing_transaction();
// early resolve: valid
let mut proof = Vec::with_capacity(
1 + recipient_signature_proof.serialized_size() + sender_signature_proof.serialized_size(),
);
Serialize::serialize(&ProofType::EarlyResolve, &mut proof);
Serialize::serialize(&recipient_signature_proof, &mut proof);
Serialize::serialize(&sender_signature_proof, &mut proof);
tx.proof = proof;
let mut builder = TransactionBuilder::new();
builder
.with_sender(Address::from([0u8; 20]))
.with_sender_type(AccountType::HTLC)
.with_recipient(Recipient::new_basic(Address::from([1u8; 20])))
.with_value(1000.try_into().unwrap())
.with_fee(0.try_into().unwrap())
.with_validity_start_height(1)
.with_network_id(NetworkId::Dummy);
let proof_builder = builder
.generate()
.expect("Builder should be able to create transaction");
let mut proof_builder = proof_builder.unwrap_htlc();
let sender_proof = proof_builder.signature_with_key_pair(&sender_key_pair);
let recipient_proof = proof_builder.signature_with_key_pair(&recipient_key_pair);
proof_builder.early_resolve(sender_proof, recipient_proof);
let tx2 = proof_builder
.generate()
.expect("Builder should be able to create proof");
assert_eq!(tx2, tx);
}
#[test]
#[allow(unused_must_use)]
fn it_can_create_timeout_resolve() {
let (mut tx, _, _, sender_key_pair, sender_signature_proof, _, _) =
prepare_outgoing_transaction();
// timeout resolve: valid
let mut proof = Vec::with_capacity(1 + sender_signature_proof.serialized_size());
Serialize::serialize(&ProofType::TimeoutResolve, &mut proof);
Serialize::serialize(&sender_signature_proof, &mut proof);
tx.proof = proof;
let mut builder = TransactionBuilder::new();
builder
.with_sender(Address::from([0u8; 20]))
.with_sender_type(AccountType::HTLC)
.with_recipient(Recipient::new_basic(Address::from([1u8; 20])))
.with_value(1000.try_into().unwrap())
.with_fee(0.try_into().unwrap())
.with_validity_start_height(1)
.with_network_id(NetworkId::Dummy);
let proof_builder = builder
.generate()
.expect("Builder should be able to create transaction");
let mut proof_builder = proof_builder.unwrap_htlc();
let proof = proof_builder.signature_with_key_pair(&sender_key_pair);
proof_builder.timeout_resolve(proof);
let tx2 = proof_builder
.generate()
.expect("Builder should be able to create proof");
assert_eq!(tx2, tx);
}
| 35.236052 | 99 | 0.674909 |
f931b03ab30d814ea0f3cadbb8ec91b081c27cef | 817 | // https://codeforces.com/problemset/problem/160/A
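// Greedy approach: sort the coin values in descending order, then take the
// largest coins one by one until the taken sum strictly exceeds the sum of the
// remaining coins; the answer is the number of coins taken.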
use std::io;
fn main() {
let mut n = String::new();
io::stdin().read_line(&mut n).unwrap();
//let n: i64 = n.trim().parse().unwrap();
let mut line = String::new();
io::stdin().read_line(&mut line).unwrap();
let mut a: Vec<i64> = line
.split_whitespace()
.map(|x| x.parse().unwrap())
.collect();
a.sort_by(|x, y| y.cmp(x));
    let total: i64 = a.iter().sum();
let mut mine = 0;
let mut answer = 0;
for item in a {
let twin = total - mine;
if mine > twin {
break;
} else {
mine += item;
answer += 1;
}
}
println!("{}", answer);
}
| 19 | 51 | 0.452876 |
7975bda6218e9507500ef5c9e7ef6950fb2c4b97 | 2,031 | use failure::{format_err, Error};
use mixer_wrappers::{
oauth::{check_shortcode, get_shortcode, get_token_from_code, ShortcodeStatus},
REST,
};
use serde_json::Value;
use std::{thread, time::Duration};
const USERNAME: &str = "YOUR_USERNAME";
const CLIENT_ID: &str = "YOUR_CLIENT_ID";
const CLIENT_SECRET: &str = "CLIENT_SECRET";
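// Shortcode OAuth flow, as implemented below: request a short code, show it to
// the user so they can approve it at https://mixer.com/go, poll the handle
// until the grant resolves, then exchange the resulting code for an access token.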
fn get_access_token() -> Result<String, Error> {
let resp = get_shortcode(CLIENT_ID, CLIENT_SECRET, &["user:notification:self"]).unwrap();
println!("Code: {}, go to https://mixer.com/go to enter", resp.code);
    let code = loop {
        match check_shortcode(&resp.handle) {
            ShortcodeStatus::UserGrantedAccess(ref c) => break c.to_owned(),
            ShortcodeStatus::UserDeniedAccess => return Err(format_err!("UserDeniedAccess")),
            ShortcodeStatus::HandleInvalid => return Err(format_err!("HandleInvalid")),
            _ => thread::sleep(Duration::from_secs(5)),
        }
    };
let token = get_token_from_code(
CLIENT_ID,
CLIENT_SECRET,
&["user:notification:self"],
"",
&code,
)
.unwrap();
Ok(token.access_token)
}
fn get_user_id(rest: &REST) -> Result<u64, Error> {
let text = rest.query(
"GET",
"users/search",
Some(&[("query", USERNAME), ("noCount", "true"), ("fields", "id")]),
None,
None,
)?;
let json: Value = serde_json::from_str(&text)?;
let id = json.as_array().unwrap()[0]["id"].as_u64().unwrap();
Ok(id)
}
fn main() {
let token = get_access_token().unwrap();
let rest = REST::new(CLIENT_ID);
let resp = rest
.query(
"GET",
&format!("users/{}/notifications", get_user_id(&rest).unwrap()),
Some(&[("limit", "5"), ("noCount", "true")]),
None,
Some(&token),
)
.unwrap();
println!("{}", resp);
}
| 29.434783 | 93 | 0.560807 |
1c03707b8ffdf146f1c9bb3c813198711652d7dc | 55 | pub mod board;
pub mod game;
pub mod pattern_selector;
| 13.75 | 25 | 0.781818 |
485248b44f66d916571f4acc50b367236937c0e2 | 19,823 | // Copyright 2019 The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::{
blocks::Block,
mempool::{
consts::{MEMPOOL_UNCONFIRMED_POOL_STORAGE_CAPACITY, MEMPOOL_UNCONFIRMED_POOL_WEIGHT_TRANSACTION_SKIP_COUNT},
priority::{FeePriority, PrioritizedTransaction},
unconfirmed_pool::UnconfirmedPoolError,
},
transactions::{transaction::Transaction, types::Signature},
};
use log::*;
use serde::{Deserialize, Serialize};
use std::{
collections::{BTreeMap, HashMap},
convert::TryFrom,
sync::Arc,
};
use tari_crypto::tari_utilities::hex::Hex;
pub const LOG_TARGET: &str = "c::mp::unconfirmed_pool::unconfirmed_pool_storage";
/// Configuration for the UnconfirmedPool
#[derive(Clone, Copy, Serialize, Deserialize)]
pub struct UnconfirmedPoolConfig {
/// The maximum number of transactions that can be stored in the Unconfirmed Transaction pool
pub storage_capacity: usize,
/// The maximum number of transactions that can be skipped when compiling a set of highest priority transactions,
/// skipping over large transactions are performed in an attempt to fit more transactions into the remaining space.
pub weight_tx_skip_count: usize,
}
impl Default for UnconfirmedPoolConfig {
fn default() -> Self {
Self {
storage_capacity: MEMPOOL_UNCONFIRMED_POOL_STORAGE_CAPACITY,
weight_tx_skip_count: MEMPOOL_UNCONFIRMED_POOL_WEIGHT_TRANSACTION_SKIP_COUNT,
}
}
}
/// The Unconfirmed Transaction Pool consists of all unconfirmed transactions that are ready to be included in a block
/// and they are prioritised according to the priority metric.
/// The txs_by_signature HashMap is used to find a transaction using its excess_sig, this functionality is used to match
/// transactions included in blocks with transactions stored in the pool. The txs_by_priority BTreeMap prioritise the
/// transactions in the pool according to TXPriority, it allows transactions to be inserted in sorted order by their
/// priority. The txs_by_priority BTreeMap makes it easier to select the set of highest priority transactions that can
/// be included in a block. The excess_sig of a transaction is used a key to uniquely identify a specific transaction in
/// these containers.
pub struct UnconfirmedPool {
config: UnconfirmedPoolConfig,
txs_by_signature: HashMap<Signature, PrioritizedTransaction>,
txs_by_priority: BTreeMap<FeePriority, Signature>,
}
impl UnconfirmedPool {
/// Create a new UnconfirmedPool with the specified configuration
pub fn new(config: UnconfirmedPoolConfig) -> Self {
Self {
config,
txs_by_signature: HashMap::new(),
txs_by_priority: BTreeMap::new(),
}
}
fn lowest_priority(&self) -> &FeePriority {
self.txs_by_priority.iter().next().unwrap().0
}
fn remove_lowest_priority_tx(&mut self) {
if let Some((priority, sig)) = self.txs_by_priority.iter().next().map(|(p, s)| (p.clone(), s.clone())) {
self.txs_by_signature.remove(&sig);
self.txs_by_priority.remove(&priority);
}
}
/// Insert a new transaction into the UnconfirmedPool. Low priority transactions will be removed to make space for
/// higher priority transactions. The lowest priority transactions will be removed when the maximum capacity is
/// reached and the new transaction has a higher priority than the currently stored lowest priority transaction.
#[allow(clippy::map_entry)]
pub fn insert(&mut self, tx: Arc<Transaction>) -> Result<(), UnconfirmedPoolError> {
let tx_key = tx.body.kernels()[0].excess_sig.clone();
if !self.txs_by_signature.contains_key(&tx_key) {
debug!(
target: LOG_TARGET,
"Inserting tx into unconfirmed pool: {}",
tx_key.get_signature().to_hex()
);
trace!(target: LOG_TARGET, "Transaction inserted: {}", tx);
let prioritized_tx = PrioritizedTransaction::try_from((*tx).clone())?;
if self.txs_by_signature.len() >= self.config.storage_capacity {
if prioritized_tx.priority < *self.lowest_priority() {
return Ok(());
}
self.remove_lowest_priority_tx();
}
self.txs_by_priority
.insert(prioritized_tx.priority.clone(), tx_key.clone());
self.txs_by_signature.insert(tx_key, prioritized_tx);
}
Ok(())
}
/// Insert a set of new transactions into the UnconfirmedPool
pub fn insert_txs(&mut self, txs: Vec<Arc<Transaction>>) -> Result<(), UnconfirmedPoolError> {
for tx in txs.into_iter() {
self.insert(tx)?;
}
Ok(())
}
/// Check if a transaction is available in the UnconfirmedPool
pub fn has_tx_with_excess_sig(&self, excess_sig: &Signature) -> bool {
self.txs_by_signature.contains_key(excess_sig)
}
/// Returns a set of the highest priority unconfirmed transactions, that can be included in a block
pub fn highest_priority_txs(&self, total_weight: u64) -> Result<Vec<Arc<Transaction>>, UnconfirmedPoolError> {
let mut selected_txs: Vec<Arc<Transaction>> = Vec::new();
let mut curr_weight: u64 = 0;
let mut curr_skip_count: usize = 0;
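        // Iterate from highest to lowest priority; if a transaction does not fit,
        // try up to `weight_tx_skip_count` lower-priority ones before giving up,
        // since a smaller transaction may still fit in the remaining block weight.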
for (_, tx_key) in self.txs_by_priority.iter().rev() {
let ptx = self
.txs_by_signature
.get(tx_key)
.ok_or_else(|| UnconfirmedPoolError::StorageOutofSync)?;
if curr_weight + ptx.weight <= total_weight {
if !UnconfirmedPool::find_duplicate_input(&selected_txs, &ptx.transaction) {
curr_weight += ptx.weight;
selected_txs.push(ptx.transaction.clone());
}
} else {
                // Check if some of the next few txs with slightly lower priority will fit in the remaining space.
curr_skip_count += 1;
if curr_skip_count >= self.config.weight_tx_skip_count {
break;
}
}
}
Ok(selected_txs)
}
// This will search a Vec<Arc<Transaction>> for duplicate inputs of a tx
fn find_duplicate_input(array_of_tx: &[Arc<Transaction>], tx: &Arc<Transaction>) -> bool {
for transaction in array_of_tx {
for input in transaction.body.inputs() {
if tx.body.inputs().contains(input) {
return true;
}
}
}
false
}
    /// Discard all transactions in the pool that spend an input already spent by the published block
    /// (double spends).
fn discard_double_spends(&mut self, published_block: &Block) {
let mut removed_tx_keys: Vec<Signature> = Vec::new();
for (tx_key, ptx) in self.txs_by_signature.iter() {
for input in ptx.transaction.body.inputs() {
if published_block.body.inputs().contains(input) {
self.txs_by_priority.remove(&ptx.priority);
removed_tx_keys.push(tx_key.clone());
}
}
}
for tx_key in &removed_tx_keys {
trace!(
target: LOG_TARGET,
"Removing double spends from unconfirmed pool: {:?}",
tx_key
);
self.txs_by_signature.remove(&tx_key);
}
}
    /// Remove all published transactions from the UnconfirmedPool and discard double spends.
    /// Returns the transactions that were removed because they appeared in the published block.
pub fn remove_published_and_discard_double_spends(&mut self, published_block: &Block) -> Vec<Arc<Transaction>> {
let mut removed_txs: Vec<Arc<Transaction>> = Vec::new();
published_block.body.kernels().iter().for_each(|kernel| {
if let Some(ptx) = self.txs_by_signature.get(&kernel.excess_sig) {
self.txs_by_priority.remove(&ptx.priority);
if let Some(ptx) = self.txs_by_signature.remove(&kernel.excess_sig) {
removed_txs.push(ptx.transaction);
}
}
});
// First remove published transactions before discarding double spends
self.discard_double_spends(published_block);
removed_txs
}
/// Remove all unconfirmed transactions that have become time locked. This can happen when the chain height was
/// reduced on some reorgs.
pub fn remove_timelocked(&mut self, tip_height: u64) -> Vec<Arc<Transaction>> {
let mut removed_tx_keys: Vec<Signature> = Vec::new();
for (tx_key, ptx) in self.txs_by_signature.iter() {
if ptx.transaction.min_spendable_height() > tip_height + 1 {
self.txs_by_priority.remove(&ptx.priority);
removed_tx_keys.push(tx_key.clone());
}
}
let mut removed_txs: Vec<Arc<Transaction>> = Vec::new();
for tx_key in removed_tx_keys {
trace!(
target: LOG_TARGET,
"Removing time locked transaction from unconfirmed pool: {:?}",
tx_key
);
if let Some(ptx) = self.txs_by_signature.remove(&tx_key) {
removed_txs.push(ptx.transaction);
}
}
removed_txs
}
/// Returns the total number of unconfirmed transactions stored in the UnconfirmedPool.
pub fn len(&self) -> usize {
self.txs_by_signature.len()
}
/// Returns all transaction stored in the UnconfirmedPool.
pub fn snapshot(&self) -> Vec<Arc<Transaction>> {
self.txs_by_signature
.iter()
.map(|(_, ptx)| ptx.transaction.clone())
.collect()
}
/// Returns the total weight of all transactions stored in the pool.
pub fn calculate_weight(&self) -> u64 {
self.txs_by_signature
.iter()
.fold(0, |weight, (_, ptx)| weight + ptx.transaction.calculate_weight())
}
#[cfg(test)]
/// Checks the consistency status of the Hashmap and BtreeMap
pub fn check_status(&self) -> bool {
if self.txs_by_priority.len() != self.txs_by_signature.len() {
return false;
}
self.txs_by_priority
.iter()
.all(|(_, tx_key)| self.txs_by_signature.contains_key(tx_key))
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::{consensus::Network, helpers::create_orphan_block, transactions::tari_amount::MicroTari, tx};
#[test]
fn test_insert_and_retrieve_highest_priority_txs() {
let tx1 = Arc::new(tx!(MicroTari(5_000), fee: MicroTari(50), inputs: 2, outputs: 1).0);
let tx2 = Arc::new(tx!(MicroTari(5_000), fee: MicroTari(20), inputs: 4, outputs: 1).0);
let tx3 = Arc::new(tx!(MicroTari(5_000), fee: MicroTari(100), inputs: 5, outputs: 1).0);
let tx4 = Arc::new(tx!(MicroTari(5_000), fee: MicroTari(30), inputs: 3, outputs: 1).0);
let tx5 = Arc::new(tx!(MicroTari(5_000), fee: MicroTari(55), inputs: 5, outputs: 1).0);
let mut unconfirmed_pool = UnconfirmedPool::new(UnconfirmedPoolConfig {
storage_capacity: 4,
weight_tx_skip_count: 3,
});
unconfirmed_pool
.insert_txs(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone(), tx5.clone()])
.unwrap();
// Check that lowest priority tx was removed to make room for new incoming transactions
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig),
true
);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig),
false
);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig),
true
);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig),
true
);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig),
true
);
// Retrieve the set of highest priority unspent transactions
let desired_weight = tx1.calculate_weight() + tx3.calculate_weight() + tx5.calculate_weight();
let selected_txs = unconfirmed_pool.highest_priority_txs(desired_weight).unwrap();
assert_eq!(selected_txs.len(), 3);
assert!(selected_txs.contains(&tx1));
assert!(selected_txs.contains(&tx3));
assert!(selected_txs.contains(&tx5));
        // Note that tx4 was not included, as its weight was too big to fit into the remaining
        // allocated space once the three highest priority transactions were selected
assert!(unconfirmed_pool.check_status());
}
#[test]
fn test_remove_published_txs() {
let network = Network::LocalNet;
let consensus_constants = network.create_consensus_constants();
let tx1 = Arc::new(tx!(MicroTari(10_000), fee: MicroTari(50), inputs:2, outputs: 1).0);
let tx2 = Arc::new(tx!(MicroTari(10_000), fee: MicroTari(20), inputs:3, outputs: 1).0);
let tx3 = Arc::new(tx!(MicroTari(10_000), fee: MicroTari(100), inputs:2, outputs: 1).0);
let tx4 = Arc::new(tx!(MicroTari(10_000), fee: MicroTari(30), inputs:4, outputs: 1).0);
let tx5 = Arc::new(tx!(MicroTari(10_000), fee: MicroTari(50), inputs:3, outputs: 1).0);
let tx6 = Arc::new(tx!(MicroTari(10_000), fee: MicroTari(75), inputs:2, outputs: 1).0);
let mut unconfirmed_pool = UnconfirmedPool::new(UnconfirmedPoolConfig {
storage_capacity: 10,
weight_tx_skip_count: 3,
});
unconfirmed_pool
.insert_txs(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone(), tx5.clone()])
.unwrap();
        // tx6 was never added to the unconfirmed pool, as it is an unknown transaction that was included in the block
        // by another node
let snapshot_txs = unconfirmed_pool.snapshot();
assert_eq!(snapshot_txs.len(), 5);
assert!(snapshot_txs.contains(&tx1));
assert!(snapshot_txs.contains(&tx2));
assert!(snapshot_txs.contains(&tx3));
assert!(snapshot_txs.contains(&tx4));
assert!(snapshot_txs.contains(&tx5));
let published_block = create_orphan_block(
0,
vec![(*tx1).clone(), (*tx3).clone(), (*tx5).clone()],
&consensus_constants,
);
let _ = unconfirmed_pool.remove_published_and_discard_double_spends(&published_block);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig),
false
);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig),
true
);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig),
false
);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig),
true
);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig),
false
);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig),
false
);
assert!(unconfirmed_pool.check_status());
}
#[test]
fn test_discard_double_spend_txs() {
let network = Network::LocalNet;
let consensus_constants = network.create_consensus_constants();
let tx1 = Arc::new(tx!(MicroTari(5_000), fee: MicroTari(50), inputs:2, outputs:1).0);
let tx2 = Arc::new(tx!(MicroTari(5_000), fee: MicroTari(20), inputs:3, outputs:1).0);
let tx3 = Arc::new(tx!(MicroTari(5_000), fee: MicroTari(100), inputs:2, outputs:1).0);
let tx4 = Arc::new(tx!(MicroTari(5_000), fee: MicroTari(30), inputs:2, outputs:1).0);
let mut tx5 = tx!(MicroTari(5_000), fee:MicroTari(50), inputs:3, outputs:1).0;
let mut tx6 = tx!(MicroTari(5_000), fee:MicroTari(75), inputs: 2, outputs: 1).0;
// tx1 and tx5 have a shared input. Also, tx3 and tx6 have a shared input
tx5.body.inputs_mut()[0] = tx1.body.inputs()[0].clone();
tx6.body.inputs_mut()[1] = tx3.body.inputs()[1].clone();
let tx5 = Arc::new(tx5);
let tx6 = Arc::new(tx6);
let mut unconfirmed_pool = UnconfirmedPool::new(UnconfirmedPoolConfig {
storage_capacity: 10,
weight_tx_skip_count: 3,
});
unconfirmed_pool
.insert_txs(vec![
tx1.clone(),
tx2.clone(),
tx3.clone(),
tx4.clone(),
tx5.clone(),
tx6.clone(),
])
.unwrap();
        // Publishing tx1 and tx3 turns tx5 and tx6 into double spends, which will be discarded
let published_block = create_orphan_block(
0,
vec![(*tx1).clone(), (*tx2).clone(), (*tx3).clone()],
&consensus_constants,
);
let _ = unconfirmed_pool.remove_published_and_discard_double_spends(&published_block); // Double spends are discarded
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig),
false
);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig),
false
);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig),
false
);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig),
true
);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig),
false
);
assert_eq!(
unconfirmed_pool.has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig),
false
);
assert!(unconfirmed_pool.check_status());
}
}
| 43.281659 | 125 | 0.62942 |
ed11cfb1172cc02ef7a114a3259c1a4d17e549db | 34,504 | use {
crate::leader_slot_banking_stage_timing_metrics::*,
solana_poh::poh_recorder::BankStart,
solana_runtime::transaction_error_metrics::*,
solana_sdk::{clock::Slot, saturating_add_assign},
std::time::Instant,
};
/// A summary of what happened to transactions passed to the execution pipeline.
/// Each transaction ended up in one of four buckets:
/// 1) Did not even make it to execution due to being filtered out by things like AccountInUse
/// lock conflicts or CostModel compute limits. These types of errors are retryable and
/// counted in `Self::retryable_transaction_indexes`.
/// 2) Did not execute due to some fatal error like too old, or duplicate signature. These
/// will be dropped from the transactions queue and not counted in `Self::retryable_transaction_indexes`
/// 3) Were executed and committed, captured by `committed_transactions_count` below.
/// 4) Were executed and failed commit, captured by `failed_commit_count` below.
pub(crate) struct ProcessTransactionsSummary {
// Returns true if we hit the end of the block/max PoH height for the block before
// processing all the transactions in the batch.
pub reached_max_poh_height: bool,
// Total number of transactions that were passed as candidates for execution. See description
// of struct above for possible outcomes for these transactions
pub transactions_attempted_execution_count: usize,
// Total number of transactions that made it into the block
pub committed_transactions_count: usize,
// Total number of transactions that made it into the block where the transactions
// output from execution was success/no error.
pub committed_transactions_with_successful_result_count: usize,
    // All transactions that were executed but then failed to be recorded because the
    // slot ended
pub failed_commit_count: usize,
// Indexes of transactions in the transactions slice that were not committed but are retryable
pub retryable_transaction_indexes: Vec<usize>,
// The number of transactions filtered out by the cost model
pub cost_model_throttled_transactions_count: usize,
// Total amount of time spent running the cost model
pub cost_model_us: u64,
    // Breakdown of time spent executing and committing transactions
pub execute_and_commit_timings: LeaderExecuteAndCommitTimings,
// Breakdown of all the transaction errors from transactions passed for execution
pub error_counters: TransactionErrorMetrics,
}
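// Illustrative sketch (added commentary, not upstream code): the counts above
// satisfy a simple accounting identity. Assuming a summary value `s`, the
// non-retryable drop count that feeds the metrics below can be derived as
//
//     let nonretryable = s.transactions_attempted_execution_count
//         .saturating_sub(s.committed_transactions_count)
//         .saturating_sub(s.retryable_transaction_indexes.len());
//
// which mirrors the saturating arithmetic in
// `accumulate_process_transactions_summary` later in this file.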
// Metrics describing packets ingested/processed in various parts of BankingStage during this
// validator's leader slot
#[derive(Debug, Default)]
struct LeaderSlotPacketCountMetrics {
// total number of live packets TPU received from verified receiver for processing.
total_new_valid_packets: u64,
// total number of packets TPU received from sigverify that failed signature verification.
newly_failed_sigverify_count: u64,
    // total number of dropped packets due to the thread's buffered packets capacity being reached.
exceeded_buffer_limit_dropped_packets_count: u64,
// total number of packets that got added to the pending buffer after arriving to BankingStage
newly_buffered_packets_count: u64,
// total number of transactions in the buffer that were filtered out due to things like age and
// duplicate signature checks
retryable_packets_filtered_count: u64,
// total number of transactions that attempted execution in this slot. Should equal the sum
// of `committed_transactions_count`, `retryable_errored_transaction_count`, and
// `nonretryable_errored_transactions_count`.
transactions_attempted_execution_count: u64,
// total number of transactions that were executed and committed into the block
// on this thread
committed_transactions_count: u64,
// total number of transactions that were executed, got a successful execution output/no error,
// and were then committed into the block
committed_transactions_with_successful_result_count: u64,
// total number of transactions that were not executed or failed commit, BUT were added back to the buffered
    // queue because they were retryable errors
retryable_errored_transaction_count: u64,
// The size of the unprocessed buffer at the end of the slot
end_of_slot_unprocessed_buffer_len: u64,
// total number of transactions that were rebuffered into the queue after not being
// executed on a previous pass
retryable_packets_count: u64,
// total number of transactions that attempted execution due to some fatal error (too old, duplicate signature, etc.)
// AND were dropped from the buffered queue
nonretryable_errored_transactions_count: u64,
// total number of transactions that were executed, but failed to be committed into the Poh stream because
    // the block ended. Some of these may already be counted in `nonretryable_errored_transactions_count` if they
    // then hit the age limit after failing to be committed.
executed_transactions_failed_commit_count: u64,
// total number of transactions that were excluded from the block because they were too expensive
// according to the cost model. These transactions are added back to the buffered queue and are
    // already counted in `self.retryable_errored_transaction_count`.
cost_model_throttled_transactions_count: u64,
    // total number of forwardable packets that failed forwarding
failed_forwarded_packets_count: u64,
    // total number of forwardable packets that were successfully forwarded
successful_forwarded_packets_count: u64,
// total number of attempted forwards that failed. Note this is not a count of the number of packets
// that failed, just the total number of batches of packets that failed forwarding
packet_batch_forward_failure_count: u64,
// total number of valid unprocessed packets in the buffer that were removed after being forwarded
cleared_from_buffer_after_forward_count: u64,
// total number of packets removed at the end of the slot due to being too old, duplicate, etc.
end_of_slot_filtered_invalid_count: u64,
}
impl LeaderSlotPacketCountMetrics {
fn new() -> Self {
Self { ..Self::default() }
}
fn report(&self, id: u32, slot: Slot) {
datapoint_info!(
"banking_stage-leader_slot_packet_counts",
("id", id as i64, i64),
("slot", slot as i64, i64),
(
"total_new_valid_packets",
self.total_new_valid_packets as i64,
i64
),
(
"newly_failed_sigverify_count",
self.newly_failed_sigverify_count as i64,
i64
),
(
"exceeded_buffer_limit_dropped_packets_count",
self.exceeded_buffer_limit_dropped_packets_count as i64,
i64
),
(
"newly_buffered_packets_count",
self.newly_buffered_packets_count as i64,
i64
),
(
"retryable_packets_filtered_count",
self.retryable_packets_filtered_count as i64,
i64
),
(
"transactions_attempted_execution_count",
self.transactions_attempted_execution_count as i64,
i64
),
(
"committed_transactions_count",
self.committed_transactions_count as i64,
i64
),
(
"committed_transactions_with_successful_result_count",
self.committed_transactions_with_successful_result_count as i64,
i64
),
(
"retryable_errored_transaction_count",
self.retryable_errored_transaction_count as i64,
i64
),
(
"retryable_packets_count",
self.retryable_packets_count as i64,
i64
),
(
"nonretryable_errored_transactions_count",
self.nonretryable_errored_transactions_count as i64,
i64
),
(
"executed_transactions_failed_commit_count",
self.executed_transactions_failed_commit_count as i64,
i64
),
(
"cost_model_throttled_transactions_count",
self.cost_model_throttled_transactions_count as i64,
i64
),
(
"failed_forwarded_packets_count",
self.failed_forwarded_packets_count as i64,
i64
),
(
"successful_forwarded_packets_count",
self.successful_forwarded_packets_count as i64,
i64
),
(
"packet_batch_forward_failure_count",
self.packet_batch_forward_failure_count as i64,
i64
),
(
"cleared_from_buffer_after_forward_count",
self.cleared_from_buffer_after_forward_count as i64,
i64
),
(
"end_of_slot_filtered_invalid_count",
self.end_of_slot_filtered_invalid_count as i64,
i64
),
(
"end_of_slot_unprocessed_buffer_len",
self.end_of_slot_unprocessed_buffer_len as i64,
i64
),
);
}
}
#[derive(Debug)]
pub(crate) struct LeaderSlotMetrics {
    // banking_stage creates one QosService instance per working thread, each uniquely
    // identified by id. This field allows metrics to be categorized for gossip votes,
    // TPU votes and other transactions.
id: u32,
// aggregate metrics per slot
slot: Slot,
packet_count_metrics: LeaderSlotPacketCountMetrics,
transaction_error_metrics: TransactionErrorMetrics,
timing_metrics: LeaderSlotTimingMetrics,
// Used by tests to check if the `self.report()` method was called
is_reported: bool,
}
impl LeaderSlotMetrics {
pub(crate) fn new(id: u32, slot: Slot, bank_creation_time: &Instant) -> Self {
Self {
id,
slot,
packet_count_metrics: LeaderSlotPacketCountMetrics::new(),
transaction_error_metrics: TransactionErrorMetrics::new(),
timing_metrics: LeaderSlotTimingMetrics::new(bank_creation_time),
is_reported: false,
}
}
pub(crate) fn report(&mut self) {
self.is_reported = true;
self.timing_metrics.report(self.id, self.slot);
self.transaction_error_metrics.report(self.id, self.slot);
self.packet_count_metrics.report(self.id, self.slot);
}
/// Returns `Some(self.slot)` if the metrics have been reported, otherwise returns None
fn reported_slot(&self) -> Option<Slot> {
if self.is_reported {
Some(self.slot)
} else {
None
}
}
}
#[derive(Debug)]
pub struct LeaderSlotMetricsTracker {
// Only `Some` if BankingStage detects it's time to construct our leader slot,
// otherwise `None`
leader_slot_metrics: Option<LeaderSlotMetrics>,
id: u32,
}
impl LeaderSlotMetricsTracker {
pub fn new(id: u32) -> Self {
Self {
leader_slot_metrics: None,
id,
}
}
// Returns reported slot if metrics were reported
pub(crate) fn update_on_leader_slot_boundary(
&mut self,
bank_start: &Option<BankStart>,
) -> Option<Slot> {
match (self.leader_slot_metrics.as_mut(), bank_start) {
(None, None) => None,
(Some(leader_slot_metrics), None) => {
leader_slot_metrics.report();
// Ensure tests catch that `report()` method was called
let reported_slot = leader_slot_metrics.reported_slot();
// Slot has ended, time to report metrics
self.leader_slot_metrics = None;
reported_slot
}
(None, Some(bank_start)) => {
                // Our leader slot has begun, time to create a new slot tracker
self.leader_slot_metrics = Some(LeaderSlotMetrics::new(
self.id,
bank_start.working_bank.slot(),
&bank_start.bank_creation_time,
));
self.leader_slot_metrics.as_ref().unwrap().reported_slot()
}
(Some(leader_slot_metrics), Some(bank_start)) => {
if leader_slot_metrics.slot != bank_start.working_bank.slot() {
                    // Last slot has ended, a new slot has begun
leader_slot_metrics.report();
// Ensure tests catch that `report()` method was called
let reported_slot = leader_slot_metrics.reported_slot();
self.leader_slot_metrics = Some(LeaderSlotMetrics::new(
self.id,
bank_start.working_bank.slot(),
&bank_start.bank_creation_time,
));
reported_slot
} else {
leader_slot_metrics.reported_slot()
}
}
}
}
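    // Compact summary of the four transitions handled above (added commentary,
    // assuming the tuple order `(tracked metrics, bank_start)`):
    //
    //     (None, None)       -> not leader; nothing to do, returns None
    //     (Some, None)       -> slot ended; report and clear, returns the reported slot
    //     (None, Some(bank)) -> slot began; start tracking, returns None
    //     (Some, Some(bank)) -> same slot: no-op; new slot: report old, track new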
pub(crate) fn accumulate_process_transactions_summary(
&mut self,
process_transactions_summary: &ProcessTransactionsSummary,
) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
let ProcessTransactionsSummary {
transactions_attempted_execution_count,
committed_transactions_count,
committed_transactions_with_successful_result_count,
failed_commit_count,
ref retryable_transaction_indexes,
cost_model_throttled_transactions_count,
cost_model_us,
ref execute_and_commit_timings,
..
} = process_transactions_summary;
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.transactions_attempted_execution_count,
*transactions_attempted_execution_count as u64
);
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.committed_transactions_count,
*committed_transactions_count as u64
);
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.committed_transactions_with_successful_result_count,
*committed_transactions_with_successful_result_count as u64
);
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.executed_transactions_failed_commit_count,
*failed_commit_count as u64
);
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.retryable_errored_transaction_count,
retryable_transaction_indexes.len() as u64
);
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.nonretryable_errored_transactions_count,
transactions_attempted_execution_count
.saturating_sub(*committed_transactions_count)
.saturating_sub(retryable_transaction_indexes.len()) as u64
);
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.cost_model_throttled_transactions_count,
*cost_model_throttled_transactions_count as u64
);
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_packets_timings
.cost_model_us,
*cost_model_us as u64
);
leader_slot_metrics
.timing_metrics
.execute_and_commit_timings
.accumulate(execute_and_commit_timings);
}
}
pub(crate) fn accumulate_transaction_errors(
&mut self,
error_metrics: &TransactionErrorMetrics,
) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
leader_slot_metrics
.transaction_error_metrics
.accumulate(error_metrics);
}
}
// Packet inflow/outflow/processing metrics
pub(crate) fn increment_total_new_valid_packets(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.total_new_valid_packets,
count
);
}
}
pub(crate) fn increment_newly_failed_sigverify_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.newly_failed_sigverify_count,
count
);
}
}
pub(crate) fn increment_exceeded_buffer_limit_dropped_packets_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.exceeded_buffer_limit_dropped_packets_count,
count
);
}
}
pub(crate) fn increment_newly_buffered_packets_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.newly_buffered_packets_count,
count
);
}
}
pub(crate) fn increment_retryable_packets_filtered_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.retryable_packets_filtered_count,
count
);
}
}
pub(crate) fn increment_failed_forwarded_packets_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.failed_forwarded_packets_count,
count
);
}
}
pub(crate) fn increment_successful_forwarded_packets_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.successful_forwarded_packets_count,
count
);
}
}
pub(crate) fn increment_packet_batch_forward_failure_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.packet_batch_forward_failure_count,
count
);
}
}
pub(crate) fn increment_cleared_from_buffer_after_forward_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.cleared_from_buffer_after_forward_count,
count
);
}
}
pub(crate) fn increment_retryable_packets_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.retryable_packets_count,
count
);
}
}
pub(crate) fn increment_end_of_slot_filtered_invalid_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.end_of_slot_filtered_invalid_count,
count
);
}
}
pub(crate) fn set_end_of_slot_unprocessed_buffer_len(&mut self, len: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
leader_slot_metrics
.packet_count_metrics
.end_of_slot_unprocessed_buffer_len = len;
}
}
// Outermost banking thread's loop timing metrics
pub(crate) fn increment_process_buffered_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.outer_loop_timings
.process_buffered_packets_us,
us
);
}
}
pub(crate) fn increment_slot_metrics_check_slot_boundary_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.outer_loop_timings
.slot_metrics_check_slot_boundary_us,
us
);
}
}
pub(crate) fn increment_receive_and_buffer_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.outer_loop_timings
.receive_and_buffer_packets_us,
us
);
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.outer_loop_timings
.receive_and_buffer_packets_invoked_count,
1
);
}
}
// Processing buffer timing metrics
pub(crate) fn increment_make_decision_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_buffered_packets_timings
.make_decision_us,
us
);
}
}
pub(crate) fn increment_consume_buffered_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_buffered_packets_timings
.consume_buffered_packets_us,
us
);
}
}
pub(crate) fn increment_forward_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_buffered_packets_timings
.forward_us,
us
);
}
}
pub(crate) fn increment_forward_and_hold_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_buffered_packets_timings
.forward_and_hold_us,
us
);
}
}
// Consuming buffered packets timing metrics
pub(crate) fn increment_end_of_slot_filtering_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.consume_buffered_packets_timings
.end_of_slot_filtering_us,
us
);
}
}
pub(crate) fn increment_consume_buffered_packets_poh_recorder_lock_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.consume_buffered_packets_timings
.poh_recorder_lock_us,
us
);
}
}
pub(crate) fn increment_process_packets_transactions_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.consume_buffered_packets_timings
.process_packets_transactions_us,
us
);
}
}
// Processing packets timing metrics
pub(crate) fn increment_transactions_from_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_packets_timings
.transactions_from_packets_us,
us
);
}
}
pub(crate) fn increment_process_transactions_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_packets_timings
.process_transactions_us,
us
);
}
}
pub(crate) fn increment_filter_retryable_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_packets_timings
.filter_retryable_packets_us,
us
);
}
}
}
#[cfg(test)]
mod tests {
use {
super::*,
solana_runtime::{bank::Bank, genesis_utils::create_genesis_config},
solana_sdk::pubkey::Pubkey,
std::sync::Arc,
};
struct TestSlotBoundaryComponents {
first_bank: Arc<Bank>,
first_poh_recorder_bank: BankStart,
next_bank: Arc<Bank>,
next_poh_recorder_bank: BankStart,
leader_slot_metrics_tracker: LeaderSlotMetricsTracker,
}
fn setup_test_slot_boundary_banks() -> TestSlotBoundaryComponents {
let genesis = create_genesis_config(10);
let first_bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config));
let first_poh_recorder_bank = BankStart {
working_bank: first_bank.clone(),
bank_creation_time: Arc::new(Instant::now()),
};
// Create a child descended from the first bank
let next_bank = Arc::new(Bank::new_from_parent(
&first_bank,
&Pubkey::new_unique(),
first_bank.slot() + 1,
));
let next_poh_recorder_bank = BankStart {
working_bank: next_bank.clone(),
bank_creation_time: Arc::new(Instant::now()),
};
let banking_stage_thread_id = 0;
let leader_slot_metrics_tracker = LeaderSlotMetricsTracker::new(banking_stage_thread_id);
TestSlotBoundaryComponents {
first_bank,
first_poh_recorder_bank,
next_bank,
next_poh_recorder_bank,
leader_slot_metrics_tracker,
}
}
#[test]
pub fn test_update_on_leader_slot_boundary_not_leader_to_not_leader() {
let TestSlotBoundaryComponents {
mut leader_slot_metrics_tracker,
..
} = setup_test_slot_boundary_banks();
// Test that with no bank being tracked, and no new bank being tracked, nothing is reported
assert!(leader_slot_metrics_tracker
.update_on_leader_slot_boundary(&None)
.is_none());
assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none());
}
#[test]
pub fn test_update_on_leader_slot_boundary_not_leader_to_leader() {
let TestSlotBoundaryComponents {
first_poh_recorder_bank,
mut leader_slot_metrics_tracker,
..
} = setup_test_slot_boundary_banks();
// Test case where the thread has not detected a leader bank, and now sees a leader bank.
// Metrics should not be reported because leader slot has not ended
assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none());
assert!(leader_slot_metrics_tracker
.update_on_leader_slot_boundary(&Some(first_poh_recorder_bank))
.is_none());
assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_some());
}
#[test]
pub fn test_update_on_leader_slot_boundary_leader_to_not_leader() {
let TestSlotBoundaryComponents {
first_bank,
first_poh_recorder_bank,
mut leader_slot_metrics_tracker,
..
} = setup_test_slot_boundary_banks();
// Test case where the thread has a leader bank, and now detects there's no more leader bank,
// implying the slot has ended. Metrics should be reported for `first_bank.slot()`,
// because that leader slot has just ended.
assert!(leader_slot_metrics_tracker
.update_on_leader_slot_boundary(&Some(first_poh_recorder_bank))
.is_none());
assert_eq!(
leader_slot_metrics_tracker
.update_on_leader_slot_boundary(&None)
.unwrap(),
first_bank.slot()
);
assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none());
assert!(leader_slot_metrics_tracker
.update_on_leader_slot_boundary(&None)
.is_none());
}
#[test]
pub fn test_update_on_leader_slot_boundary_leader_to_leader_same_slot() {
let TestSlotBoundaryComponents {
first_bank,
first_poh_recorder_bank,
mut leader_slot_metrics_tracker,
..
} = setup_test_slot_boundary_banks();
// Test case where the thread has a leader bank, and now detects the same leader bank,
// implying the slot is still running. Metrics should not be reported
assert!(leader_slot_metrics_tracker
.update_on_leader_slot_boundary(&Some(first_poh_recorder_bank.clone()))
.is_none());
assert!(leader_slot_metrics_tracker
.update_on_leader_slot_boundary(&Some(first_poh_recorder_bank))
.is_none());
assert_eq!(
leader_slot_metrics_tracker
.update_on_leader_slot_boundary(&None)
.unwrap(),
first_bank.slot()
);
assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none());
}
#[test]
pub fn test_update_on_leader_slot_boundary_leader_to_leader_bigger_slot() {
let TestSlotBoundaryComponents {
first_bank,
first_poh_recorder_bank,
next_bank,
next_poh_recorder_bank,
mut leader_slot_metrics_tracker,
} = setup_test_slot_boundary_banks();
// Test case where the thread has a leader bank, and now detects there's a new leader bank
// for a bigger slot, implying the slot has ended. Metrics should be reported for the
// smaller slot
assert!(leader_slot_metrics_tracker
.update_on_leader_slot_boundary(&Some(first_poh_recorder_bank))
.is_none());
assert_eq!(
leader_slot_metrics_tracker
.update_on_leader_slot_boundary(&Some(next_poh_recorder_bank))
.unwrap(),
first_bank.slot()
);
assert_eq!(
leader_slot_metrics_tracker
.update_on_leader_slot_boundary(&None)
.unwrap(),
next_bank.slot()
);
assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none());
}
#[test]
pub fn test_update_on_leader_slot_boundary_leader_to_leader_smaller_slot() {
let TestSlotBoundaryComponents {
first_bank,
first_poh_recorder_bank,
next_bank,
next_poh_recorder_bank,
mut leader_slot_metrics_tracker,
} = setup_test_slot_boundary_banks();
// Test case where the thread has a leader bank, and now detects there's a new leader bank
        // for a smaller slot, implying the slot has ended. Metrics should be reported for the
// bigger slot
assert!(leader_slot_metrics_tracker
.update_on_leader_slot_boundary(&Some(next_poh_recorder_bank))
.is_none());
assert_eq!(
leader_slot_metrics_tracker
.update_on_leader_slot_boundary(&Some(first_poh_recorder_bank))
.unwrap(),
next_bank.slot()
);
assert_eq!(
leader_slot_metrics_tracker
.update_on_leader_slot_boundary(&None)
.unwrap(),
first_bank.slot()
);
assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none());
}
}
| 36.942184 | 121 | 0.607843 |
0ea0d5d1b0c19118753428f99b17017a6e64dfb5 | 1,163 | use rustc_hir as hir;
use rustc_middle::ty::{self, CanonicalUserType, TyCtxt, UserType};
crate trait UserAnnotatedTyHelpers<'tcx> {
fn tcx(&self) -> TyCtxt<'tcx>;
fn tables(&self) -> &ty::TypeckTables<'tcx>;
/// Looks up the type associated with this hir-id and applies the
/// user-given substitutions; the hir-id must map to a suitable
/// type.
fn user_substs_applied_to_ty_of_hir_id(
&self,
hir_id: hir::HirId,
) -> Option<CanonicalUserType<'tcx>> {
let user_provided_types = self.tables().user_provided_types();
let mut user_ty = *user_provided_types.get(hir_id)?;
debug!("user_subts_applied_to_ty_of_hir_id: user_ty={:?}", user_ty);
let ty = self.tables().node_type(hir_id);
match ty.kind {
ty::Adt(adt_def, ..) => {
if let UserType::TypeOf(ref mut did, _) = &mut user_ty.value {
*did = adt_def.did;
}
Some(user_ty)
}
ty::FnDef(..) => Some(user_ty),
_ => bug!("ty: {:?} should not have user provided type {:?} recorded ", ty, user_ty),
}
}
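    // Informal example (added commentary; the expression is hypothetical): for
    // a call like `Vec::<u8>::new()`, the path's hir-id maps to a `FnDef` type
    // and the user-written `u8` substitution is returned as-is, while for an
    // ADT type the `Adt` arm above rewrites the recorded def-id to the ADT's
    // own def-id before returning the annotation.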
}
| 36.34375 | 97 | 0.576096 |
39e8494c6253da00810075399d16b1b48d542f58 | 28,943 | #[doc = "Reader of register CFG"]
pub type R = crate::R<u32, super::CFG>;
#[doc = "Writer for register CFG"]
pub type W = crate::W<u32, super::CFG>;
#[doc = "Register CFG `reset()`'s with value 0x0200"]
impl crate::ResetValue for super::CFG {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0x0200
}
}
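// Illustrative usage sketch (an assumption following the usual svd2rust
// read/modify/write pattern; `adc` is a hypothetical peripheral handle):
//
//     adc.cfg.modify(|_, w| w.mode().mode_2().adiv().adiv_2().avgs().avgs_3());
//
// This selects 12-bit conversion, input clock / 4, and 32-sample averaging
// using the enumerated-value helpers defined below.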
#[doc = "Input Clock Select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum ADICLK_A {
#[doc = "0: IPG clock"]
ADICLK_0 = 0,
#[doc = "1: IPG clock divided by 2"]
ADICLK_1 = 1,
#[doc = "3: Asynchronous clock (ADACK)"]
ADICLK_3 = 3,
}
impl From<ADICLK_A> for u8 {
#[inline(always)]
fn from(variant: ADICLK_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `ADICLK`"]
pub type ADICLK_R = crate::R<u8, ADICLK_A>;
impl ADICLK_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, ADICLK_A> {
use crate::Variant::*;
match self.bits {
0 => Val(ADICLK_A::ADICLK_0),
1 => Val(ADICLK_A::ADICLK_1),
3 => Val(ADICLK_A::ADICLK_3),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `ADICLK_0`"]
#[inline(always)]
pub fn is_adiclk_0(&self) -> bool {
*self == ADICLK_A::ADICLK_0
}
#[doc = "Checks if the value of the field is `ADICLK_1`"]
#[inline(always)]
pub fn is_adiclk_1(&self) -> bool {
*self == ADICLK_A::ADICLK_1
}
#[doc = "Checks if the value of the field is `ADICLK_3`"]
#[inline(always)]
pub fn is_adiclk_3(&self) -> bool {
*self == ADICLK_A::ADICLK_3
}
}
#[doc = "Write proxy for field `ADICLK`"]
pub struct ADICLK_W<'a> {
w: &'a mut W,
}
impl<'a> ADICLK_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: ADICLK_A) -> &'a mut W {
unsafe { self.bits(variant.into()) }
}
#[doc = "IPG clock"]
#[inline(always)]
pub fn adiclk_0(self) -> &'a mut W {
self.variant(ADICLK_A::ADICLK_0)
}
#[doc = "IPG clock divided by 2"]
#[inline(always)]
pub fn adiclk_1(self) -> &'a mut W {
self.variant(ADICLK_A::ADICLK_1)
}
#[doc = "Asynchronous clock (ADACK)"]
#[inline(always)]
pub fn adiclk_3(self) -> &'a mut W {
self.variant(ADICLK_A::ADICLK_3)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0x03) | ((value as u32) & 0x03);
self.w
}
}
#[doc = "Conversion Mode Selection\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum MODE_A {
#[doc = "0: 8-bit conversion"]
MODE_0 = 0,
#[doc = "1: 10-bit conversion"]
MODE_1 = 1,
#[doc = "2: 12-bit conversion"]
MODE_2 = 2,
}
impl From<MODE_A> for u8 {
#[inline(always)]
fn from(variant: MODE_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `MODE`"]
pub type MODE_R = crate::R<u8, MODE_A>;
impl MODE_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, MODE_A> {
use crate::Variant::*;
match self.bits {
0 => Val(MODE_A::MODE_0),
1 => Val(MODE_A::MODE_1),
2 => Val(MODE_A::MODE_2),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `MODE_0`"]
#[inline(always)]
pub fn is_mode_0(&self) -> bool {
*self == MODE_A::MODE_0
}
#[doc = "Checks if the value of the field is `MODE_1`"]
#[inline(always)]
pub fn is_mode_1(&self) -> bool {
*self == MODE_A::MODE_1
}
#[doc = "Checks if the value of the field is `MODE_2`"]
#[inline(always)]
pub fn is_mode_2(&self) -> bool {
*self == MODE_A::MODE_2
}
}
#[doc = "Write proxy for field `MODE`"]
pub struct MODE_W<'a> {
w: &'a mut W,
}
impl<'a> MODE_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: MODE_A) -> &'a mut W {
unsafe { self.bits(variant.into()) }
}
#[doc = "8-bit conversion"]
#[inline(always)]
pub fn mode_0(self) -> &'a mut W {
self.variant(MODE_A::MODE_0)
}
#[doc = "10-bit conversion"]
#[inline(always)]
pub fn mode_1(self) -> &'a mut W {
self.variant(MODE_A::MODE_1)
}
#[doc = "12-bit conversion"]
#[inline(always)]
pub fn mode_2(self) -> &'a mut W {
self.variant(MODE_A::MODE_2)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 2)) | (((value as u32) & 0x03) << 2);
self.w
}
}
#[doc = "Long Sample Time Configuration\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ADLSMP_A {
#[doc = "0: Short sample mode."]
ADLSMP_0 = 0,
#[doc = "1: Long sample mode."]
ADLSMP_1 = 1,
}
impl From<ADLSMP_A> for bool {
#[inline(always)]
fn from(variant: ADLSMP_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `ADLSMP`"]
pub type ADLSMP_R = crate::R<bool, ADLSMP_A>;
impl ADLSMP_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> ADLSMP_A {
match self.bits {
false => ADLSMP_A::ADLSMP_0,
true => ADLSMP_A::ADLSMP_1,
}
}
#[doc = "Checks if the value of the field is `ADLSMP_0`"]
#[inline(always)]
pub fn is_adlsmp_0(&self) -> bool {
*self == ADLSMP_A::ADLSMP_0
}
#[doc = "Checks if the value of the field is `ADLSMP_1`"]
#[inline(always)]
pub fn is_adlsmp_1(&self) -> bool {
*self == ADLSMP_A::ADLSMP_1
}
}
#[doc = "Write proxy for field `ADLSMP`"]
pub struct ADLSMP_W<'a> {
w: &'a mut W,
}
impl<'a> ADLSMP_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: ADLSMP_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Short sample mode."]
#[inline(always)]
pub fn adlsmp_0(self) -> &'a mut W {
self.variant(ADLSMP_A::ADLSMP_0)
}
#[doc = "Long sample mode."]
#[inline(always)]
pub fn adlsmp_1(self) -> &'a mut W {
self.variant(ADLSMP_A::ADLSMP_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
self.w
}
}
#[doc = "Clock Divide Select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum ADIV_A {
#[doc = "0: Input clock"]
ADIV_0 = 0,
#[doc = "1: Input clock / 2"]
ADIV_1 = 1,
#[doc = "2: Input clock / 4"]
ADIV_2 = 2,
#[doc = "3: Input clock / 8"]
ADIV_3 = 3,
}
impl From<ADIV_A> for u8 {
#[inline(always)]
fn from(variant: ADIV_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `ADIV`"]
pub type ADIV_R = crate::R<u8, ADIV_A>;
impl ADIV_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> ADIV_A {
match self.bits {
0 => ADIV_A::ADIV_0,
1 => ADIV_A::ADIV_1,
2 => ADIV_A::ADIV_2,
3 => ADIV_A::ADIV_3,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `ADIV_0`"]
#[inline(always)]
pub fn is_adiv_0(&self) -> bool {
*self == ADIV_A::ADIV_0
}
#[doc = "Checks if the value of the field is `ADIV_1`"]
#[inline(always)]
pub fn is_adiv_1(&self) -> bool {
*self == ADIV_A::ADIV_1
}
#[doc = "Checks if the value of the field is `ADIV_2`"]
#[inline(always)]
pub fn is_adiv_2(&self) -> bool {
*self == ADIV_A::ADIV_2
}
#[doc = "Checks if the value of the field is `ADIV_3`"]
#[inline(always)]
pub fn is_adiv_3(&self) -> bool {
*self == ADIV_A::ADIV_3
}
}
#[doc = "Write proxy for field `ADIV`"]
pub struct ADIV_W<'a> {
w: &'a mut W,
}
impl<'a> ADIV_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: ADIV_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "Input clock"]
#[inline(always)]
pub fn adiv_0(self) -> &'a mut W {
self.variant(ADIV_A::ADIV_0)
}
#[doc = "Input clock / 2"]
#[inline(always)]
pub fn adiv_1(self) -> &'a mut W {
self.variant(ADIV_A::ADIV_1)
}
#[doc = "Input clock / 4"]
#[inline(always)]
pub fn adiv_2(self) -> &'a mut W {
self.variant(ADIV_A::ADIV_2)
}
#[doc = "Input clock / 8"]
#[inline(always)]
pub fn adiv_3(self) -> &'a mut W {
self.variant(ADIV_A::ADIV_3)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 5)) | (((value as u32) & 0x03) << 5);
self.w
}
}
#[doc = "Low-Power Configuration\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ADLPC_A {
#[doc = "0: ADC hard block not in low power mode."]
ADLPC_0 = 0,
#[doc = "1: ADC hard block in low power mode."]
ADLPC_1 = 1,
}
impl From<ADLPC_A> for bool {
#[inline(always)]
fn from(variant: ADLPC_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `ADLPC`"]
pub type ADLPC_R = crate::R<bool, ADLPC_A>;
impl ADLPC_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> ADLPC_A {
match self.bits {
false => ADLPC_A::ADLPC_0,
true => ADLPC_A::ADLPC_1,
}
}
#[doc = "Checks if the value of the field is `ADLPC_0`"]
#[inline(always)]
pub fn is_adlpc_0(&self) -> bool {
*self == ADLPC_A::ADLPC_0
}
#[doc = "Checks if the value of the field is `ADLPC_1`"]
#[inline(always)]
pub fn is_adlpc_1(&self) -> bool {
*self == ADLPC_A::ADLPC_1
}
}
#[doc = "Write proxy for field `ADLPC`"]
pub struct ADLPC_W<'a> {
w: &'a mut W,
}
impl<'a> ADLPC_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: ADLPC_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "ADC hard block not in low power mode."]
#[inline(always)]
pub fn adlpc_0(self) -> &'a mut W {
self.variant(ADLPC_A::ADLPC_0)
}
#[doc = "ADC hard block in low power mode."]
#[inline(always)]
pub fn adlpc_1(self) -> &'a mut W {
self.variant(ADLPC_A::ADLPC_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
self.w
}
}
#[doc = "Defines the sample time duration\n\nValue on reset: 2"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum ADSTS_A {
#[doc = "0: Sample period (ADC clocks) = 2 if ADLSMP=0b Sample period (ADC clocks) = 12 if ADLSMP=1b"]
ADSTS_0 = 0,
#[doc = "1: Sample period (ADC clocks) = 4 if ADLSMP=0b Sample period (ADC clocks) = 16 if ADLSMP=1b"]
ADSTS_1 = 1,
#[doc = "2: Sample period (ADC clocks) = 6 if ADLSMP=0b Sample period (ADC clocks) = 20 if ADLSMP=1b"]
ADSTS_2 = 2,
#[doc = "3: Sample period (ADC clocks) = 8 if ADLSMP=0b Sample period (ADC clocks) = 24 if ADLSMP=1b"]
ADSTS_3 = 3,
}
impl From<ADSTS_A> for u8 {
#[inline(always)]
fn from(variant: ADSTS_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `ADSTS`"]
pub type ADSTS_R = crate::R<u8, ADSTS_A>;
impl ADSTS_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> ADSTS_A {
match self.bits {
0 => ADSTS_A::ADSTS_0,
1 => ADSTS_A::ADSTS_1,
2 => ADSTS_A::ADSTS_2,
3 => ADSTS_A::ADSTS_3,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `ADSTS_0`"]
#[inline(always)]
pub fn is_adsts_0(&self) -> bool {
*self == ADSTS_A::ADSTS_0
}
#[doc = "Checks if the value of the field is `ADSTS_1`"]
#[inline(always)]
pub fn is_adsts_1(&self) -> bool {
*self == ADSTS_A::ADSTS_1
}
#[doc = "Checks if the value of the field is `ADSTS_2`"]
#[inline(always)]
pub fn is_adsts_2(&self) -> bool {
*self == ADSTS_A::ADSTS_2
}
#[doc = "Checks if the value of the field is `ADSTS_3`"]
#[inline(always)]
pub fn is_adsts_3(&self) -> bool {
*self == ADSTS_A::ADSTS_3
}
}
#[doc = "Write proxy for field `ADSTS`"]
pub struct ADSTS_W<'a> {
w: &'a mut W,
}
impl<'a> ADSTS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: ADSTS_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "Sample period (ADC clocks) = 2 if ADLSMP=0b Sample period (ADC clocks) = 12 if ADLSMP=1b"]
#[inline(always)]
pub fn adsts_0(self) -> &'a mut W {
self.variant(ADSTS_A::ADSTS_0)
}
#[doc = "Sample period (ADC clocks) = 4 if ADLSMP=0b Sample period (ADC clocks) = 16 if ADLSMP=1b"]
#[inline(always)]
pub fn adsts_1(self) -> &'a mut W {
self.variant(ADSTS_A::ADSTS_1)
}
#[doc = "Sample period (ADC clocks) = 6 if ADLSMP=0b Sample period (ADC clocks) = 20 if ADLSMP=1b"]
#[inline(always)]
pub fn adsts_2(self) -> &'a mut W {
self.variant(ADSTS_A::ADSTS_2)
}
#[doc = "Sample period (ADC clocks) = 8 if ADLSMP=0b Sample period (ADC clocks) = 24 if ADLSMP=1b"]
#[inline(always)]
pub fn adsts_3(self) -> &'a mut W {
self.variant(ADSTS_A::ADSTS_3)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 8)) | (((value as u32) & 0x03) << 8);
self.w
}
}
#[doc = "High Speed Configuration\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ADHSC_A {
#[doc = "0: Normal conversion selected."]
ADHSC_0 = 0,
#[doc = "1: High speed conversion selected."]
ADHSC_1 = 1,
}
impl From<ADHSC_A> for bool {
#[inline(always)]
fn from(variant: ADHSC_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `ADHSC`"]
pub type ADHSC_R = crate::R<bool, ADHSC_A>;
impl ADHSC_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> ADHSC_A {
match self.bits {
false => ADHSC_A::ADHSC_0,
true => ADHSC_A::ADHSC_1,
}
}
#[doc = "Checks if the value of the field is `ADHSC_0`"]
#[inline(always)]
pub fn is_adhsc_0(&self) -> bool {
*self == ADHSC_A::ADHSC_0
}
#[doc = "Checks if the value of the field is `ADHSC_1`"]
#[inline(always)]
pub fn is_adhsc_1(&self) -> bool {
*self == ADHSC_A::ADHSC_1
}
}
#[doc = "Write proxy for field `ADHSC`"]
pub struct ADHSC_W<'a> {
w: &'a mut W,
}
impl<'a> ADHSC_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: ADHSC_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Normal conversion selected."]
#[inline(always)]
pub fn adhsc_0(self) -> &'a mut W {
self.variant(ADHSC_A::ADHSC_0)
}
#[doc = "High speed conversion selected."]
#[inline(always)]
pub fn adhsc_1(self) -> &'a mut W {
self.variant(ADHSC_A::ADHSC_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
self.w
}
}
#[doc = "Voltage Reference Selection\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum REFSEL_A {
#[doc = "0: Selects VREFH/VREFL as reference voltage."]
REFSEL_0 = 0,
}
impl From<REFSEL_A> for u8 {
#[inline(always)]
fn from(variant: REFSEL_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `REFSEL`"]
pub type REFSEL_R = crate::R<u8, REFSEL_A>;
impl REFSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, REFSEL_A> {
use crate::Variant::*;
match self.bits {
0 => Val(REFSEL_A::REFSEL_0),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `REFSEL_0`"]
#[inline(always)]
pub fn is_refsel_0(&self) -> bool {
*self == REFSEL_A::REFSEL_0
}
}
#[doc = "Write proxy for field `REFSEL`"]
pub struct REFSEL_W<'a> {
w: &'a mut W,
}
impl<'a> REFSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: REFSEL_A) -> &'a mut W {
unsafe { self.bits(variant.into()) }
}
#[doc = "Selects VREFH/VREFL as reference voltage."]
#[inline(always)]
pub fn refsel_0(self) -> &'a mut W {
self.variant(REFSEL_A::REFSEL_0)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 11)) | (((value as u32) & 0x03) << 11);
self.w
}
}
#[doc = "Conversion Trigger Select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ADTRG_A {
#[doc = "0: Software trigger selected"]
ADTRG_0 = 0,
#[doc = "1: Hardware trigger selected"]
ADTRG_1 = 1,
}
impl From<ADTRG_A> for bool {
#[inline(always)]
fn from(variant: ADTRG_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `ADTRG`"]
pub type ADTRG_R = crate::R<bool, ADTRG_A>;
impl ADTRG_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> ADTRG_A {
match self.bits {
false => ADTRG_A::ADTRG_0,
true => ADTRG_A::ADTRG_1,
}
}
#[doc = "Checks if the value of the field is `ADTRG_0`"]
#[inline(always)]
pub fn is_adtrg_0(&self) -> bool {
*self == ADTRG_A::ADTRG_0
}
#[doc = "Checks if the value of the field is `ADTRG_1`"]
#[inline(always)]
pub fn is_adtrg_1(&self) -> bool {
*self == ADTRG_A::ADTRG_1
}
}
#[doc = "Write proxy for field `ADTRG`"]
pub struct ADTRG_W<'a> {
w: &'a mut W,
}
impl<'a> ADTRG_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: ADTRG_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Software trigger selected"]
#[inline(always)]
pub fn adtrg_0(self) -> &'a mut W {
self.variant(ADTRG_A::ADTRG_0)
}
#[doc = "Hardware trigger selected"]
#[inline(always)]
pub fn adtrg_1(self) -> &'a mut W {
self.variant(ADTRG_A::ADTRG_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13);
self.w
}
}
#[doc = "Hardware Average select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum AVGS_A {
#[doc = "0: 4 samples averaged"]
AVGS_0 = 0,
#[doc = "1: 8 samples averaged"]
AVGS_1 = 1,
#[doc = "2: 16 samples averaged"]
AVGS_2 = 2,
#[doc = "3: 32 samples averaged"]
AVGS_3 = 3,
}
impl From<AVGS_A> for u8 {
#[inline(always)]
fn from(variant: AVGS_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `AVGS`"]
pub type AVGS_R = crate::R<u8, AVGS_A>;
impl AVGS_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> AVGS_A {
match self.bits {
0 => AVGS_A::AVGS_0,
1 => AVGS_A::AVGS_1,
2 => AVGS_A::AVGS_2,
3 => AVGS_A::AVGS_3,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `AVGS_0`"]
#[inline(always)]
pub fn is_avgs_0(&self) -> bool {
*self == AVGS_A::AVGS_0
}
#[doc = "Checks if the value of the field is `AVGS_1`"]
#[inline(always)]
pub fn is_avgs_1(&self) -> bool {
*self == AVGS_A::AVGS_1
}
#[doc = "Checks if the value of the field is `AVGS_2`"]
#[inline(always)]
pub fn is_avgs_2(&self) -> bool {
*self == AVGS_A::AVGS_2
}
#[doc = "Checks if the value of the field is `AVGS_3`"]
#[inline(always)]
pub fn is_avgs_3(&self) -> bool {
*self == AVGS_A::AVGS_3
}
}
#[doc = "Write proxy for field `AVGS`"]
pub struct AVGS_W<'a> {
w: &'a mut W,
}
impl<'a> AVGS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: AVGS_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "4 samples averaged"]
#[inline(always)]
pub fn avgs_0(self) -> &'a mut W {
self.variant(AVGS_A::AVGS_0)
}
#[doc = "8 samples averaged"]
#[inline(always)]
pub fn avgs_1(self) -> &'a mut W {
self.variant(AVGS_A::AVGS_1)
}
#[doc = "16 samples averaged"]
#[inline(always)]
pub fn avgs_2(self) -> &'a mut W {
self.variant(AVGS_A::AVGS_2)
}
#[doc = "32 samples averaged"]
#[inline(always)]
pub fn avgs_3(self) -> &'a mut W {
self.variant(AVGS_A::AVGS_3)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 14)) | (((value as u32) & 0x03) << 14);
self.w
}
}
#[doc = "Data Overwrite Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum OVWREN_A {
#[doc = "0: Disable the overwriting. Existing Data in Data result register will not be overwritten by subsequent converted data."]
OVWREN_0 = 0,
#[doc = "1: Enable the overwriting."]
OVWREN_1 = 1,
}
impl From<OVWREN_A> for bool {
#[inline(always)]
fn from(variant: OVWREN_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `OVWREN`"]
pub type OVWREN_R = crate::R<bool, OVWREN_A>;
impl OVWREN_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> OVWREN_A {
match self.bits {
false => OVWREN_A::OVWREN_0,
true => OVWREN_A::OVWREN_1,
}
}
#[doc = "Checks if the value of the field is `OVWREN_0`"]
#[inline(always)]
pub fn is_ovwren_0(&self) -> bool {
*self == OVWREN_A::OVWREN_0
}
#[doc = "Checks if the value of the field is `OVWREN_1`"]
#[inline(always)]
pub fn is_ovwren_1(&self) -> bool {
*self == OVWREN_A::OVWREN_1
}
}
#[doc = "Write proxy for field `OVWREN`"]
pub struct OVWREN_W<'a> {
w: &'a mut W,
}
impl<'a> OVWREN_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: OVWREN_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Disable the overwriting. Existing Data in Data result register will not be overwritten by subsequent converted data."]
#[inline(always)]
pub fn ovwren_0(self) -> &'a mut W {
self.variant(OVWREN_A::OVWREN_0)
}
#[doc = "Enable the overwriting."]
#[inline(always)]
pub fn ovwren_1(self) -> &'a mut W {
self.variant(OVWREN_A::OVWREN_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
self.w
}
}
impl R {
#[doc = "Bits 0:1 - Input Clock Select"]
#[inline(always)]
pub fn adiclk(&self) -> ADICLK_R {
ADICLK_R::new((self.bits & 0x03) as u8)
}
#[doc = "Bits 2:3 - Conversion Mode Selection"]
#[inline(always)]
pub fn mode(&self) -> MODE_R {
MODE_R::new(((self.bits >> 2) & 0x03) as u8)
}
#[doc = "Bit 4 - Long Sample Time Configuration"]
#[inline(always)]
pub fn adlsmp(&self) -> ADLSMP_R {
ADLSMP_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bits 5:6 - Clock Divide Select"]
#[inline(always)]
pub fn adiv(&self) -> ADIV_R {
ADIV_R::new(((self.bits >> 5) & 0x03) as u8)
}
#[doc = "Bit 7 - Low-Power Configuration"]
#[inline(always)]
pub fn adlpc(&self) -> ADLPC_R {
ADLPC_R::new(((self.bits >> 7) & 0x01) != 0)
}
#[doc = "Bits 8:9 - Defines the sample time duration"]
#[inline(always)]
pub fn adsts(&self) -> ADSTS_R {
ADSTS_R::new(((self.bits >> 8) & 0x03) as u8)
}
#[doc = "Bit 10 - High Speed Configuration"]
#[inline(always)]
pub fn adhsc(&self) -> ADHSC_R {
ADHSC_R::new(((self.bits >> 10) & 0x01) != 0)
}
#[doc = "Bits 11:12 - Voltage Reference Selection"]
#[inline(always)]
pub fn refsel(&self) -> REFSEL_R {
REFSEL_R::new(((self.bits >> 11) & 0x03) as u8)
}
#[doc = "Bit 13 - Conversion Trigger Select"]
#[inline(always)]
pub fn adtrg(&self) -> ADTRG_R {
ADTRG_R::new(((self.bits >> 13) & 0x01) != 0)
}
#[doc = "Bits 14:15 - Hardware Average select"]
#[inline(always)]
pub fn avgs(&self) -> AVGS_R {
AVGS_R::new(((self.bits >> 14) & 0x03) as u8)
}
#[doc = "Bit 16 - Data Overwrite Enable"]
#[inline(always)]
pub fn ovwren(&self) -> OVWREN_R {
OVWREN_R::new(((self.bits >> 16) & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 0:1 - Input Clock Select"]
#[inline(always)]
pub fn adiclk(&mut self) -> ADICLK_W {
ADICLK_W { w: self }
}
#[doc = "Bits 2:3 - Conversion Mode Selection"]
#[inline(always)]
pub fn mode(&mut self) -> MODE_W {
MODE_W { w: self }
}
#[doc = "Bit 4 - Long Sample Time Configuration"]
#[inline(always)]
pub fn adlsmp(&mut self) -> ADLSMP_W {
ADLSMP_W { w: self }
}
#[doc = "Bits 5:6 - Clock Divide Select"]
#[inline(always)]
pub fn adiv(&mut self) -> ADIV_W {
ADIV_W { w: self }
}
#[doc = "Bit 7 - Low-Power Configuration"]
#[inline(always)]
pub fn adlpc(&mut self) -> ADLPC_W {
ADLPC_W { w: self }
}
#[doc = "Bits 8:9 - Defines the sample time duration"]
#[inline(always)]
pub fn adsts(&mut self) -> ADSTS_W {
ADSTS_W { w: self }
}
#[doc = "Bit 10 - High Speed Configuration"]
#[inline(always)]
pub fn adhsc(&mut self) -> ADHSC_W {
ADHSC_W { w: self }
}
#[doc = "Bits 11:12 - Voltage Reference Selection"]
#[inline(always)]
pub fn refsel(&mut self) -> REFSEL_W {
REFSEL_W { w: self }
}
#[doc = "Bit 13 - Conversion Trigger Select"]
#[inline(always)]
pub fn adtrg(&mut self) -> ADTRG_W {
ADTRG_W { w: self }
}
#[doc = "Bits 14:15 - Hardware Average select"]
#[inline(always)]
pub fn avgs(&mut self) -> AVGS_W {
AVGS_W { w: self }
}
#[doc = "Bit 16 - Data Overwrite Enable"]
#[inline(always)]
pub fn ovwren(&mut self) -> OVWREN_W {
OVWREN_W { w: self }
}
}
| 29.176411 | 134 | 0.547663 |
213a1a7a2aff96abbbca21d9d407af6fa8f706ab | 6,893 | use crate::{OpenApiError, Result};
use okapi::openapi3::{MediaType, RefOr, Response, Responses, SchemaObject};
use okapi::Map;
// FIXME this whole file is a huge mess...
/// Takes a `Responses` struct, and sets the status code to the status code provided for each
/// response in the `Responses`.
pub fn set_status_code(responses: &mut Responses, status: u16) -> Result<()> {
let old_responses = std::mem::take(&mut responses.responses);
// Use `0` as `default`
let new_response = if status == 0 {
ensure_not_ref(add_default_response_code(responses))?
} else {
ensure_not_ref(ensure_status_code_exists(responses, status))?
};
for (_, mut response) in old_responses {
*new_response =
produce_either_response(new_response.clone(), ensure_not_ref(&mut response)?.clone());
}
Ok(())
}
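// Illustrative sketch (hypothetical caller, not part of this crate's API):
// collapsing every documented response of an operation into a single
// `500 Internal Server Error` entry could look like
//
//     set_status_code(&mut operation.responses, 500)?;
//
// while passing `0` folds everything into the `default` response instead.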
/// Checks if the provided `status` code is in the `responses.responses` field. If it isn't, inserts
/// it.
pub fn ensure_status_code_exists(responses: &mut Responses, status: u16) -> &mut RefOr<Response> {
responses
.responses
.entry(status.to_string())
.or_insert_with(|| Response::default().into())
}
/// Add `default` response with a Schema, for when status code is defined at runtime.
/// https://spec.openapis.org/oas/v3.0.0#fixed-fields-13
pub fn add_default_response_schema(
responses: &mut Responses,
content_type: impl ToString,
schema: SchemaObject,
) -> &mut RefOr<Response> {
let media = MediaType {
schema: Some(schema),
..MediaType::default()
};
let response = add_default_response_code(responses);
let response_no_ref = if let Ok(resp) = ensure_not_ref(response) {
resp
} else {
return response;
};
add_media_type(&mut response_no_ref.content, content_type, media);
response
}
/// Add `default` response, for when status code is defined at runtime.
/// https://spec.openapis.org/oas/v3.0.0#fixed-fields-13
pub fn add_default_response_code(responses: &mut Responses) -> &mut RefOr<Response> {
responses
.responses
.entry("default".to_owned())
.or_insert_with(|| Response::default().into())
}
/// Adds a `Response` to a `Responses` object with the given status code, Content-Type and `MediaType`.
pub fn add_content_response(
responses: &mut Responses,
status: u16,
content_type: impl ToString,
media: MediaType,
) -> Result<()> {
let response = ensure_not_ref(ensure_status_code_exists(responses, status))?;
add_media_type(&mut response.content, content_type, media);
Ok(())
}
/// Adds the `media` to the given map. If the map already contains a `MediaType` with the given
/// Content-Type, then it will be combined with `media`.
pub fn add_media_type(
content: &mut Map<String, MediaType>,
content_type: impl ToString,
media: MediaType,
) {
// FIXME these clones shouldn't be necessary
content
.entry(content_type.to_string())
.and_modify(|mt| *mt = accept_either_media_type(mt.clone(), media.clone()))
.or_insert(media);
}
/// Replaces the Content-Type for all responses with `content_type`.
pub fn set_content_type(responses: &mut Responses, content_type: impl ToString) -> Result<()> {
for ref mut resp_refor in responses.responses.values_mut() {
let response = ensure_not_ref(*resp_refor)?;
let content = &mut response.content;
let mt = if content.values().len() == 1 {
content.values().next().unwrap().clone()
} else {
content.values().fold(MediaType::default(), |mt, mt2| {
accept_either_media_type(mt, mt2.clone())
})
};
content.clear();
content.insert(content_type.to_string(), mt);
}
Ok(())
}
/// Adds a `Response` to a `Responses` object with the given status code, Content-Type and `SchemaObject`.
pub fn add_schema_response(
responses: &mut Responses,
status: u16,
content_type: impl ToString,
schema: SchemaObject,
) -> Result<()> {
let media = MediaType {
schema: Some(schema),
..MediaType::default()
};
add_content_response(responses, status, content_type, media)
}
/// Merges the two given `Responses`.
pub fn produce_any_responses(r1: Responses, r2: Responses) -> Result<Responses> {
let mut result = Responses {
default: r1.default.or(r2.default),
responses: r1.responses,
extensions: extend(r1.extensions, r2.extensions),
};
for (status, mut response2) in r2.responses {
let response1 = ensure_not_ref(
result
.responses
.entry(status)
.or_insert_with(|| Response::default().into()),
)?;
*response1 =
produce_either_response(ensure_not_ref(&mut response2)?.clone(), response1.clone());
}
Ok(result)
}
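// Illustrative sketch (hypothetical values): merging a `Responses` that
// documents only `200` with one that documents `200` and `404` yields a
// `Responses` covering both codes, with the two `200` entries combined via
// `produce_either_response`:
//
//     let merged = produce_any_responses(ok_only, ok_and_not_found)?;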
fn ensure_not_ref(response: &mut RefOr<Response>) -> Result<&mut Response> {
match response {
RefOr::Ref(_) => Err(OpenApiError::new(
"Altering Ref responses is not supported.".to_owned(),
)),
RefOr::Object(o) => Ok(o),
}
}
fn extend<A, E: Extend<A>>(mut a: E, b: impl IntoIterator<Item = A>) -> E {
a.extend(b);
a
}
fn produce_either_response(r1: Response, r2: Response) -> Response {
let description = if r1.description.is_empty() {
r2.description
} else if r2.description.is_empty() {
r1.description
} else {
format!("{}\n{}", r1.description, r2.description)
};
let mut content = r1.content;
for (content_type, media) in r2.content {
add_media_type(&mut content, content_type, media);
}
Response {
description,
content,
headers: extend(r1.headers, r2.headers),
links: extend(r1.links, r2.links),
extensions: extend(r1.extensions, r2.extensions),
}
}
fn accept_either_media_type(mt1: MediaType, mt2: MediaType) -> MediaType {
MediaType {
schema: accept_either_schema(mt1.schema, mt2.schema),
example: mt1.example.or(mt2.example),
examples: match (mt1.examples, mt2.examples) {
(Some(e1), Some(e2)) => Some(extend(e1, e2)),
(Some(e), None) | (None, Some(e)) => Some(e),
(None, None) => None,
},
encoding: extend(mt1.encoding, mt2.encoding),
extensions: extend(mt1.extensions, mt2.extensions),
}
}
fn accept_either_schema(
s1: Option<SchemaObject>,
s2: Option<SchemaObject>,
) -> Option<SchemaObject> {
let (s1, s2) = match (s1, s2) {
(Some(s1), Some(s2)) => (s1, s2),
(Some(s), None) | (None, Some(s)) => return Some(s),
(None, None) => return None,
};
let mut schema = SchemaObject::default();
schema.subschemas().any_of = Some(vec![s1.into(), s2.into()]);
Some(schema)
}
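// Informal note on the resulting shape (added commentary): merging two
// distinct schemas `A` and `B` produces the JSON Schema equivalent of
//
//     { "anyOf": [ A, B ] }
//
// so a body matching either original schema validates against the merged one.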
| 33.62439 | 106 | 0.632816 |
e8a1c11e35725141cf0b52b3a8fb07f4efa21688 | 160 | // Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
#[cfg(test)]
mod test;
pub(crate) mod transaction_store_pruner;
pub(crate) mod write_set_pruner;
| 20 | 40 | 0.75 |
50d709c640e9ea2fc8a2a79e0e07b1da87a67d7e | 106 | //! Components used by the Light Client.
pub mod clock;
pub mod io;
pub mod scheduler;
pub mod verifier;
| 15.142857 | 40 | 0.735849 |
e270410feee185a59338d4c2cbc8ae40db8def74 | 1,895 | #[doc = "Control A"]
pub struct CTRLA {
register: ::vcell::VolatileCell<u16>,
}
#[doc = "Control A"]
pub mod ctrla;
#[doc = "Read Request"]
pub struct READREQ {
register: ::vcell::VolatileCell<u16>,
}
#[doc = "Read Request"]
pub mod readreq;
#[doc = "Control B Clear"]
pub struct CTRLBCLR {
register: ::vcell::VolatileCell<u8>,
}
#[doc = "Control B Clear"]
pub mod ctrlbclr;
#[doc = "Control B Set"]
pub struct CTRLBSET {
register: ::vcell::VolatileCell<u8>,
}
#[doc = "Control B Set"]
pub mod ctrlbset;
#[doc = "Control C"]
pub struct CTRLC {
register: ::vcell::VolatileCell<u8>,
}
#[doc = "Control C"]
pub mod ctrlc;
#[doc = "Debug Control"]
pub struct DBGCTRL {
register: ::vcell::VolatileCell<u8>,
}
#[doc = "Debug Control"]
pub mod dbgctrl;
#[doc = "Event Control"]
pub struct EVCTRL {
register: ::vcell::VolatileCell<u16>,
}
#[doc = "Event Control"]
pub mod evctrl;
#[doc = "Interrupt Enable Clear"]
pub struct INTENCLR {
register: ::vcell::VolatileCell<u8>,
}
#[doc = "Interrupt Enable Clear"]
pub mod intenclr;
#[doc = "Interrupt Enable Set"]
pub struct INTENSET {
register: ::vcell::VolatileCell<u8>,
}
#[doc = "Interrupt Enable Set"]
pub mod intenset;
#[doc = "Interrupt Flag Status and Clear"]
pub struct INTFLAG {
register: ::vcell::VolatileCell<u8>,
}
#[doc = "Interrupt Flag Status and Clear"]
pub mod intflag;
#[doc = "Status"]
pub struct STATUS {
register: ::vcell::VolatileCell<u8>,
}
#[doc = "Status"]
pub mod status;
#[doc = "COUNT8 Counter Value"]
pub struct COUNT {
register: ::vcell::VolatileCell<u8>,
}
#[doc = "COUNT8 Counter Value"]
pub mod count;
#[doc = "COUNT8 Period Value"]
pub struct PER {
register: ::vcell::VolatileCell<u8>,
}
#[doc = "COUNT8 Period Value"]
pub mod per;
#[doc = "COUNT8 Compare/Capture"]
pub struct CC {
register: ::vcell::VolatileCell<u8>,
}
#[doc = "COUNT8 Compare/Capture"]
pub mod cc;
| 22.294118 | 42 | 0.658047 |
64b4f7aff5029d4db452820556a7f895704b25d6 | 638 | // Copyright 2016 Peter Reid. See the COPYRIGHT file at the top-level
// directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[macro_use] extern crate slice_as_array;
fn main() {
let mut xs = [0u32; 50];
let xs_prefix = slice_as_array_mut!(&mut xs[0..20], [u32; 20]);
xs[0] = 5; //~error: cannot assign to `xs[..]` because it is borrowed
}
| 35.444444 | 73 | 0.697492 |
e922fdc90626414773cda83915881a5aa908510c | 8,197 | // Copyright 2021 The AccessKit Authors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (found in
// the LICENSE-APACHE file) or the MIT license (found in
// the LICENSE-MIT file), at your option.
use std::sync::Arc;
use accesskit::{ActionHandler, TreeUpdate};
use accesskit_consumer::{Tree, TreeChange};
use lazy_init::LazyTransform;
use windows::Win32::{
Foundation::*,
UI::{Accessibility::*, WindowsAndMessaging::*},
};
use crate::{
node::{PlatformNode, ResolvedPlatformNode},
util::Event,
};
pub struct Adapter<Source = Box<dyn FnOnce() -> TreeUpdate>>
where
Source: Into<TreeUpdate>,
{
hwnd: HWND,
tree: LazyTransform<(Source, Box<dyn ActionHandler>), Arc<Tree>>,
}
impl<Source: Into<TreeUpdate>> Adapter<Source> {
pub fn new(hwnd: HWND, source: Source, action_handler: Box<dyn ActionHandler>) -> Self {
// It's unfortunate that we have to force UIA to initialize early;
// it would be more optimal to let UIA lazily initialize itself
// when we receive the first `WM_GETOBJECT`. But if we don't do this,
// then on a thread that's using a COM STA, we can get a race condition
// that leads to nested WM_GETOBJECT messages and, in some cases,
// ATs not realizing that our window natively implements UIA. See #37.
force_init_uia();
Self {
hwnd,
tree: LazyTransform::new((source, action_handler)),
}
}
fn get_or_create_tree(&self) -> &Arc<Tree> {
self.tree
.get_or_create(|(source, action_handler)| Tree::new(source.into(), action_handler))
}
/// Initialize the tree if it hasn't been initialized already, then apply
/// the provided update.
///
/// The caller must call [`QueuedEvents::raise`] on the return value.
///
/// This method may be safely called on any thread, but refer to
/// [`QueuedEvents::raise`] for restrictions on the context in which
/// it should be called.
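    ///
    /// Illustrative call site (a sketch; `adapter` and `tree_update` are assumed
    /// to exist):
    ///
    /// ```ignore
    /// let events = adapter.update(tree_update);
    /// events.raise(); // raise on the thread that owns the window
    /// ```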
pub fn update(&self, update: TreeUpdate) -> QueuedEvents {
let tree = self.get_or_create_tree();
self.update_internal(tree, update)
}
/// If and only if the tree has been initialized, call the provided function
/// and apply the resulting update.
///
/// The caller must call [`QueuedEvents::raise`] on the return value.
///
/// This method may be safely called on any thread, but refer to
/// [`QueuedEvents::raise`] for restrictions on the context in which
/// it should be called.
pub fn update_if_active(&self, updater: impl FnOnce() -> TreeUpdate) -> QueuedEvents {
let tree = match self.tree.get() {
Some(tree) => tree,
None => {
return QueuedEvents(Vec::new());
}
};
self.update_internal(tree, updater())
}
fn update_internal(&self, tree: &Arc<Tree>, update: TreeUpdate) -> QueuedEvents {
let mut queue = Vec::new();
tree.update_and_process_changes(update, |change| {
match change {
TreeChange::FocusMoved {
old_node: _,
new_node: Some(new_node),
} => {
let platform_node = PlatformNode::new(&new_node, self.hwnd);
let element: IRawElementProviderSimple = platform_node.into();
queue.push(Event::Simple {
element,
event_id: UIA_AutomationFocusChangedEventId,
});
}
TreeChange::NodeUpdated { old_node, new_node } => {
let old_node = ResolvedPlatformNode::new(old_node, self.hwnd);
let new_node = ResolvedPlatformNode::new(new_node, self.hwnd);
new_node.enqueue_property_changes(&mut queue, &old_node);
}
// TODO: handle other events (#20)
_ => (),
};
});
QueuedEvents(queue)
}
fn root_platform_node(&self) -> PlatformNode {
let tree = self.get_or_create_tree();
let reader = tree.read();
let node = reader.root();
PlatformNode::new(&node, self.hwnd)
}
/// Handle the `WM_GETOBJECT` window message.
///
/// This returns an `Option` so the caller can pass the message
/// to `DefWindowProc` if AccessKit decides not to handle it.
/// The optional value is an `Into<LRESULT>` rather than simply an `LRESULT`
/// so the necessary call to UIA, which may lead to a nested `WM_GETOBJECT`
/// message, can be done outside of any lock that the caller might hold
/// on the `Adapter` or window state, while still abstracting away
/// the details of that call to UIA.
///
/// Callers must avoid a second deadlock scenario. The tree is lazily
/// initialized on the first call to this method. So if the caller
/// holds a lock while calling this method, it must be careful to ensure
/// that running its tree initialization function while holding that lock
/// doesn't lead to deadlock.
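    ///
    /// Typical window-procedure wiring (a sketch; the surrounding names are
    /// hypothetical):
    ///
    /// ```ignore
    /// WM_GETOBJECT => match adapter.handle_wm_getobject(wparam, lparam) {
    ///     Some(result) => result.into(),
    ///     None => DefWindowProcW(window, message, wparam, lparam),
    /// }
    /// ```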
pub fn handle_wm_getobject(
&self,
wparam: WPARAM,
lparam: LPARAM,
) -> Option<impl Into<LRESULT>> {
// Don't bother with MSAA object IDs that are asking for something other
// than the client area of the window. DefWindowProc can handle those.
// First, cast the lparam to i32, to handle inconsistent conversion
// behavior in senders.
let objid: i32 = (lparam.0 & 0xFFFFFFFF) as _;
if objid < 0 && objid != UiaRootObjectId && objid != OBJID_CLIENT.0 {
return None;
}
let el: IRawElementProviderSimple = self.root_platform_node().into();
Some(WmGetObjectResult {
hwnd: self.hwnd,
wparam,
lparam,
el,
})
}
}
struct WmGetObjectResult {
hwnd: HWND,
wparam: WPARAM,
lparam: LPARAM,
el: IRawElementProviderSimple,
}
impl From<WmGetObjectResult> for LRESULT {
fn from(this: WmGetObjectResult) -> Self {
unsafe { UiaReturnRawElementProvider(this.hwnd, this.wparam, this.lparam, this.el) }
}
}
fn force_init_uia() {
// `UiaLookupId` is a cheap way of forcing UIA to initialize itself.
unsafe {
UiaLookupId(
AutomationIdentifierType_Property,
&ControlType_Property_GUID,
)
};
}
/// Events generated by a tree update.
#[must_use = "events must be explicitly raised"]
pub struct QueuedEvents(Vec<Event>);
impl QueuedEvents {
/// Raise all queued events synchronously.
///
/// The window may receive `WM_GETOBJECT` messages during this call.
/// This means that any locks required by the `WM_GETOBJECT` handler
/// must not be held when this method is called.
///
/// This method should be called on the thread that owns the window.
/// It's not clear whether this is a strict requirement of UIA itself,
/// but based on the known behavior of UIA, MSAA, and some ATs,
/// it's strongly recommended.
pub fn raise(self) {
for event in self.0 {
match event {
Event::Simple { element, event_id } => {
unsafe { UiaRaiseAutomationEvent(element, event_id) }.unwrap();
}
Event::PropertyChanged {
element,
property_id,
old_value,
new_value,
} => {
unsafe {
UiaRaiseAutomationPropertyChangedEvent(
element,
property_id,
old_value,
new_value,
)
}
.unwrap();
}
}
}
}
}
// We explicitly want to allow the queued events to be sent to the UI thread,
// so implement Send even though windows-rs doesn't implement it for all
// contained types. This is safe because we're not using COM threading.
unsafe impl Send for QueuedEvents {}
| 36.757848 | 95 | 0.592046 |
48f04c334941ac4e1c8226c207e799938725b000 | 919 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait A<T> {}
struct B<'a, T>(&'a A<T>);
trait X {}
impl<'a, T> X for B<'a, T> {}
fn f<'a, T, U>(v: Box<A<T>>) -> Box<X:> {
box B(v) as Box<X:> //~ ERROR value may contain references; add `'static` bound to `T`
}
fn g<'a, T, U>(v: Box<A<U>>) -> Box<X:> {
box B(v) as Box<X:> //~ ERROR value may contain references; add `'static` bound to `U`
}
fn h<'a, T: 'static>(v: Box<A<T>>) -> Box<X:> {
box B(v) as Box<X:> // ok
}
fn main() {}
| 28.71875 | 90 | 0.624592 |
879d20e376952d789b2ee876806870132cb6b761 | 8,409 | use crate::html_tree::HtmlProp as TagAttribute;
use crate::PeekValue;
use lazy_static::lazy_static;
use proc_macro2::TokenStream;
use quote::{quote, quote_spanned, ToTokens};
use std::collections::HashMap;
use syn::parse::{Parse, ParseStream, Result as ParseResult};
use syn::{Expr, ExprClosure, ExprTuple, Ident, Pat};
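/// Attributes parsed from a tag inside the `html!` macro, with listeners and
/// special attributes (`class`, `value`, `type`, ...) split out.
///
/// Hypothetical input that exercises several fields (a sketch, not taken from
/// the original docs):
///
/// ```ignore
/// html! { <input type="text" value=self.text oninput=|e| Msg::Input(e.value) /> }
/// ```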
pub struct TagAttributes {
pub attributes: Vec<TagAttribute>,
pub listeners: Vec<TokenStream>,
pub classes: Option<ClassesForm>,
pub value: Option<Expr>,
pub kind: Option<Expr>,
pub checked: Option<Expr>,
pub disabled: Option<Expr>,
pub selected: Option<Expr>,
pub href: Option<Expr>,
}
pub enum ClassesForm {
Tuple(Vec<Expr>),
Single(Expr),
}
pub struct TagListener {
name: Ident,
handler: Expr,
event_name: String,
}
lazy_static! {
static ref LISTENER_MAP: HashMap<&'static str, &'static str> = {
let mut m = HashMap::new();
m.insert("onclick", "ClickEvent");
m.insert("ondoubleclick", "DoubleClickEvent");
m.insert("onkeypress", "KeyPressEvent");
m.insert("onkeydown", "KeyDownEvent");
m.insert("onkeyup", "KeyUpEvent");
m.insert("onmousedown", "MouseDownEvent");
m.insert("onmousemove", "MouseMoveEvent");
m.insert("onmouseout", "MouseOutEvent");
m.insert("onmouseenter", "MouseEnterEvent");
m.insert("onmouseleave", "MouseLeaveEvent");
m.insert("onmousewheel", "MouseWheelEvent");
m.insert("onmouseover", "MouseOverEvent");
m.insert("onmouseup", "MouseUpEvent");
m.insert("touchcancel", "TouchCancel");
m.insert("touchend", "TouchEnd");
m.insert("touchenter", "TouchEnter");
m.insert("touchmove", "TouchMove");
m.insert("touchstart", "TouchStart");
m.insert("ongotpointercapture", "GotPointerCaptureEvent");
m.insert("onlostpointercapture", "LostPointerCaptureEvent");
m.insert("onpointercancel", "PointerCancelEvent");
m.insert("onpointerdown", "PointerDownEvent");
m.insert("onpointerenter", "PointerEnterEvent");
m.insert("onpointerleave", "PointerLeaveEvent");
m.insert("onpointermove", "PointerMoveEvent");
m.insert("onpointerout", "PointerOutEvent");
m.insert("onpointerover", "PointerOverEvent");
m.insert("onpointerup", "PointerUpEvent");
m.insert("onscroll", "ScrollEvent");
m.insert("onblur", "BlurEvent");
m.insert("onfocus", "FocusEvent");
m.insert("onsubmit", "SubmitEvent");
m.insert("oninput", "InputData");
m.insert("onchange", "ChangeData");
m.insert("ondrag", "DragEvent");
m.insert("ondragstart", "DragStartEvent");
m.insert("ondragend", "DragEndEvent");
m.insert("ondragenter", "DragEnterEvent");
m.insert("ondragleave", "DragLeaveEvent");
m.insert("ondragover", "DragOverEvent");
m.insert("ondragexit", "DragExitEvent");
m.insert("ondrop", "DragDropEvent");
m.insert("oncontextmenu", "ContextMenuEvent");
m
};
}
impl TagAttributes {
fn drain_listeners(attrs: &mut Vec<TagAttribute>) -> Vec<TagListener> {
let mut i = 0;
let mut drained = Vec::new();
while i < attrs.len() {
let name_str = attrs[i].label.to_string();
if let Some(event_type) = LISTENER_MAP.get(&name_str.as_str()) {
let TagAttribute { label, value } = attrs.remove(i);
drained.push(TagListener {
name: label.name,
handler: value,
event_name: event_type.to_owned().to_string(),
});
} else {
i += 1;
}
}
drained
}
fn remove_attr(attrs: &mut Vec<TagAttribute>, name: &str) -> Option<Expr> {
let mut i = 0;
while i < attrs.len() {
if attrs[i].label.to_string() == name {
return Some(attrs.remove(i).value);
} else {
i += 1;
}
}
None
}
fn map_classes(class_expr: Expr) -> ClassesForm {
match class_expr {
Expr::Tuple(ExprTuple { elems, .. }) => ClassesForm::Tuple(elems.into_iter().collect()),
expr => ClassesForm::Single(expr),
}
}
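    /// Expands one drained listener into wrapper tokens. For a hypothetical
    /// input like `onclick=|_| Msg::Clicked`, the closure argument is typed as
    /// `::yew::events::ClickEvent` and the closure is wrapped in
    /// `::yew::html::onclick::Wrapper`.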
fn map_listener(listener: TagListener) -> ParseResult<TokenStream> {
let TagListener {
name,
event_name,
handler,
} = listener;
match handler {
Expr::Closure(closure) => {
let ExprClosure {
inputs,
body,
or1_token,
or2_token,
..
} = closure;
let or_span = quote! {#or1_token#or2_token};
if inputs.len() != 1 {
return Err(syn::Error::new_spanned(
or_span,
"there must be one closure argument",
));
}
let var = match inputs.first().unwrap() {
Pat::Ident(pat) => Ok(pat.into_token_stream()),
Pat::Wild(pat) => Ok(pat.into_token_stream()),
_ => Err(syn::Error::new_spanned(or_span, "invalid closure argument")),
}?;
let handler =
Ident::new(&format!("__yew_{}_handler", name.to_string()), name.span());
let listener =
Ident::new(&format!("__yew_{}_listener", name.to_string()), name.span());
let segment = syn::PathSegment {
ident: Ident::new(&event_name, name.span()),
arguments: syn::PathArguments::None,
};
let var_type = quote! { ::yew::events::#segment };
let wrapper_type = quote! { ::yew::html::#name::Wrapper };
let listener_stream = quote_spanned! {name.span()=> {
let #handler = move | #var: #var_type | #body;
let #listener = #wrapper_type::from(#handler);
#listener
}};
Ok(listener_stream)
}
_ => Err(syn::Error::new_spanned(
&name,
format!("`{}` attribute value should be a closure", name),
)),
}
}
}
impl Parse for TagAttributes {
fn parse(input: ParseStream) -> ParseResult<Self> {
let mut attributes: Vec<TagAttribute> = Vec::new();
while TagAttribute::peek(input.cursor()).is_some() {
attributes.push(input.parse::<TagAttribute>()?);
}
let mut listeners = Vec::new();
for listener in TagAttributes::drain_listeners(&mut attributes) {
listeners.push(TagAttributes::map_listener(listener)?);
}
        // Listeners were drained above, so any remaining duplicate attribute is an error
attributes.sort_by(|a, b| {
a.label
.to_string()
.partial_cmp(&b.label.to_string())
.unwrap()
});
let mut i = 0;
while i + 1 < attributes.len() {
if attributes[i].label.to_string() == attributes[i + 1].label.to_string() {
let label = &attributes[i + 1].label;
return Err(syn::Error::new_spanned(
label,
format!("only one `{}` attribute allowed", label),
));
}
i += 1;
}
let classes =
TagAttributes::remove_attr(&mut attributes, "class").map(TagAttributes::map_classes);
let value = TagAttributes::remove_attr(&mut attributes, "value");
let kind = TagAttributes::remove_attr(&mut attributes, "type");
let checked = TagAttributes::remove_attr(&mut attributes, "checked");
let disabled = TagAttributes::remove_attr(&mut attributes, "disabled");
let selected = TagAttributes::remove_attr(&mut attributes, "selected");
let href = TagAttributes::remove_attr(&mut attributes, "href");
Ok(TagAttributes {
attributes,
classes,
listeners,
value,
kind,
checked,
disabled,
selected,
href,
})
}
}
| 36.402597 | 100 | 0.5374 |
189b5bd0f9e87e576f7a7f490e972dd8c55b8fe1 | 17,731 | use crate::snippet::Style;
use crate::Applicability;
use crate::CodeSuggestion;
use crate::Level;
use crate::Substitution;
use crate::SubstitutionPart;
use crate::SuggestionStyle;
use rustc_span::{MultiSpan, Span, DUMMY_SP};
use std::fmt;
#[must_use]
#[derive(Clone, Debug, PartialEq, Hash, RustcEncodable, RustcDecodable)]
pub struct Diagnostic {
pub level: Level,
pub message: Vec<(String, Style)>,
pub code: Option<DiagnosticId>,
pub span: MultiSpan,
pub children: Vec<SubDiagnostic>,
pub suggestions: Vec<CodeSuggestion>,
/// This is not used for highlighting or rendering any error message. Rather, it can be used
/// as a sort key to sort a buffer of diagnostics. By default, it is the primary span of
/// `span` if there is one. Otherwise, it is `DUMMY_SP`.
pub sort_span: Span,
}
#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub enum DiagnosticId {
Error(String),
Lint(String),
}
/// For example a note attached to an error.
#[derive(Clone, Debug, PartialEq, Hash, RustcEncodable, RustcDecodable)]
pub struct SubDiagnostic {
pub level: Level,
pub message: Vec<(String, Style)>,
pub span: MultiSpan,
pub render_span: Option<MultiSpan>,
}
#[derive(Debug, PartialEq, Eq)]
pub struct DiagnosticStyledString(pub Vec<StringPart>);
impl DiagnosticStyledString {
pub fn new() -> DiagnosticStyledString {
DiagnosticStyledString(vec![])
}
pub fn push_normal<S: Into<String>>(&mut self, t: S) {
self.0.push(StringPart::Normal(t.into()));
}
pub fn push_highlighted<S: Into<String>>(&mut self, t: S) {
self.0.push(StringPart::Highlighted(t.into()));
}
pub fn push<S: Into<String>>(&mut self, t: S, highlight: bool) {
if highlight {
self.push_highlighted(t);
} else {
self.push_normal(t);
}
}
pub fn normal<S: Into<String>>(t: S) -> DiagnosticStyledString {
DiagnosticStyledString(vec![StringPart::Normal(t.into())])
}
pub fn highlighted<S: Into<String>>(t: S) -> DiagnosticStyledString {
DiagnosticStyledString(vec![StringPart::Highlighted(t.into())])
}
pub fn content(&self) -> String {
self.0.iter().map(|x| x.content()).collect::<String>()
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum StringPart {
Normal(String),
Highlighted(String),
}
impl StringPart {
pub fn content(&self) -> &str {
match self {
&StringPart::Normal(ref s) | &StringPart::Highlighted(ref s) => s,
}
}
}
impl Diagnostic {
pub fn new(level: Level, message: &str) -> Self {
Diagnostic::new_with_code(level, None, message)
}
pub fn new_with_code(level: Level, code: Option<DiagnosticId>, message: &str) -> Self {
Diagnostic {
level,
message: vec![(message.to_owned(), Style::NoStyle)],
code,
span: MultiSpan::new(),
children: vec![],
suggestions: vec![],
sort_span: DUMMY_SP,
}
}
pub fn is_error(&self) -> bool {
match self.level {
Level::Bug | Level::Fatal | Level::Error | Level::FailureNote => true,
Level::Warning | Level::Note | Level::Help | Level::Cancelled => false,
}
}
/// Cancel the diagnostic (a structured diagnostic must either be emitted or
/// canceled or it will panic when dropped).
pub fn cancel(&mut self) {
self.level = Level::Cancelled;
}
pub fn cancelled(&self) -> bool {
self.level == Level::Cancelled
}
/// Set the sorting span.
pub fn set_sort_span(&mut self, sp: Span) {
self.sort_span = sp;
}
/// Adds a span/label to be included in the resulting snippet.
/// This label will be shown together with the original span/label used when creating the
/// diagnostic, *not* a span added by one of the `span_*` methods.
///
/// This is pushed onto the `MultiSpan` that was created when the
/// diagnostic was first built. If you don't call this function at
/// all, and you just supplied a `Span` to create the diagnostic,
/// then the snippet will just include that `Span`, which is
/// called the primary span.
pub fn span_label<T: Into<String>>(&mut self, span: Span, label: T) -> &mut Self {
self.span.push_span_label(span, label.into());
self
}
pub fn replace_span_with(&mut self, after: Span) -> &mut Self {
let before = self.span.clone();
self.set_span(after);
for span_label in before.span_labels() {
if let Some(label) = span_label.label {
self.span_label(after, label);
}
}
self
}
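    /// Adds an aligned "expected ... / found ..." note. Illustrative sketch
    /// (`diag` is a hypothetical `Diagnostic`):
    ///
    /// ```ignore
    /// diag.note_expected_found(
    ///     &"type",
    ///     DiagnosticStyledString::normal("u32"),
    ///     &"type",
    ///     DiagnosticStyledString::normal("i32"),
    /// );
    /// ```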
pub fn note_expected_found(
&mut self,
expected_label: &dyn fmt::Display,
expected: DiagnosticStyledString,
found_label: &dyn fmt::Display,
found: DiagnosticStyledString,
) -> &mut Self {
self.note_expected_found_extra(expected_label, expected, found_label, found, &"", &"")
}
pub fn note_unsuccessfull_coercion(
&mut self,
expected: DiagnosticStyledString,
found: DiagnosticStyledString,
) -> &mut Self {
let mut msg: Vec<_> =
vec![(format!("required when trying to coerce from type `"), Style::NoStyle)];
msg.extend(expected.0.iter().map(|x| match *x {
StringPart::Normal(ref s) => (s.to_owned(), Style::NoStyle),
StringPart::Highlighted(ref s) => (s.to_owned(), Style::Highlight),
}));
        msg.push((format!("` to type `"), Style::NoStyle));
msg.extend(found.0.iter().map(|x| match *x {
StringPart::Normal(ref s) => (s.to_owned(), Style::NoStyle),
StringPart::Highlighted(ref s) => (s.to_owned(), Style::Highlight),
}));
msg.push((format!("`"), Style::NoStyle));
// For now, just attach these as notes
self.highlighted_note(msg);
self
}
pub fn note_expected_found_extra(
&mut self,
expected_label: &dyn fmt::Display,
expected: DiagnosticStyledString,
found_label: &dyn fmt::Display,
found: DiagnosticStyledString,
expected_extra: &dyn fmt::Display,
found_extra: &dyn fmt::Display,
) -> &mut Self {
let expected_label = format!("expected {}", expected_label);
let found_label = format!("found {}", found_label);
let (found_padding, expected_padding) = if expected_label.len() > found_label.len() {
(expected_label.len() - found_label.len(), 0)
} else {
(0, found_label.len() - expected_label.len())
};
let mut msg: Vec<_> =
vec![(format!("{}{} `", " ".repeat(expected_padding), expected_label), Style::NoStyle)];
msg.extend(expected.0.iter().map(|x| match *x {
StringPart::Normal(ref s) => (s.to_owned(), Style::NoStyle),
StringPart::Highlighted(ref s) => (s.to_owned(), Style::Highlight),
}));
msg.push((format!("`{}\n", expected_extra), Style::NoStyle));
msg.push((format!("{}{} `", " ".repeat(found_padding), found_label), Style::NoStyle));
msg.extend(found.0.iter().map(|x| match *x {
StringPart::Normal(ref s) => (s.to_owned(), Style::NoStyle),
StringPart::Highlighted(ref s) => (s.to_owned(), Style::Highlight),
}));
msg.push((format!("`{}", found_extra), Style::NoStyle));
// For now, just attach these as notes.
self.highlighted_note(msg);
self
}
pub fn note_trait_signature(&mut self, name: String, signature: String) -> &mut Self {
self.highlighted_note(vec![
(format!("`{}` from trait: `", name), Style::NoStyle),
(signature, Style::Highlight),
("`".to_string(), Style::NoStyle),
]);
self
}
pub fn note(&mut self, msg: &str) -> &mut Self {
self.sub(Level::Note, msg, MultiSpan::new(), None);
self
}
pub fn highlighted_note(&mut self, msg: Vec<(String, Style)>) -> &mut Self {
self.sub_with_highlights(Level::Note, msg, MultiSpan::new(), None);
self
}
/// Prints the span with a note above it.
pub fn span_note<S: Into<MultiSpan>>(&mut self, sp: S, msg: &str) -> &mut Self {
self.sub(Level::Note, msg, sp.into(), None);
self
}
pub fn warn(&mut self, msg: &str) -> &mut Self {
self.sub(Level::Warning, msg, MultiSpan::new(), None);
self
}
/// Prints the span with a warn above it.
pub fn span_warn<S: Into<MultiSpan>>(&mut self, sp: S, msg: &str) -> &mut Self {
self.sub(Level::Warning, msg, sp.into(), None);
self
}
pub fn help(&mut self, msg: &str) -> &mut Self {
self.sub(Level::Help, msg, MultiSpan::new(), None);
self
}
/// Prints the span with some help above it.
pub fn span_help<S: Into<MultiSpan>>(&mut self, sp: S, msg: &str) -> &mut Self {
self.sub(Level::Help, msg, sp.into(), None);
self
}
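    /// Adds a suggestion spanning multiple parts, each with its own replacement
    /// snippet. Sketch (`sp1` and `sp2` are hypothetical spans):
    ///
    /// ```ignore
    /// diag.multipart_suggestion(
    ///     "remove these parentheses",
    ///     vec![(sp1, String::new()), (sp2, String::new())],
    ///     Applicability::MachineApplicable,
    /// );
    /// ```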
pub fn multipart_suggestion(
&mut self,
msg: &str,
suggestion: Vec<(Span, String)>,
applicability: Applicability,
) -> &mut Self {
self.suggestions.push(CodeSuggestion {
substitutions: vec![Substitution {
parts: suggestion
.into_iter()
.map(|(span, snippet)| SubstitutionPart { snippet, span })
.collect(),
}],
msg: msg.to_owned(),
style: SuggestionStyle::ShowCode,
applicability,
});
self
}
    /// Prints out a message for a multipart suggestion without showing the suggested code.
///
/// This is intended to be used for suggestions that are obvious in what the changes need to
/// be from the message, showing the span label inline would be visually unpleasant
/// (marginally overlapping spans or multiline spans) and showing the snippet window wouldn't
/// improve understandability.
pub fn tool_only_multipart_suggestion(
&mut self,
msg: &str,
suggestion: Vec<(Span, String)>,
applicability: Applicability,
) -> &mut Self {
self.suggestions.push(CodeSuggestion {
substitutions: vec![Substitution {
parts: suggestion
.into_iter()
.map(|(span, snippet)| SubstitutionPart { snippet, span })
.collect(),
}],
msg: msg.to_owned(),
style: SuggestionStyle::CompletelyHidden,
applicability,
});
self
}
/// Prints out a message with a suggested edit of the code.
///
/// In case of short messages and a simple suggestion, rustc displays it as a label:
///
/// ```text
/// try adding parentheses: `(tup.0).1`
/// ```
///
/// The message
///
/// * should not end in any punctuation (a `:` is added automatically)
/// * should not be a question (avoid language like "did you mean")
/// * should not contain any phrases like "the following", "as shown", etc.
/// * may look like "to do xyz, use" or "to do xyz, use abc"
/// * may contain a name of a function, variable, or type, but not whole expressions
///
/// See `CodeSuggestion` for more information.
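    ///
    /// Illustrative sketch (`sp` is a hypothetical `Span`):
    ///
    /// ```ignore
    /// diag.span_suggestion(
    ///     sp,
    ///     "try adding parentheses",
    ///     "(tup.0).1".to_string(),
    ///     Applicability::MachineApplicable,
    /// );
    /// ```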
pub fn span_suggestion(
&mut self,
sp: Span,
msg: &str,
suggestion: String,
applicability: Applicability,
) -> &mut Self {
self.span_suggestion_with_style(
sp,
msg,
suggestion,
applicability,
SuggestionStyle::ShowCode,
);
self
}
pub fn span_suggestion_with_style(
&mut self,
sp: Span,
msg: &str,
suggestion: String,
applicability: Applicability,
style: SuggestionStyle,
) -> &mut Self {
self.suggestions.push(CodeSuggestion {
substitutions: vec![Substitution {
parts: vec![SubstitutionPart { snippet: suggestion, span: sp }],
}],
msg: msg.to_owned(),
style,
applicability,
});
self
}
pub fn span_suggestion_verbose(
&mut self,
sp: Span,
msg: &str,
suggestion: String,
applicability: Applicability,
) -> &mut Self {
self.span_suggestion_with_style(
sp,
msg,
suggestion,
applicability,
SuggestionStyle::ShowAlways,
);
self
}
/// Prints out a message with multiple suggested edits of the code.
pub fn span_suggestions(
&mut self,
sp: Span,
msg: &str,
suggestions: impl Iterator<Item = String>,
applicability: Applicability,
) -> &mut Self {
self.suggestions.push(CodeSuggestion {
substitutions: suggestions
.map(|snippet| Substitution { parts: vec![SubstitutionPart { snippet, span: sp }] })
.collect(),
msg: msg.to_owned(),
style: SuggestionStyle::ShowCode,
applicability,
});
self
}
/// Prints out a message with a suggested edit of the code. If the suggestion is presented
/// inline, it will only show the message and not the suggestion.
///
/// See `CodeSuggestion` for more information.
pub fn span_suggestion_short(
&mut self,
sp: Span,
msg: &str,
suggestion: String,
applicability: Applicability,
) -> &mut Self {
self.span_suggestion_with_style(
sp,
msg,
suggestion,
applicability,
SuggestionStyle::HideCodeInline,
);
self
}
    /// Prints out a message for a suggestion without showing the suggested code.
///
/// This is intended to be used for suggestions that are obvious in what the changes need to
/// be from the message, showing the span label inline would be visually unpleasant
/// (marginally overlapping spans or multiline spans) and showing the snippet window wouldn't
/// improve understandability.
pub fn span_suggestion_hidden(
&mut self,
sp: Span,
msg: &str,
suggestion: String,
applicability: Applicability,
) -> &mut Self {
self.span_suggestion_with_style(
sp,
msg,
suggestion,
applicability,
SuggestionStyle::HideCodeAlways,
);
self
}
    /// Adds a suggestion to the JSON output, but otherwise remains silent/undisplayed in the CLI.
///
/// This is intended to be used for suggestions that are *very* obvious in what the changes
/// need to be from the message, but we still want other tools to be able to apply them.
pub fn tool_only_span_suggestion(
&mut self,
sp: Span,
msg: &str,
suggestion: String,
applicability: Applicability,
) -> &mut Self {
self.span_suggestion_with_style(
sp,
msg,
suggestion,
applicability,
SuggestionStyle::CompletelyHidden,
);
self
}
pub fn set_span<S: Into<MultiSpan>>(&mut self, sp: S) -> &mut Self {
self.span = sp.into();
if let Some(span) = self.span.primary_span() {
self.sort_span = span;
}
self
}
pub fn code(&mut self, s: DiagnosticId) -> &mut Self {
self.code = Some(s);
self
}
pub fn clear_code(&mut self) -> &mut Self {
self.code = None;
self
}
pub fn get_code(&self) -> Option<DiagnosticId> {
self.code.clone()
}
pub fn set_primary_message<M: Into<String>>(&mut self, msg: M) -> &mut Self {
self.message[0] = (msg.into(), Style::NoStyle);
self
}
pub fn message(&self) -> String {
self.message.iter().map(|i| i.0.as_str()).collect::<String>()
}
pub fn styled_message(&self) -> &Vec<(String, Style)> {
&self.message
}
/// Used by a lint. Copies over all details *but* the "main
/// message".
pub fn copy_details_not_message(&mut self, from: &Diagnostic) {
self.span = from.span.clone();
self.code = from.code.clone();
self.children.extend(from.children.iter().cloned())
}
/// Convenience function for internal use, clients should use one of the
/// public methods above.
pub fn sub(
&mut self,
level: Level,
message: &str,
span: MultiSpan,
render_span: Option<MultiSpan>,
) {
let sub = SubDiagnostic {
level,
message: vec![(message.to_owned(), Style::NoStyle)],
span,
render_span,
};
self.children.push(sub);
}
/// Convenience function for internal use, clients should use one of the
/// public methods above.
fn sub_with_highlights(
&mut self,
level: Level,
message: Vec<(String, Style)>,
span: MultiSpan,
render_span: Option<MultiSpan>,
) {
let sub = SubDiagnostic { level, message, span, render_span };
self.children.push(sub);
}
}
impl SubDiagnostic {
pub fn message(&self) -> String {
self.message.iter().map(|i| i.0.as_str()).collect::<String>()
}
pub fn styled_message(&self) -> &Vec<(String, Style)> {
&self.message
}
}
| 32.005415 | 100 | 0.57453 |
013a679629f81dd711ebf9b32b355a5641629961 | 1,664 | //! More information soon...
mod ffi;
pub mod vjoy_base;
pub mod vjoy_extra;
#[doc(hidden)]
// `pub` is generally not needed for unit tests, but our custom test targets
// defined in Cargo.toml require it.
pub mod test_env {
#![allow(unused)]
use super::vjoy_base::device::VJDevice;
// Set of devices used for tests. At least two must exist to test discrete
    // POVs and continuous POVs (both cannot reside in one device).
// The set of tests will be based on this development environment:
// - Device of test #1:
// - Device id: 9 (editable)
// - Activated axes: X, Ry, Slider 1 (others are deactivated)
// - Number of buttons: 5
// - # of Disc POVs: 0
// - # of Cont POVs: 2
// - Activated force feedback: constant, ramp, square, sine, triangle,
// sawtooth up, sawtooth down, spring, damper, inertia, friction
//
// - Device of test #2:
// - Device id: 10 (editable)
// - Activated axes: X, Y, Z, Rx, Ry, Rz, Slider 1, Slider 2
// - Number of buttons: 1
// - # of Disc POVs: 1
// - # of Cont POVs: 0
// - Activated force feedback: none, effects disabled
pub const TEST_DEVICE_1: VJDevice = VJDevice::D9; // Device of test #1
pub const TEST_DEVICE_2: VJDevice = VJDevice::D10; // Device of test #2
pub const TEST_DEVICE_INACTIVE: VJDevice = VJDevice::D16; // Device not activated
pub const TEST_VERSION: u16 = 219;
pub const TEST_PRODUCT: &str = "vJoy - Virtual Joystick";
pub const TEST_MANUFACTURER: &str = "Shaul Eizikovich";
pub const TEST_SERIAL_NUMBER: &str = "2.1.9";
}
| 38.697674 | 99 | 0.621394 |
e676d989628aed8c80ad6b596b70e6b738ece466 | 4,909 | #![feature(specialization)]
#![feature(decl_macro)]
#![feature(try_trait)]
#![feature(fnbox)]
#![feature(never_type)]
#![feature(proc_macro_hygiene)]
#![feature(crate_visibility_modifier)]
#![feature(try_from)]
#![feature(label_break_value)]
#![recursion_limit="256"]
#![doc(html_root_url = "https://api.rocket.rs/v0.4")]
#![doc(html_favicon_url = "https://rocket.rs/v0.4/images/favicon.ico")]
#![doc(html_logo_url = "https://rocket.rs/v0.4/images/logo-boxed.png")]
//! # Rocket - Core API Documentation
//!
//! Hello, and welcome to the core Rocket API documentation!
//!
//! This API documentation is highly technical and is purely a reference.
//! There's an [overview] of Rocket on the main site as well as a [full,
//! detailed guide]. If you'd like pointers on getting started, see the
//! [quickstart] or [getting started] chapters of the guide.
//!
//! You may also be interested in looking at the
//! [`rocket_contrib`](../rocket_contrib) documentation, which contains
//! automatic JSON (de)serialization, templating support, static file serving,
//! and other useful features.
//!
//! [overview]: https://rocket.rs/v0.4/overview
//! [full, detailed guide]: https://rocket.rs/v0.4/guide
//! [quickstart]: https://rocket.rs/v0.4/guide/quickstart
//! [getting started]: https://rocket.rs/v0.4/guide/getting-started
//!
//! ## Libraries
//!
//! Rocket's functionality is split into two crates:
//!
//! 1. Core - This core library. Needed by every Rocket application.
//! 2. [Contrib](../rocket_contrib) - Provides useful functionality for many
//! Rocket applications. Completely optional.
//!
//! ## Usage
//!
//! First, depend on `rocket` in `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! rocket = "0.4.0"
//! ```
//!
//! Then, add the following to the top of your `main.rs` file:
//!
//! ```rust
//! #![feature(proc_macro_hygiene, decl_macro)]
//!
//! #[macro_use] extern crate rocket;
//! # #[get("/")] fn hello() { }
//! # fn main() { rocket::ignite().mount("/", routes![hello]); }
//! ```
//!
//! See the [guide](https://rocket.rs/v0.4/guide) for more information on how to
//! write Rocket applications. Here's a simple example to get you started:
//!
//! ```rust
//! #![feature(proc_macro_hygiene, decl_macro)]
//!
//! #[macro_use] extern crate rocket;
//!
//! #[get("/")]
//! fn hello() -> &'static str {
//! "Hello, world!"
//! }
//!
//! fn main() {
//! # if false { // We don't actually want to launch the server in an example.
//! rocket::ignite().mount("/", routes![hello]).launch();
//! # }
//! }
//! ```
//!
//! ## Configuration
//!
//! Rocket and Rocket libraries are configured via the `Rocket.toml` file and/or
//! `ROCKET_{PARAM}` environment variables. For more information on how to
//! configure Rocket, see the [configuration section] of the guide as well as
//! the [`config`] module documentation.
//!
//! [configuration section]: https://rocket.rs/v0.4/guide/configuration/
//!
//! ## Testing
//!
//! The [`local`] module contains structures that facilitate unit and
//! integration testing of a Rocket application. The top-level [`local`] module
//! documentation and the [testing chapter of the guide] include detailed
//! examples.
//!
//! [testing chapter of the guide]: https://rocket.rs/v0.4/guide/testing/#testing
#[allow(unused_imports)] #[macro_use] extern crate rocket_codegen;
#[doc(hidden)] pub use rocket_codegen::*;
extern crate rocket_http;
#[macro_use] extern crate log;
#[macro_use] extern crate pear;
extern crate yansi;
extern crate toml;
extern crate num_cpus;
extern crate state;
extern crate time;
extern crate memchr;
extern crate base64;
extern crate atty;
#[cfg(test)] #[macro_use] extern crate lazy_static;
#[doc(hidden)] #[macro_use] pub mod logger;
pub mod local;
pub mod request;
pub mod response;
pub mod outcome;
pub mod config;
pub mod data;
pub mod handler;
pub mod fairing;
pub mod error;
// Reexport of HTTP everything.
pub mod http {
//! Types that map to concepts in HTTP.
//!
//! This module exports types that map to HTTP concepts or to the underlying
//! HTTP library when needed.
#[doc(inline)]
pub use rocket_http::*;
}
mod router;
mod rocket;
mod codegen;
mod catcher;
mod ext;
#[doc(inline)] pub use response::Response;
#[doc(inline)] pub use handler::{Handler, ErrorHandler};
#[doc(hidden)] pub use codegen::{StaticRouteInfo, StaticCatchInfo};
#[doc(inline)] pub use outcome::Outcome;
#[doc(inline)] pub use data::Data;
#[doc(inline)] pub use config::Config;
pub use router::Route;
pub use request::{Request, State};
pub use catcher::Catcher;
pub use rocket::Rocket;
/// Alias to [`Rocket::ignite()`]. Creates a new instance of `Rocket`.
pub fn ignite() -> Rocket {
Rocket::ignite()
}
/// Alias to [`Rocket::custom()`]. Creates a new instance of `Rocket` with a
/// custom configuration.
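///
/// Illustrative sketch:
///
/// ```rust,ignore
/// // `my_config` is a hypothetical, pre-built `config::Config` value.
/// rocket::custom(my_config).launch();
/// ```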
pub fn custom(config: config::Config) -> Rocket {
Rocket::custom(config)
}
| 29.39521 | 81 | 0.676309 |
697465cc609ecc9509c85d5149fb36ee46b2b3a9 | 28,313 | #[doc = "callback object\n\nClients can handle the 'done' event to get notified when\nthe related request is done."]
pub mod wl_callback {
use super::wayland_server::{
backend::{
protocol::{same_interface, Argument, Interface, Message, WEnum},
smallvec, InvalidId, ObjectData, ObjectId,
},
Dispatch, DispatchError, DisplayHandle, New, Resource, ResourceData,
};
use std::sync::Arc;
#[doc = r" The minimal object version supporting this event"]
pub const EVT_DONE_SINCE: u32 = 1u32;
#[derive(Debug)]
#[non_exhaustive]
pub enum Request {}
#[derive(Debug)]
#[non_exhaustive]
pub enum Event {
#[doc = "done event\n\nNotify the client when the related request is done.\n\nThis is a destructor, once sent this object cannot be used any longer."]
Done {
#[doc = "request-specific data for the callback"]
callback_data: u32
},
}
#[derive(Debug, Clone)]
pub struct WlCallback {
id: ObjectId,
version: u32,
data: Option<Arc<dyn std::any::Any + Send + Sync + 'static>>,
}
impl std::cmp::PartialEq for WlCallback {
fn eq(&self, other: &WlCallback) -> bool {
self.id == other.id
}
}
impl std::cmp::Eq for WlCallback {}
impl super::wayland_server::Resource for WlCallback {
type Request = Request;
type Event = Event;
#[inline]
fn interface() -> &'static Interface {
&super::WL_CALLBACK_INTERFACE
}
#[inline]
fn id(&self) -> ObjectId {
self.id.clone()
}
#[inline]
fn version(&self) -> u32 {
self.version
}
#[inline]
fn data<U: 'static>(&self) -> Option<&U> {
self.data
.as_ref()
.and_then(|arc| (&**arc).downcast_ref::<ResourceData<Self, U>>())
.map(|data| &data.udata)
}
#[inline]
fn from_id(conn: &mut DisplayHandle, id: ObjectId) -> Result<Self, InvalidId> {
if !same_interface(id.interface(), Self::interface()) && !id.is_null() {
return Err(InvalidId);
}
let version = conn.object_info(id.clone()).map(|info| info.version).unwrap_or(0);
let data = conn.get_object_data(id.clone()).ok();
Ok(WlCallback { id, data, version })
}
fn parse_request(
conn: &mut DisplayHandle,
msg: Message<ObjectId>,
) -> Result<(Self, Self::Request), DispatchError> {
let me = Self::from_id(conn, msg.sender_id.clone()).unwrap();
match msg.opcode {
_ => Err(DispatchError::BadMessage { msg, interface: Self::interface().name }),
}
}
fn write_event(
&self,
conn: &mut DisplayHandle,
msg: Self::Event,
) -> Result<Message<ObjectId>, InvalidId> {
match msg {
Event::Done { callback_data } => Ok(Message {
sender_id: self.id.clone(),
opcode: 0u16,
args: smallvec::smallvec![Argument::Uint(callback_data)],
}),
}
}
fn __set_object_data(
&mut self,
odata: std::sync::Arc<dyn std::any::Any + Send + Sync + 'static>,
) {
self.data = Some(odata);
}
}
impl WlCallback {
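        /// Send the `done` event to the client. Illustrative sketch (`callback`
        /// is a bound `WlCallback`, `dh` a `&mut DisplayHandle`, and `serial` a
        /// hypothetical `u32`):
        ///
        /// ```ignore
        /// callback.done(dh, serial);
        /// ```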
#[allow(clippy::too_many_arguments)]
pub fn done(&self, conn: &mut DisplayHandle, callback_data: u32) {
let _ = conn.send_event(self, Event::Done { callback_data });
}
}
}
pub mod test_global {
use super::wayland_server::{
backend::{
protocol::{same_interface, Argument, Interface, Message, WEnum},
smallvec, InvalidId, ObjectData, ObjectId,
},
Dispatch, DispatchError, DisplayHandle, New, Resource, ResourceData,
};
use std::sync::Arc;
#[doc = r" The minimal object version supporting this request"]
pub const REQ_MANY_ARGS_SINCE: u32 = 1u32;
#[doc = r" The minimal object version supporting this request"]
pub const REQ_GET_SECONDARY_SINCE: u32 = 2u32;
#[doc = r" The minimal object version supporting this request"]
pub const REQ_GET_TERTIARY_SINCE: u32 = 3u32;
#[doc = r" The minimal object version supporting this request"]
pub const REQ_LINK_SINCE: u32 = 3u32;
#[doc = r" The minimal object version supporting this request"]
pub const REQ_DESTROY_SINCE: u32 = 4u32;
#[doc = r" The minimal object version supporting this event"]
pub const EVT_MANY_ARGS_EVT_SINCE: u32 = 1u32;
#[doc = r" The minimal object version supporting this event"]
pub const EVT_ACK_SECONDARY_SINCE: u32 = 1u32;
#[doc = r" The minimal object version supporting this event"]
pub const EVT_CYCLE_QUAD_SINCE: u32 = 1u32;
#[derive(Debug)]
#[non_exhaustive]
pub enum Request {
#[doc = "a request with every possible non-object arg"]
ManyArgs {
#[doc = "an unsigned int"]
unsigned_int: u32,
            #[doc = "a signed int"]
signed_int: i32,
#[doc = "a fixed point number"]
fixed_point: f64,
#[doc = "an array"]
number_array: Vec<u8>,
#[doc = "some text"]
some_text: String,
#[doc = "a file descriptor"]
file_descriptor: ::std::os::unix::io::RawFd,
},
#[doc = "Only available since version 2 of the interface"]
GetSecondary {
#[doc = "create a secondary"]
sec: New<super::secondary::Secondary>,
},
#[doc = "Only available since version 3 of the interface"]
GetTertiary {
#[doc = "create a tertiary"]
ter: New<super::tertiary::Tertiary>,
},
#[doc = "link a secondary and a tertiary\n\n\n\nOnly available since version 3 of the interface"]
Link { sec: super::secondary::Secondary, ter: Option<super::tertiary::Tertiary>, time: u32 },
#[doc = "This is a destructor, once received this object cannot be used any longer.\nOnly available since version 4 of the interface"]
Destroy,
}
#[derive(Debug)]
#[non_exhaustive]
pub enum Event {
#[doc = "an event with every possible non-object arg"]
ManyArgsEvt {
#[doc = "an unsigned int"]
unsigned_int: u32,
            #[doc = "a signed int"]
signed_int: i32,
#[doc = "a fixed point number"]
fixed_point: f64,
#[doc = "an array"]
number_array: Vec<u8>,
#[doc = "some text"]
some_text: String,
#[doc = "a file descriptor"]
file_descriptor: ::std::os::unix::io::RawFd,
},
#[doc = "acking the creation of a secondary"]
AckSecondary { sec: super::secondary::Secondary },
#[doc = "create a new quad optionally replacing a previous one"]
CycleQuad { new_quad: super::quad::Quad, old_quad: Option<super::quad::Quad> },
}
#[derive(Debug, Clone)]
pub struct TestGlobal {
id: ObjectId,
version: u32,
data: Option<Arc<dyn std::any::Any + Send + Sync + 'static>>,
}
impl std::cmp::PartialEq for TestGlobal {
fn eq(&self, other: &TestGlobal) -> bool {
self.id == other.id
}
}
impl std::cmp::Eq for TestGlobal {}
impl super::wayland_server::Resource for TestGlobal {
type Request = Request;
type Event = Event;
#[inline]
fn interface() -> &'static Interface {
&super::TEST_GLOBAL_INTERFACE
}
#[inline]
fn id(&self) -> ObjectId {
self.id.clone()
}
#[inline]
fn version(&self) -> u32 {
self.version
}
#[inline]
fn data<U: 'static>(&self) -> Option<&U> {
self.data
.as_ref()
.and_then(|arc| (&**arc).downcast_ref::<ResourceData<Self, U>>())
.map(|data| &data.udata)
}
#[inline]
fn from_id(conn: &mut DisplayHandle, id: ObjectId) -> Result<Self, InvalidId> {
if !same_interface(id.interface(), Self::interface()) && !id.is_null() {
return Err(InvalidId);
}
let version = conn.object_info(id.clone()).map(|info| info.version).unwrap_or(0);
let data = conn.get_object_data(id.clone()).ok();
Ok(TestGlobal { id, data, version })
}
fn parse_request(
conn: &mut DisplayHandle,
msg: Message<ObjectId>,
) -> Result<(Self, Self::Request), DispatchError> {
let me = Self::from_id(conn, msg.sender_id.clone()).unwrap();
match msg.opcode {
0u16 => {
if let [Argument::Uint(unsigned_int), Argument::Int(signed_int), Argument::Fixed(fixed_point), Argument::Array(number_array), Argument::Str(some_text), Argument::Fd(file_descriptor)] =
&msg.args[..]
{
Ok((
me,
Request::ManyArgs {
unsigned_int: *unsigned_int,
signed_int: *signed_int,
fixed_point: (*fixed_point as f64) / 256.,
number_array: *number_array.clone(),
some_text: String::from_utf8_lossy(some_text.as_bytes())
.into_owned(),
file_descriptor: *file_descriptor,
},
))
} else {
Err(DispatchError::BadMessage { msg, interface: Self::interface().name })
}
}
1u16 => {
if let [Argument::NewId(sec)] = &msg.args[..] {
Ok((
me,
Request::GetSecondary {
sec: New::wrap(
match <super::secondary::Secondary as Resource>::from_id(
conn,
sec.clone(),
) {
Ok(p) => p,
Err(_) => {
return Err(DispatchError::BadMessage {
msg,
interface: Self::interface().name,
})
}
},
),
},
))
} else {
Err(DispatchError::BadMessage { msg, interface: Self::interface().name })
}
}
2u16 => {
if let [Argument::NewId(ter)] = &msg.args[..] {
Ok((
me,
Request::GetTertiary {
ter: New::wrap(
match <super::tertiary::Tertiary as Resource>::from_id(
conn,
ter.clone(),
) {
Ok(p) => p,
Err(_) => {
return Err(DispatchError::BadMessage {
msg,
interface: Self::interface().name,
})
}
},
),
},
))
} else {
Err(DispatchError::BadMessage { msg, interface: Self::interface().name })
}
}
3u16 => {
if let [Argument::Object(sec), Argument::Object(ter), Argument::Uint(time)] =
&msg.args[..]
{
Ok((
me,
Request::Link {
sec: match <super::secondary::Secondary as Resource>::from_id(
conn,
sec.clone(),
) {
Ok(p) => p,
Err(_) => {
return Err(DispatchError::BadMessage {
msg,
interface: Self::interface().name,
})
}
},
ter: if ter.is_null() {
None
} else {
Some(
match <super::tertiary::Tertiary as Resource>::from_id(
conn,
ter.clone(),
) {
Ok(p) => p,
Err(_) => {
return Err(DispatchError::BadMessage {
msg,
interface: Self::interface().name,
})
}
},
)
},
time: *time,
},
))
} else {
Err(DispatchError::BadMessage { msg, interface: Self::interface().name })
}
}
4u16 => {
if let [] = &msg.args[..] {
Ok((me, Request::Destroy {}))
} else {
Err(DispatchError::BadMessage { msg, interface: Self::interface().name })
}
}
_ => Err(DispatchError::BadMessage { msg, interface: Self::interface().name }),
}
}
fn write_event(
&self,
conn: &mut DisplayHandle,
msg: Self::Event,
) -> Result<Message<ObjectId>, InvalidId> {
match msg {
Event::ManyArgsEvt {
unsigned_int,
signed_int,
fixed_point,
number_array,
some_text,
file_descriptor,
} => Ok(Message {
sender_id: self.id.clone(),
opcode: 0u16,
args: smallvec::smallvec![
Argument::Uint(unsigned_int),
Argument::Int(signed_int),
Argument::Fixed((fixed_point * 256.) as i32),
Argument::Array(Box::new(number_array)),
Argument::Str(Box::new(std::ffi::CString::new(some_text).unwrap())),
Argument::Fd(file_descriptor)
],
}),
Event::AckSecondary { sec } => Ok(Message {
sender_id: self.id.clone(),
opcode: 1u16,
args: smallvec::smallvec![Argument::Object(Resource::id(&sec))],
}),
Event::CycleQuad { new_quad, old_quad } => Ok(Message {
sender_id: self.id.clone(),
opcode: 2u16,
args: smallvec::smallvec![
Argument::NewId(Resource::id(&new_quad)),
if let Some(obj) = old_quad {
Argument::Object(Resource::id(&obj))
} else {
Argument::Object(conn.null_id())
}
],
}),
}
}
fn __set_object_data(
&mut self,
odata: std::sync::Arc<dyn std::any::Any + Send + Sync + 'static>,
) {
self.data = Some(odata);
}
}
impl TestGlobal {
#[allow(clippy::too_many_arguments)]
pub fn many_args_evt(
&self,
conn: &mut DisplayHandle,
unsigned_int: u32,
signed_int: i32,
fixed_point: f64,
number_array: Vec<u8>,
some_text: String,
file_descriptor: ::std::os::unix::io::RawFd,
) {
let _ = conn.send_event(
self,
Event::ManyArgsEvt {
unsigned_int,
signed_int,
fixed_point,
number_array,
some_text,
file_descriptor,
},
);
}
#[allow(clippy::too_many_arguments)]
pub fn ack_secondary(
&self,
conn: &mut DisplayHandle,
sec: &super::secondary::Secondary,
) {
let _ = conn.send_event(self, Event::AckSecondary { sec: sec.clone() });
}
#[allow(clippy::too_many_arguments)]
pub fn cycle_quad(
&self,
conn: &mut DisplayHandle,
new_quad: &super::quad::Quad,
old_quad: Option<&super::quad::Quad>,
) {
let _ = conn.send_event(self, Event::CycleQuad { new_quad: new_quad.clone(), old_quad: old_quad.cloned() });
}
}
}
pub mod secondary {
use super::wayland_server::{
backend::{
protocol::{same_interface, Argument, Interface, Message, WEnum},
smallvec, InvalidId, ObjectData, ObjectId,
},
Dispatch, DispatchError, DisplayHandle, New, Resource, ResourceData,
};
use std::sync::Arc;
#[doc = r" The minimal object version supporting this request"]
pub const REQ_DESTROY_SINCE: u32 = 2u32;
#[derive(Debug)]
#[non_exhaustive]
pub enum Request {
#[doc = "This is a destructor, once received this object cannot be used any longer.\nOnly available since version 2 of the interface"]
Destroy,
}
#[derive(Debug)]
#[non_exhaustive]
pub enum Event {}
#[derive(Debug, Clone)]
pub struct Secondary {
id: ObjectId,
version: u32,
data: Option<Arc<dyn std::any::Any + Send + Sync + 'static>>,
}
impl std::cmp::PartialEq for Secondary {
fn eq(&self, other: &Secondary) -> bool {
self.id == other.id
}
}
impl std::cmp::Eq for Secondary {}
impl super::wayland_server::Resource for Secondary {
type Request = Request;
type Event = Event;
#[inline]
fn interface() -> &'static Interface {
&super::SECONDARY_INTERFACE
}
#[inline]
fn id(&self) -> ObjectId {
self.id.clone()
}
#[inline]
fn version(&self) -> u32 {
self.version
}
#[inline]
fn data<U: 'static>(&self) -> Option<&U> {
self.data
.as_ref()
.and_then(|arc| (&**arc).downcast_ref::<ResourceData<Self, U>>())
.map(|data| &data.udata)
}
#[inline]
fn from_id(conn: &mut DisplayHandle, id: ObjectId) -> Result<Self, InvalidId> {
if !same_interface(id.interface(), Self::interface()) && !id.is_null() {
return Err(InvalidId);
}
let version = conn.object_info(id.clone()).map(|info| info.version).unwrap_or(0);
let data = conn.get_object_data(id.clone()).ok();
Ok(Secondary { id, data, version })
}
fn parse_request(
conn: &mut DisplayHandle,
msg: Message<ObjectId>,
) -> Result<(Self, Self::Request), DispatchError> {
let me = Self::from_id(conn, msg.sender_id.clone()).unwrap();
match msg.opcode {
0u16 => {
if let [] = &msg.args[..] {
Ok((me, Request::Destroy {}))
} else {
Err(DispatchError::BadMessage { msg, interface: Self::interface().name })
}
}
_ => Err(DispatchError::BadMessage { msg, interface: Self::interface().name }),
}
}
fn write_event(
&self,
conn: &mut DisplayHandle,
msg: Self::Event,
) -> Result<Message<ObjectId>, InvalidId> {
match msg {}
}
fn __set_object_data(
&mut self,
odata: std::sync::Arc<dyn std::any::Any + Send + Sync + 'static>,
) {
self.data = Some(odata);
}
}
impl Secondary {}
}
pub mod tertiary {
use super::wayland_server::{
backend::{
protocol::{same_interface, Argument, Interface, Message, WEnum},
smallvec, InvalidId, ObjectData, ObjectId,
},
Dispatch, DispatchError, DisplayHandle, New, Resource, ResourceData,
};
use std::sync::Arc;
#[doc = r" The minimal object version supporting this request"]
pub const REQ_DESTROY_SINCE: u32 = 3u32;
#[derive(Debug)]
#[non_exhaustive]
pub enum Request {
#[doc = "This is a destructor, once received this object cannot be used any longer.\nOnly available since version 3 of the interface"]
Destroy,
}
#[derive(Debug)]
#[non_exhaustive]
pub enum Event {}
#[derive(Debug, Clone)]
pub struct Tertiary {
id: ObjectId,
version: u32,
data: Option<Arc<dyn std::any::Any + Send + Sync + 'static>>,
}
impl std::cmp::PartialEq for Tertiary {
fn eq(&self, other: &Tertiary) -> bool {
self.id == other.id
}
}
impl std::cmp::Eq for Tertiary {}
impl super::wayland_server::Resource for Tertiary {
type Request = Request;
type Event = Event;
#[inline]
fn interface() -> &'static Interface {
&super::TERTIARY_INTERFACE
}
#[inline]
fn id(&self) -> ObjectId {
self.id.clone()
}
#[inline]
fn version(&self) -> u32 {
self.version
}
#[inline]
fn data<U: 'static>(&self) -> Option<&U> {
self.data
.as_ref()
.and_then(|arc| (&**arc).downcast_ref::<ResourceData<Self, U>>())
.map(|data| &data.udata)
}
#[inline]
fn from_id(conn: &mut DisplayHandle, id: ObjectId) -> Result<Self, InvalidId> {
if !same_interface(id.interface(), Self::interface()) && !id.is_null() {
return Err(InvalidId);
}
let version = conn.object_info(id.clone()).map(|info| info.version).unwrap_or(0);
let data = conn.get_object_data(id.clone()).ok();
Ok(Tertiary { id, data, version })
}
fn parse_request(
conn: &mut DisplayHandle,
msg: Message<ObjectId>,
) -> Result<(Self, Self::Request), DispatchError> {
let me = Self::from_id(conn, msg.sender_id.clone()).unwrap();
match msg.opcode {
0u16 => {
if let [] = &msg.args[..] {
Ok((me, Request::Destroy {}))
} else {
Err(DispatchError::BadMessage { msg, interface: Self::interface().name })
}
}
_ => Err(DispatchError::BadMessage { msg, interface: Self::interface().name }),
}
}
fn write_event(
&self,
conn: &mut DisplayHandle,
msg: Self::Event,
) -> Result<Message<ObjectId>, InvalidId> {
match msg {}
}
fn __set_object_data(
&mut self,
odata: std::sync::Arc<dyn std::any::Any + Send + Sync + 'static>,
) {
self.data = Some(odata);
}
}
impl Tertiary {}
}
pub mod quad {
use super::wayland_server::{
backend::{
protocol::{same_interface, Argument, Interface, Message, WEnum},
smallvec, InvalidId, ObjectData, ObjectId,
},
Dispatch, DispatchError, DisplayHandle, New, Resource, ResourceData,
};
use std::sync::Arc;
#[doc = r" The minimal object version supporting this request"]
pub const REQ_DESTROY_SINCE: u32 = 3u32;
#[derive(Debug)]
#[non_exhaustive]
pub enum Request {
#[doc = "This is a destructor, once received this object cannot be used any longer.\nOnly available since version 3 of the interface"]
Destroy,
}
#[derive(Debug)]
#[non_exhaustive]
pub enum Event {}
#[derive(Debug, Clone)]
pub struct Quad {
id: ObjectId,
version: u32,
data: Option<Arc<dyn std::any::Any + Send + Sync + 'static>>,
}
impl std::cmp::PartialEq for Quad {
fn eq(&self, other: &Quad) -> bool {
self.id == other.id
}
}
impl std::cmp::Eq for Quad {}
impl super::wayland_server::Resource for Quad {
type Request = Request;
type Event = Event;
#[inline]
fn interface() -> &'static Interface {
&super::QUAD_INTERFACE
}
#[inline]
fn id(&self) -> ObjectId {
self.id.clone()
}
#[inline]
fn version(&self) -> u32 {
self.version
}
#[inline]
fn data<U: 'static>(&self) -> Option<&U> {
self.data
.as_ref()
.and_then(|arc| (&**arc).downcast_ref::<ResourceData<Self, U>>())
.map(|data| &data.udata)
}
#[inline]
fn from_id(conn: &mut DisplayHandle, id: ObjectId) -> Result<Self, InvalidId> {
if !same_interface(id.interface(), Self::interface()) && !id.is_null() {
return Err(InvalidId);
}
let version = conn.object_info(id.clone()).map(|info| info.version).unwrap_or(0);
let data = conn.get_object_data(id.clone()).ok();
Ok(Quad { id, data, version })
}
fn parse_request(
conn: &mut DisplayHandle,
msg: Message<ObjectId>,
) -> Result<(Self, Self::Request), DispatchError> {
let me = Self::from_id(conn, msg.sender_id.clone()).unwrap();
match msg.opcode {
0u16 => {
if let [] = &msg.args[..] {
Ok((me, Request::Destroy {}))
} else {
Err(DispatchError::BadMessage { msg, interface: Self::interface().name })
}
}
_ => Err(DispatchError::BadMessage { msg, interface: Self::interface().name }),
}
}
fn write_event(
&self,
conn: &mut DisplayHandle,
msg: Self::Event,
) -> Result<Message<ObjectId>, InvalidId> {
match msg {}
}
fn __set_object_data(
&mut self,
odata: std::sync::Arc<dyn std::any::Any + Send + Sync + 'static>,
) {
self.data = Some(odata);
}
}
impl Quad {}
} | 38.46875 | 204 | 0.453149 |
bfb84104c4ef13bacb6b18728f3e244e5ed76437 | 9,569 | use super::Command::AddToUsizeField;
use super::*;
use anyhow::{Context, Result};
use tokio::sync::{mpsc, oneshot};
use crate::{
config::Configuration,
progress::PROGRESS_PRINTER,
scanner::RESPONSES,
send_command, skip_fail,
statistics::StatField::ResourcesDiscovered,
traits::FeroxSerialize,
utils::{ferox_print, fmt_err, make_request, open_file, write_to},
CommandReceiver, CommandSender, Joiner,
};
use std::sync::Arc;
#[derive(Debug)]
/// Container for terminal output transmitter
pub struct TermOutHandle {
/// Transmitter that sends to the TermOutHandler handler
pub tx: CommandSender,
/// Transmitter that sends to the FileOutHandler handler
pub tx_file: CommandSender,
}
/// implementation of OutputHandle
impl TermOutHandle {
    /// Given the terminal and file `CommandSender`s, create a new TermOutHandle
pub fn new(tx: CommandSender, tx_file: CommandSender) -> Self {
Self { tx, tx_file }
}
/// Send the given Command over `tx`
pub fn send(&self, command: Command) -> Result<()> {
self.tx.send(command)?;
Ok(())
}
/// Sync the handle with the handler
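    ///
    /// Illustrative await point (a sketch): flush both the terminal and the
    /// file handler before proceeding.
    ///
    /// ```ignore
    /// handle.sync(true).await?;
    /// ```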
pub async fn sync(&self, send_to_file: bool) -> Result<()> {
let (tx, rx) = oneshot::channel::<bool>();
self.send(Command::Sync(tx))?;
if send_to_file {
let (tx, rx) = oneshot::channel::<bool>();
self.tx_file.send(Command::Sync(tx))?;
rx.await?;
}
rx.await?;
Ok(())
}
}
#[derive(Debug)]
/// Event handler for files
pub struct FileOutHandler {
/// file output handler's receiver
receiver: CommandReceiver,
/// pointer to "global" configuration struct
config: Arc<Configuration>,
}
impl FileOutHandler {
/// Given a file tx/rx pair along with a filename and awaitable task, create
/// a FileOutHandler
fn new(rx: CommandReceiver, config: Arc<Configuration>) -> Self {
Self {
receiver: rx,
config,
}
}
/// Spawn a single consumer task (sc side of mpsc)
///
/// The consumer simply receives responses from the terminal handler and writes them to disk
async fn start(&mut self, tx_stats: CommandSender) -> Result<()> {
log::trace!("enter: start_file_handler({:?})", tx_stats);
let mut file = open_file(&self.config.output)?;
log::info!("Writing scan results to {}", self.config.output);
while let Some(command) = self.receiver.recv().await {
match command {
Command::Report(response) => {
skip_fail!(write_to(&*response, &mut file, self.config.json));
}
Command::Exit => {
break;
}
Command::Sync(sender) => {
skip_fail!(sender.send(true));
}
_ => {} // no more needed
}
}
// close the file before we tell statistics to save current data to the same file
drop(file);
send_command!(tx_stats, Command::Save);
log::trace!("exit: start_file_handler");
Ok(())
}
}
#[derive(Debug)]
/// Event handler for terminal
pub struct TermOutHandler {
/// terminal output handler's receiver
receiver: CommandReceiver,
/// file handler
tx_file: CommandSender,
/// optional file handler task
file_task: Option<Joiner>,
/// pointer to "global" configuration struct
config: Arc<Configuration>,
}
/// implementation of TermOutHandler
impl TermOutHandler {
/// Given a terminal receiver along with a file transmitter and filename, create
/// an OutputHandler
fn new(
receiver: CommandReceiver,
tx_file: CommandSender,
file_task: Option<Joiner>,
config: Arc<Configuration>,
) -> Self {
Self {
receiver,
tx_file,
config,
file_task,
}
}
/// Creates all required output handlers (terminal, file) and updates the given Handles/Tasks
pub fn initialize(
config: Arc<Configuration>,
tx_stats: CommandSender,
) -> (Joiner, TermOutHandle) {
log::trace!("enter: initialize({:?}, {:?})", config, tx_stats);
let (tx_term, rx_term) = mpsc::unbounded_channel::<Command>();
let (tx_file, rx_file) = mpsc::unbounded_channel::<Command>();
let mut file_handler = FileOutHandler::new(rx_file, config.clone());
let tx_stats_clone = tx_stats.clone();
let file_task = if !config.output.is_empty() {
// -o used, need to spawn the thread for writing to disk
Some(tokio::spawn(async move {
file_handler.start(tx_stats_clone).await
}))
} else {
None
};
let mut term_handler = Self::new(rx_term, tx_file.clone(), file_task, config);
let term_task = tokio::spawn(async move { term_handler.start(tx_stats).await });
let event_handle = TermOutHandle::new(tx_term, tx_file);
log::trace!("exit: initialize -> ({:?}, {:?})", term_task, event_handle);
(term_task, event_handle)
}
/// Start a single consumer task (sc side of mpsc)
///
/// The consumer simply receives `Command` and acts accordingly
async fn start(&mut self, tx_stats: CommandSender) -> Result<()> {
log::trace!("enter: start({:?})", tx_stats);
while let Some(command) = self.receiver.recv().await {
match command {
Command::Report(mut resp) => {
let contains_sentry =
self.config.status_codes.contains(&resp.status().as_u16());
let unknown_sentry = !RESPONSES.contains(&resp); // !contains == unknown
let should_process_response = contains_sentry && unknown_sentry;
if should_process_response {
// print to stdout
ferox_print(&resp.as_str(), &PROGRESS_PRINTER);
send_command!(tx_stats, AddToUsizeField(ResourcesDiscovered, 1));
if self.file_task.is_some() {
// -o used, need to send the report to be written out to disk
self.tx_file
.send(Command::Report(resp.clone()))
.with_context(|| {
fmt_err(&format!("Could not send {} to file handler", resp))
})?;
}
}
log::trace!("report complete: {}", resp.url());
if self.config.replay_client.is_some() && should_process_response {
// replay proxy specified/client created and this response's status code is one that
// should be replayed; not using logged_request due to replay proxy client
make_request(
self.config.replay_client.as_ref().unwrap(),
&resp.url(),
self.config.output_level,
tx_stats.clone(),
)
.await
.with_context(|| "Could not replay request through replay proxy")?;
}
if should_process_response {
// add response to RESPONSES for serialization in case of ctrl+c
// placed all by its lonesome like this so that RESPONSES can take ownership
// of the FeroxResponse
// before ownership is transferred, there's no real reason to keep the body anymore
// so we can free that piece of data, reducing memory usage
resp.drop_text();
RESPONSES.insert(*resp);
}
}
Command::Sync(sender) => {
sender.send(true).unwrap_or_default();
}
Command::Exit => {
if self.file_task.is_some() && self.tx_file.send(Command::Exit).is_ok() {
self.file_task.as_mut().unwrap().await??; // wait for death
}
break;
}
_ => {} // no more commands needed
}
}
log::trace!("exit: start");
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
/// try to hit struct field coverage of FileOutHandler
fn struct_fields_of_file_out_handler() {
let (_, rx) = mpsc::unbounded_channel::<Command>();
let config = Arc::new(Configuration::new().unwrap());
let foh = FileOutHandler {
config,
receiver: rx,
};
println!("{:?}", foh);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
/// try to hit struct field coverage of TermOutHandler
async fn struct_fields_of_term_out_handler() {
let (tx, rx) = mpsc::unbounded_channel::<Command>();
let (tx_file, _) = mpsc::unbounded_channel::<Command>();
let config = Arc::new(Configuration::new().unwrap());
let toh = TermOutHandler {
config,
file_task: None,
receiver: rx,
tx_file,
};
println!("{:?}", toh);
tx.send(Command::Exit).unwrap();
}
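    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
    /// added example (not in the upstream suite): a TermOutHandle built by hand
    /// forwards commands over its primary transmitter
    async fn term_out_handle_forwards_commands() {
        let (tx, mut rx) = mpsc::unbounded_channel::<Command>();
        let (tx_file, _keep_file_side_open) = mpsc::unbounded_channel::<Command>();
        let handle = TermOutHandle::new(tx, tx_file);
        assert!(handle.send(Command::Exit).is_ok());
        // the command arrives unchanged on the receiving end
        assert!(matches!(rx.recv().await, Some(Command::Exit)));
    }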
}
| 33.341463 | 108 | 0.541749 |
72ec8017e818e9cba65faa22c2a85a66a7bb3523 | 2,336 | // Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
use futures::io::AsyncRead;
use futures::stream::StreamExt;
use reqwest::r#async::Chunk;
use reqwest::r#async::Decoder;
use std::cmp::min;
use std::io;
use std::io::Read;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
/// Wraps `reqwest::Decoder` so that it can be exposed as an `AsyncRead` and integrated
/// into resources more easily.
pub struct HttpBody {
decoder: futures::compat::Compat01As03<Decoder>,
chunk: Option<Chunk>,
pos: usize,
}
impl HttpBody {
pub fn from(body: Decoder) -> Self {
Self {
decoder: futures::compat::Compat01As03::new(body),
chunk: None,
pos: 0,
}
}
}
impl Read for HttpBody {
  fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {
    // Synchronous reads are intentionally unsupported; HttpBody is only
    // consumed through the AsyncRead implementation below.
    unimplemented!();
  }
}
impl AsyncRead for HttpBody {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut [u8],
) -> Poll<Result<usize, io::Error>> {
    let inner = self.get_mut();
    // Serve any remainder of a previously buffered chunk before polling the
    // decoder for fresh data.
    if let Some(chunk) = inner.chunk.take() {
debug!(
"HttpBody Fake Read buf {} chunk {} pos {}",
buf.len(),
chunk.len(),
inner.pos
);
let n = min(buf.len(), chunk.len() - inner.pos);
{
let rest = &chunk[inner.pos..];
buf[..n].clone_from_slice(&rest[..n]);
}
      inner.pos += n;
      if inner.pos == chunk.len() {
        // Chunk fully consumed: reset the cursor and let the chunk drop.
        inner.pos = 0;
      } else {
        // Partially consumed: stash the chunk for the next read.
        inner.chunk = Some(chunk);
      }
return Poll::Ready(Ok(n));
} else {
assert_eq!(inner.pos, 0);
}
let p = inner.decoder.poll_next_unpin(cx);
match p {
Poll::Ready(Some(Err(e))) => Poll::Ready(Err(
// TODO Need to map hyper::Error into std::io::Error.
io::Error::new(io::ErrorKind::Other, e),
)),
Poll::Ready(Some(Ok(chunk))) => {
debug!(
"HttpBody Real Read buf {} chunk {} pos {}",
buf.len(),
chunk.len(),
inner.pos
);
let n = min(buf.len(), chunk.len());
buf[..n].clone_from_slice(&chunk[..n]);
if buf.len() < chunk.len() {
inner.pos = n;
inner.chunk = Some(chunk);
}
Poll::Ready(Ok(n))
}
Poll::Ready(None) => Poll::Ready(Ok(0)),
Poll::Pending => Poll::Pending,
}
}
}
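// Illustrative sketch (not in the upstream file): adapting a response body.
// `response` is a hypothetical `reqwest::r#async::Response`; `into_body()`
// yields the `Decoder` this wrapper expects.
//
//     let body = HttpBody::from(response.into_body());
//     // `body` can now be driven through `futures::io::AsyncReadExt::read`.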
| 24.851064 | 87 | 0.545377 |
feeb7da7c09a4b4e96d75ea11ce227f54a23d721 | 1,311 | // Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(plugin_registrar)]
#![feature(slice_patterns, box_syntax, rustc_private)]
#[macro_use(walk_list)]
extern crate syntax;
extern crate rustc_serialize;
// Load rustc as a plugin to get macros.
#[macro_use]
extern crate rustc;
extern crate rustc_plugin;
#[macro_use]
extern crate log;
mod kythe;
mod pass;
mod visitor;
use kythe::writer::JsonEntryWriter;
use rustc_plugin::Registry;
use rustc::lint::LateLintPassObject;
// Informs the compiler of the existence and implementation of our plugin.
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
let pass = box pass::KytheLintPass::new(box JsonEntryWriter);
reg.register_late_lint_pass(pass as LateLintPassObject);
}
| 29.795455 | 75 | 0.762014 |
ac28f0619c25f0e4cfe1359be785bde97f923e58 | 25,982 | #[doc = "Reader of register UCB0CTLW0_SPI"]
pub type R = crate::R<u16, super::UCB0CTLW0_SPI>;
#[doc = "Writer for register UCB0CTLW0_SPI"]
pub type W = crate::W<u16, super::UCB0CTLW0_SPI>;
#[doc = "Register UCB0CTLW0_SPI `reset()`'s with value 0"]
impl crate::ResetValue for super::UCB0CTLW0_SPI {
type Type = u16;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "0:0\\]
Software reset enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum UCSWRST_A {
#[doc = "0: Disabled. eUSCI_B reset released for operation"]
DISABLE = 0,
#[doc = "1: Enabled. eUSCI_B logic held in reset state"]
ENABLE = 1,
}
impl From<UCSWRST_A> for bool {
#[inline(always)]
fn from(variant: UCSWRST_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `UCSWRST`"]
pub type UCSWRST_R = crate::R<bool, UCSWRST_A>;
impl UCSWRST_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> UCSWRST_A {
match self.bits {
false => UCSWRST_A::DISABLE,
true => UCSWRST_A::ENABLE,
}
}
#[doc = "Checks if the value of the field is `DISABLE`"]
#[inline(always)]
pub fn is_disable(&self) -> bool {
*self == UCSWRST_A::DISABLE
}
#[doc = "Checks if the value of the field is `ENABLE`"]
#[inline(always)]
pub fn is_enable(&self) -> bool {
*self == UCSWRST_A::ENABLE
}
}
#[doc = "Write proxy for field `UCSWRST`"]
pub struct UCSWRST_W<'a> {
w: &'a mut W,
}
impl<'a> UCSWRST_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: UCSWRST_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Disabled. eUSCI_B reset released for operation"]
#[inline(always)]
pub fn disable(self) -> &'a mut W {
self.variant(UCSWRST_A::DISABLE)
}
#[doc = "Enabled. eUSCI_B logic held in reset state"]
#[inline(always)]
pub fn enable(self) -> &'a mut W {
self.variant(UCSWRST_A::ENABLE)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u16) & 0x01);
self.w
}
}
#[doc = "1:1\\]
STE mode select in master mode.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum UCSTEM_A {
#[doc = "0: STE pin is used to prevent conflicts with other masters"]
UCSTEM_0 = 0,
#[doc = "1: STE pin is used to generate the enable signal for a 4-wire slave"]
UCSTEM_1 = 1,
}
impl From<UCSTEM_A> for bool {
#[inline(always)]
fn from(variant: UCSTEM_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `UCSTEM`"]
pub type UCSTEM_R = crate::R<bool, UCSTEM_A>;
impl UCSTEM_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> UCSTEM_A {
match self.bits {
false => UCSTEM_A::UCSTEM_0,
true => UCSTEM_A::UCSTEM_1,
}
}
#[doc = "Checks if the value of the field is `UCSTEM_0`"]
#[inline(always)]
pub fn is_ucstem_0(&self) -> bool {
*self == UCSTEM_A::UCSTEM_0
}
#[doc = "Checks if the value of the field is `UCSTEM_1`"]
#[inline(always)]
pub fn is_ucstem_1(&self) -> bool {
*self == UCSTEM_A::UCSTEM_1
}
}
#[doc = "Write proxy for field `UCSTEM`"]
pub struct UCSTEM_W<'a> {
w: &'a mut W,
}
impl<'a> UCSTEM_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: UCSTEM_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "STE pin is used to prevent conflicts with other masters"]
#[inline(always)]
pub fn ucstem_0(self) -> &'a mut W {
self.variant(UCSTEM_A::UCSTEM_0)
}
#[doc = "STE pin is used to generate the enable signal for a 4-wire slave"]
#[inline(always)]
pub fn ucstem_1(self) -> &'a mut W {
self.variant(UCSTEM_A::UCSTEM_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u16) & 0x01) << 1);
self.w
}
}
#[doc = "7:6\\]
eUSCI_B clock source select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum UCSSEL_A {
#[doc = "0: Reserved"]
UCSSEL_0 = 0,
#[doc = "1: ACLK"]
ACLK = 1,
#[doc = "2: SMCLK"]
SMCLK = 2,
#[doc = "3: SMCLK"]
UCSSEL_3 = 3,
}
impl From<UCSSEL_A> for u8 {
#[inline(always)]
fn from(variant: UCSSEL_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `UCSSEL`"]
pub type UCSSEL_R = crate::R<u8, UCSSEL_A>;
impl UCSSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> UCSSEL_A {
match self.bits {
0 => UCSSEL_A::UCSSEL_0,
1 => UCSSEL_A::ACLK,
2 => UCSSEL_A::SMCLK,
3 => UCSSEL_A::UCSSEL_3,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `UCSSEL_0`"]
#[inline(always)]
pub fn is_ucssel_0(&self) -> bool {
*self == UCSSEL_A::UCSSEL_0
}
#[doc = "Checks if the value of the field is `ACLK`"]
#[inline(always)]
pub fn is_aclk(&self) -> bool {
*self == UCSSEL_A::ACLK
}
#[doc = "Checks if the value of the field is `SMCLK`"]
#[inline(always)]
pub fn is_smclk(&self) -> bool {
*self == UCSSEL_A::SMCLK
}
#[doc = "Checks if the value of the field is `UCSSEL_3`"]
#[inline(always)]
pub fn is_ucssel_3(&self) -> bool {
*self == UCSSEL_A::UCSSEL_3
}
}
#[doc = "Write proxy for field `UCSSEL`"]
pub struct UCSSEL_W<'a> {
w: &'a mut W,
}
impl<'a> UCSSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: UCSSEL_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "Reserved"]
#[inline(always)]
pub fn ucssel_0(self) -> &'a mut W {
self.variant(UCSSEL_A::UCSSEL_0)
}
#[doc = "ACLK"]
#[inline(always)]
pub fn aclk(self) -> &'a mut W {
self.variant(UCSSEL_A::ACLK)
}
#[doc = "SMCLK"]
#[inline(always)]
pub fn smclk(self) -> &'a mut W {
self.variant(UCSSEL_A::SMCLK)
}
#[doc = "SMCLK"]
#[inline(always)]
pub fn ucssel_3(self) -> &'a mut W {
self.variant(UCSSEL_A::UCSSEL_3)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 6)) | (((value as u16) & 0x03) << 6);
self.w
}
}
#[doc = "8:8\\]
Synchronous mode enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum UCSYNC_A {
#[doc = "0: Asynchronous mode"]
ASYNC = 0,
#[doc = "1: Synchronous mode"]
SYNC = 1,
}
impl From<UCSYNC_A> for bool {
#[inline(always)]
fn from(variant: UCSYNC_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `UCSYNC`"]
pub type UCSYNC_R = crate::R<bool, UCSYNC_A>;
impl UCSYNC_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> UCSYNC_A {
match self.bits {
false => UCSYNC_A::ASYNC,
true => UCSYNC_A::SYNC,
}
}
#[doc = "Checks if the value of the field is `ASYNC`"]
#[inline(always)]
pub fn is_async(&self) -> bool {
*self == UCSYNC_A::ASYNC
}
#[doc = "Checks if the value of the field is `SYNC`"]
#[inline(always)]
pub fn is_sync(&self) -> bool {
*self == UCSYNC_A::SYNC
}
}
#[doc = "Write proxy for field `UCSYNC`"]
pub struct UCSYNC_W<'a> {
w: &'a mut W,
}
impl<'a> UCSYNC_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: UCSYNC_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Asynchronous mode"]
#[inline(always)]
pub fn fn_async(self) -> &'a mut W {
self.variant(UCSYNC_A::ASYNC)
}
#[doc = "Synchronous mode"]
#[inline(always)]
pub fn sync(self) -> &'a mut W {
self.variant(UCSYNC_A::SYNC)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u16) & 0x01) << 8);
self.w
}
}
#[doc = "10:9\\]
eUSCI mode\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum UCMODE_A {
#[doc = "0: 3-pin SPI"]
UCMODE_0 = 0,
#[doc = "1: 4-pin SPI with UCxSTE active high: Slave enabled when UCxSTE = 1"]
UCMODE_1 = 1,
#[doc = "2: 4-pin SPI with UCxSTE active low: Slave enabled when UCxSTE = 0"]
UCMODE_2 = 2,
#[doc = "3: I2C mode"]
UCMODE_3 = 3,
}
impl From<UCMODE_A> for u8 {
#[inline(always)]
fn from(variant: UCMODE_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `UCMODE`"]
pub type UCMODE_R = crate::R<u8, UCMODE_A>;
impl UCMODE_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> UCMODE_A {
match self.bits {
0 => UCMODE_A::UCMODE_0,
1 => UCMODE_A::UCMODE_1,
2 => UCMODE_A::UCMODE_2,
3 => UCMODE_A::UCMODE_3,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `UCMODE_0`"]
#[inline(always)]
pub fn is_ucmode_0(&self) -> bool {
*self == UCMODE_A::UCMODE_0
}
#[doc = "Checks if the value of the field is `UCMODE_1`"]
#[inline(always)]
pub fn is_ucmode_1(&self) -> bool {
*self == UCMODE_A::UCMODE_1
}
#[doc = "Checks if the value of the field is `UCMODE_2`"]
#[inline(always)]
pub fn is_ucmode_2(&self) -> bool {
*self == UCMODE_A::UCMODE_2
}
#[doc = "Checks if the value of the field is `UCMODE_3`"]
#[inline(always)]
pub fn is_ucmode_3(&self) -> bool {
*self == UCMODE_A::UCMODE_3
}
}
#[doc = "Write proxy for field `UCMODE`"]
pub struct UCMODE_W<'a> {
w: &'a mut W,
}
impl<'a> UCMODE_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: UCMODE_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "3-pin SPI"]
#[inline(always)]
pub fn ucmode_0(self) -> &'a mut W {
self.variant(UCMODE_A::UCMODE_0)
}
#[doc = "4-pin SPI with UCxSTE active high: Slave enabled when UCxSTE = 1"]
#[inline(always)]
pub fn ucmode_1(self) -> &'a mut W {
self.variant(UCMODE_A::UCMODE_1)
}
#[doc = "4-pin SPI with UCxSTE active low: Slave enabled when UCxSTE = 0"]
#[inline(always)]
pub fn ucmode_2(self) -> &'a mut W {
self.variant(UCMODE_A::UCMODE_2)
}
#[doc = "I2C mode"]
#[inline(always)]
pub fn ucmode_3(self) -> &'a mut W {
self.variant(UCMODE_A::UCMODE_3)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 9)) | (((value as u16) & 0x03) << 9);
self.w
}
}
#[doc = "11:11\\]
Master mode select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum UCMST_A {
#[doc = "0: Slave mode"]
SLAVE = 0,
#[doc = "1: Master mode"]
MASTER = 1,
}
impl From<UCMST_A> for bool {
#[inline(always)]
fn from(variant: UCMST_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `UCMST`"]
pub type UCMST_R = crate::R<bool, UCMST_A>;
impl UCMST_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> UCMST_A {
match self.bits {
false => UCMST_A::SLAVE,
true => UCMST_A::MASTER,
}
}
#[doc = "Checks if the value of the field is `SLAVE`"]
#[inline(always)]
pub fn is_slave(&self) -> bool {
*self == UCMST_A::SLAVE
}
#[doc = "Checks if the value of the field is `MASTER`"]
#[inline(always)]
pub fn is_master(&self) -> bool {
*self == UCMST_A::MASTER
}
}
#[doc = "Write proxy for field `UCMST`"]
pub struct UCMST_W<'a> {
w: &'a mut W,
}
impl<'a> UCMST_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: UCMST_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Slave mode"]
#[inline(always)]
pub fn slave(self) -> &'a mut W {
self.variant(UCMST_A::SLAVE)
}
#[doc = "Master mode"]
#[inline(always)]
pub fn master(self) -> &'a mut W {
self.variant(UCMST_A::MASTER)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u16) & 0x01) << 11);
self.w
}
}
#[doc = "12:12\\]
Character length\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum UC7BIT_A {
#[doc = "0: 8-bit data"]
_8BIT = 0,
#[doc = "1: 7-bit data"]
_7BIT = 1,
}
impl From<UC7BIT_A> for bool {
#[inline(always)]
fn from(variant: UC7BIT_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `UC7BIT`"]
pub type UC7BIT_R = crate::R<bool, UC7BIT_A>;
impl UC7BIT_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> UC7BIT_A {
match self.bits {
false => UC7BIT_A::_8BIT,
true => UC7BIT_A::_7BIT,
}
}
#[doc = "Checks if the value of the field is `_8BIT`"]
#[inline(always)]
pub fn is_8bit(&self) -> bool {
*self == UC7BIT_A::_8BIT
}
#[doc = "Checks if the value of the field is `_7BIT`"]
#[inline(always)]
pub fn is_7bit(&self) -> bool {
*self == UC7BIT_A::_7BIT
}
}
#[doc = "Write proxy for field `UC7BIT`"]
pub struct UC7BIT_W<'a> {
w: &'a mut W,
}
impl<'a> UC7BIT_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: UC7BIT_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "8-bit data"]
#[inline(always)]
pub fn _8bit(self) -> &'a mut W {
self.variant(UC7BIT_A::_8BIT)
}
#[doc = "7-bit data"]
#[inline(always)]
pub fn _7bit(self) -> &'a mut W {
self.variant(UC7BIT_A::_7BIT)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u16) & 0x01) << 12);
self.w
}
}
#[doc = "13:13\\]
MSB first select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum UCMSB_A {
#[doc = "0: LSB first"]
UCMSB_0 = 0,
#[doc = "1: MSB first"]
UCMSB_1 = 1,
}
impl From<UCMSB_A> for bool {
#[inline(always)]
fn from(variant: UCMSB_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `UCMSB`"]
pub type UCMSB_R = crate::R<bool, UCMSB_A>;
impl UCMSB_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> UCMSB_A {
match self.bits {
false => UCMSB_A::UCMSB_0,
true => UCMSB_A::UCMSB_1,
}
}
#[doc = "Checks if the value of the field is `UCMSB_0`"]
#[inline(always)]
pub fn is_ucmsb_0(&self) -> bool {
*self == UCMSB_A::UCMSB_0
}
#[doc = "Checks if the value of the field is `UCMSB_1`"]
#[inline(always)]
pub fn is_ucmsb_1(&self) -> bool {
*self == UCMSB_A::UCMSB_1
}
}
#[doc = "Write proxy for field `UCMSB`"]
pub struct UCMSB_W<'a> {
w: &'a mut W,
}
impl<'a> UCMSB_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: UCMSB_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "LSB first"]
#[inline(always)]
pub fn ucmsb_0(self) -> &'a mut W {
self.variant(UCMSB_A::UCMSB_0)
}
#[doc = "MSB first"]
#[inline(always)]
pub fn ucmsb_1(self) -> &'a mut W {
self.variant(UCMSB_A::UCMSB_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u16) & 0x01) << 13);
self.w
}
}
#[doc = "14:14\\]
Clock polarity select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum UCCKPL_A {
#[doc = "0: The inactive state is low"]
LOW = 0,
#[doc = "1: The inactive state is high"]
HIGH = 1,
}
impl From<UCCKPL_A> for bool {
#[inline(always)]
fn from(variant: UCCKPL_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `UCCKPL`"]
pub type UCCKPL_R = crate::R<bool, UCCKPL_A>;
impl UCCKPL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> UCCKPL_A {
match self.bits {
false => UCCKPL_A::LOW,
true => UCCKPL_A::HIGH,
}
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == UCCKPL_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == UCCKPL_A::HIGH
}
}
#[doc = "Write proxy for field `UCCKPL`"]
pub struct UCCKPL_W<'a> {
w: &'a mut W,
}
impl<'a> UCCKPL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: UCCKPL_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "The inactive state is low"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(UCCKPL_A::LOW)
}
#[doc = "The inactive state is high"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(UCCKPL_A::HIGH)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u16) & 0x01) << 14);
self.w
}
}
#[doc = "15:15\\]
Clock phase select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum UCCKPH_A {
#[doc = "0: Data is changed on the first UCLK edge and captured on the following edge."]
UCCKPH_0 = 0,
#[doc = "1: Data is captured on the first UCLK edge and changed on the following edge."]
UCCKPH_1 = 1,
}
impl From<UCCKPH_A> for bool {
#[inline(always)]
fn from(variant: UCCKPH_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `UCCKPH`"]
pub type UCCKPH_R = crate::R<bool, UCCKPH_A>;
impl UCCKPH_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> UCCKPH_A {
match self.bits {
false => UCCKPH_A::UCCKPH_0,
true => UCCKPH_A::UCCKPH_1,
}
}
#[doc = "Checks if the value of the field is `UCCKPH_0`"]
#[inline(always)]
pub fn is_ucckph_0(&self) -> bool {
*self == UCCKPH_A::UCCKPH_0
}
#[doc = "Checks if the value of the field is `UCCKPH_1`"]
#[inline(always)]
pub fn is_ucckph_1(&self) -> bool {
*self == UCCKPH_A::UCCKPH_1
}
}
#[doc = "Write proxy for field `UCCKPH`"]
pub struct UCCKPH_W<'a> {
w: &'a mut W,
}
impl<'a> UCCKPH_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: UCCKPH_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Data is changed on the first UCLK edge and captured on the following edge."]
#[inline(always)]
pub fn ucckph_0(self) -> &'a mut W {
self.variant(UCCKPH_A::UCCKPH_0)
}
#[doc = "Data is captured on the first UCLK edge and changed on the following edge."]
#[inline(always)]
pub fn ucckph_1(self) -> &'a mut W {
self.variant(UCCKPH_A::UCCKPH_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u16) & 0x01) << 15);
self.w
}
}
impl R {
#[doc = "Bit 0 - 0:0\\]
Software reset enable"]
#[inline(always)]
pub fn ucswrst(&self) -> UCSWRST_R {
UCSWRST_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - 1:1\\]
STE mode select in master mode."]
#[inline(always)]
pub fn ucstem(&self) -> UCSTEM_R {
UCSTEM_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bits 6:7 - 7:6\\]
eUSCI_B clock source select"]
#[inline(always)]
pub fn ucssel(&self) -> UCSSEL_R {
UCSSEL_R::new(((self.bits >> 6) & 0x03) as u8)
}
#[doc = "Bit 8 - 8:8\\]
Synchronous mode enable"]
#[inline(always)]
pub fn ucsync(&self) -> UCSYNC_R {
UCSYNC_R::new(((self.bits >> 8) & 0x01) != 0)
}
#[doc = "Bits 9:10 - 10:9\\]
eUSCI mode"]
#[inline(always)]
pub fn ucmode(&self) -> UCMODE_R {
UCMODE_R::new(((self.bits >> 9) & 0x03) as u8)
}
#[doc = "Bit 11 - 11:11\\]
Master mode select"]
#[inline(always)]
pub fn ucmst(&self) -> UCMST_R {
UCMST_R::new(((self.bits >> 11) & 0x01) != 0)
}
#[doc = "Bit 12 - 12:12\\]
Character length"]
#[inline(always)]
pub fn uc7bit(&self) -> UC7BIT_R {
UC7BIT_R::new(((self.bits >> 12) & 0x01) != 0)
}
#[doc = "Bit 13 - 13:13\\]
MSB first select"]
#[inline(always)]
pub fn ucmsb(&self) -> UCMSB_R {
UCMSB_R::new(((self.bits >> 13) & 0x01) != 0)
}
#[doc = "Bit 14 - 14:14\\]
Clock polarity select"]
#[inline(always)]
pub fn ucckpl(&self) -> UCCKPL_R {
UCCKPL_R::new(((self.bits >> 14) & 0x01) != 0)
}
#[doc = "Bit 15 - 15:15\\]
Clock phase select"]
#[inline(always)]
pub fn ucckph(&self) -> UCCKPH_R {
UCCKPH_R::new(((self.bits >> 15) & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - 0:0\\]
Software reset enable"]
#[inline(always)]
pub fn ucswrst(&mut self) -> UCSWRST_W {
UCSWRST_W { w: self }
}
#[doc = "Bit 1 - 1:1\\]
STE mode select in master mode."]
#[inline(always)]
pub fn ucstem(&mut self) -> UCSTEM_W {
UCSTEM_W { w: self }
}
#[doc = "Bits 6:7 - 7:6\\]
eUSCI_B clock source select"]
#[inline(always)]
pub fn ucssel(&mut self) -> UCSSEL_W {
UCSSEL_W { w: self }
}
#[doc = "Bit 8 - 8:8\\]
Synchronous mode enable"]
#[inline(always)]
pub fn ucsync(&mut self) -> UCSYNC_W {
UCSYNC_W { w: self }
}
#[doc = "Bits 9:10 - 10:9\\]
eUSCI mode"]
#[inline(always)]
pub fn ucmode(&mut self) -> UCMODE_W {
UCMODE_W { w: self }
}
#[doc = "Bit 11 - 11:11\\]
Master mode select"]
#[inline(always)]
pub fn ucmst(&mut self) -> UCMST_W {
UCMST_W { w: self }
}
#[doc = "Bit 12 - 12:12\\]
Character length"]
#[inline(always)]
pub fn uc7bit(&mut self) -> UC7BIT_W {
UC7BIT_W { w: self }
}
#[doc = "Bit 13 - 13:13\\]
MSB first select"]
#[inline(always)]
pub fn ucmsb(&mut self) -> UCMSB_W {
UCMSB_W { w: self }
}
#[doc = "Bit 14 - 14:14\\]
Clock polarity select"]
#[inline(always)]
pub fn ucckpl(&mut self) -> UCCKPL_W {
UCCKPL_W { w: self }
}
#[doc = "Bit 15 - 15:15\\]
Clock phase select"]
#[inline(always)]
pub fn ucckph(&mut self) -> UCCKPH_W {
UCCKPH_W { w: self }
}
}
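// Illustrative usage sketch (not generated code; the `EUSCI_B0` peripheral
// handle and the svd2rust `modify` closure API below are assumptions, not
// part of this module):
//
//     periph.EUSCI_B0.ucb0ctlw0_spi.modify(|_, w| {
//         w.ucswrst().enable()   // hold the eUSCI_B module in reset
//          .ucmst().master()     // act as SPI master
//          .ucsync().sync()      // synchronous (SPI) mode
//          .ucmsb().ucmsb_1()    // shift MSB first
//     });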
| 27.847803 | 92 | 0.543338 |
1e42d56bce7a5dc0bf3b43f415fa814e54717388 | 1,202 | #![allow(dead_code)]
use ts_rs::TS;
#[test]
fn newtype() {
#[derive(TS)]
struct Newtype(Vec<i32>);
assert_eq!(Newtype::inline(), "Array<number>");
}
#[test]
fn newtype_nested() {
#[derive(TS)]
struct Newtype(Vec<Vec<i32>>);
assert_eq!(Newtype::inline(), "Array<Array<number>>");
}
#[test]
fn alias() {
type Alias = Vec<String>;
assert_eq!(Alias::inline(), "Array<string>");
}
#[test]
fn alias_nested() {
type Alias = Vec<Vec<String>>;
assert_eq!(Alias::inline(), "Array<Array<string>>");
}
#[test]
fn named() {
#[derive(TS)]
struct Struct {
a: Vec<String>,
}
assert_eq!(Struct::inline(), "{ a: Array<string>, }");
}
#[test]
fn named_nested() {
#[derive(TS)]
struct Struct {
a: Vec<Vec<String>>,
}
assert_eq!(Struct::inline(), "{ a: Array<Array<string>>, }");
}
#[test]
fn tuple() {
#[derive(TS)]
struct Tuple(Vec<i32>, Vec<i32>);
assert_eq!(Tuple::inline(), "[Array<number>, Array<number>]");
}
#[test]
fn tuple_nested() {
#[derive(TS)]
struct Tuple(Vec<Vec<i32>>, Vec<Vec<i32>>);
assert_eq!(
Tuple::inline(),
"[Array<Array<number>>, Array<Array<number>>]"
);
}
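#[test]
fn named_multiple_fields() {
    // added example (not in the upstream file); extrapolates the rendering
    // shown above, where each named field becomes `name: type, `
    #[derive(TS)]
    struct Struct {
        a: Vec<i32>,
        b: Vec<Vec<String>>,
    }
    assert_eq!(
        Struct::inline(),
        "{ a: Array<number>, b: Array<Array<string>>, }"
    );
}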
| 18.78125 | 66 | 0.560732 |
1ddbf864a4b73b5d406ffb2ff3bfedcb615bb659 | 5,015 | #![deny(missing_docs, warnings, intra_doc_link_resolution_failure)]
#![deny(clippy::all, clippy::pedantic)]
//! Module `sys` contains Rust bindings for mruby (currently version 2.0.1),
//! statically linked with FFI API generated by bindgen.
use std::ffi::CStr;
use std::fmt;
mod args;
#[allow(missing_docs)]
#[allow(non_camel_case_types)]
#[allow(non_upper_case_globals)]
#[allow(non_snake_case)]
#[allow(clippy::all, clippy::pedantic)]
mod ffi {
include!(concat!(env!("OUT_DIR"), "/ffi.rs"));
}
#[path = "ffi_tests.rs"]
#[cfg(test)]
mod ffi_tests;
pub use self::args::*;
pub use self::ffi::*;
/// Version metadata `String` for embedded mruby.
pub fn mruby_version(verbose: bool) -> String {
if verbose {
// Using the unchecked function is safe because these values are C constants
let engine = unsafe { CStr::from_bytes_with_nul_unchecked(MRUBY_RUBY_ENGINE) };
let version = unsafe { CStr::from_bytes_with_nul_unchecked(MRUBY_RUBY_VERSION) };
format!(
"{} {} [{}]",
engine.to_str().expect("mruby engine name"),
version.to_str().expect("mruby engine version"),
env!("CARGO_PKG_VERSION")
)
} else {
env!("CARGO_PKG_VERSION").to_owned()
}
}
/// Methods to describe an [`mrb_state`].
pub trait DescribeState {
  /// Wrapper around [`fmt::Display`] for [`mrb_state`]. Returns Ruby engine
/// and interpreter version. For example:
///
/// ```text
/// mruby 2.0
/// ```
fn info(&self) -> String;
/// Wrapper around [`fmt::Debug`] for [`mrb_state`]. Returns Ruby engine,
/// interpreter version, engine version, and [`mrb_state`] address. For
/// example:
///
/// ```text
/// mruby 2.0 (v2.0.1 rev c078758) interpreter at 0x7f85b8800000
/// ```
fn debug(&self) -> String;
/// Returns detailed interpreter version including engine version. For
/// example:
///
/// ```text
/// 2.0 (v2.0.1)
/// ```
fn version(&self) -> String;
}
impl DescribeState for *mut mrb_state {
fn info(&self) -> String {
if self.is_null() {
"".to_owned()
} else {
format!("{}", unsafe { &**self })
}
}
fn debug(&self) -> String {
if self.is_null() {
"".to_owned()
} else {
format!("{:?}", unsafe { &**self })
}
}
fn version(&self) -> String {
// Using the unchecked function is safe because these values are C constants
let version = unsafe { CStr::from_bytes_with_nul_unchecked(MRUBY_RUBY_VERSION) };
format!(
"{} (v{}.{}.{})",
version.to_str().expect("mruby engine version"),
MRUBY_RELEASE_MAJOR,
MRUBY_RELEASE_MINOR,
MRUBY_RELEASE_TEENY,
)
}
}
impl fmt::Debug for mrb_state {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Using the unchecked function is safe because these values are C constants
let engine = unsafe { CStr::from_bytes_with_nul_unchecked(MRUBY_RUBY_ENGINE) };
let version = unsafe { CStr::from_bytes_with_nul_unchecked(MRUBY_RUBY_VERSION) };
write!(
f,
"{} {} (v{}.{}.{}) interpreter at {:p}",
engine.to_str().expect("mruby engine name"),
version.to_str().expect("mruby engine version"),
MRUBY_RELEASE_MAJOR,
MRUBY_RELEASE_MINOR,
MRUBY_RELEASE_TEENY,
self
)
}
}
impl fmt::Display for mrb_state {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Using the unchecked function is safe because these values are C constants
let engine = unsafe { CStr::from_bytes_with_nul_unchecked(MRUBY_RUBY_ENGINE) };
let version = unsafe { CStr::from_bytes_with_nul_unchecked(MRUBY_RUBY_VERSION) };
write!(
f,
"{} {}",
engine.to_str().expect("mruby engine name"),
version.to_str().expect("mruby engine version"),
)
}
}
#[cfg(test)]
mod tests {
use crate::sys::{mrb_close, mrb_open, DescribeState};
#[test]
fn interpreter_display() {
unsafe {
let mrb = mrb_open();
assert_eq!(format!("{}", *mrb), "mruby 2.0");
assert_eq!(mrb.info(), "mruby 2.0");
mrb_close(mrb);
}
}
#[test]
fn interpreter_debug() {
unsafe {
let mrb = mrb_open();
assert_eq!(
format!("{:?}", *mrb),
format!("mruby 2.0 (v2.0.1) interpreter at {:p}", &*mrb)
);
assert_eq!(
mrb.debug(),
format!("mruby 2.0 (v2.0.1) interpreter at {:p}", &*mrb)
);
mrb_close(mrb);
}
}
#[test]
fn version() {
unsafe {
let mrb = mrb_open();
assert_eq!(mrb.version(), "2.0 (v2.0.1)");
mrb_close(mrb);
}
}
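    #[test]
    /// added example (not in the upstream suite): the non-verbose version
    /// string is simply this crate's Cargo package version
    fn version_string_non_verbose() {
        assert_eq!(crate::sys::mruby_version(false), env!("CARGO_PKG_VERSION"));
    }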
}
| 28.988439 | 89 | 0.553739 |
386ccf31f135fb3d535fe40543fbfb2619ec7b9d | 14,528 | use std::error;
use std::fmt;
use std::collections::HashMap;
static DEV_ERR_LIST: [(i32, &'static str); 225] = [
// Command Errors
(-100, "Command error"),
(-101, "Invalid character"),
(-102, "Syntax error"),
(-103, "Invalid separator"),
(-104, "Data type error"),
(-108, "Parameter not allowed"),
(-109, "Missing parameter"),
(-110, "Command header error"),
(-111, "Header separator error"),
(-112, "Program mnemonic too long"),
(-113, "Undefined header"),
(-114, "Header suffix out of range"),
(-120, "Numeric data error"),
(-121, "Invalid character in number"),
  (-123, "Exponent too large"),
  (-123, "Numeric overflow"), // duplicate code: overwrites the entry above in DEV_ERR_MAP
(-124, "Too many digits"),
(-128, "Numeric data not allowed"),
(-130, "Suffix error"),
(-131, "Invalid suffix"),
(-134, "Suffix too long"),
(-138, "Suffix not allowed"),
(-140, "Character data error"),
(-141, "Invalid character data"),
(-144, "Character data too long"),
(-148, "Character data not allowed"),
(-150, "String data error"),
(-151, "Invalid string data"),
(-158, "String data not allowed"),
(-160, "Block data error"),
(-161, "Invalid block data"),
(-168, "Block data not allowed"),
(-170, "Expression error"),
(-171, "Invalid expression"),
(-178, "Expression data not allowed"),
// Execution Errors
(-203, "Command protected; pulsed measurements require option 150"),
(-213, "INIT ignored"),
(-221, "Settings conflict"),
(-221, "Settings conflict; *TRG when TRIG:SOUR BUS not selected; trigger ignored"),
(-221, "Settings conflict; CALC:SCAL:REF 0.0 not compatible with CALC:SCAL:FUNC PCT|PPM|PPB; CALC:SCAL:FUNC set to NULL"),
(-221, "Settings conflict; CALC:STATe or CALC:AVER:STATe OFF set"),
(-221, "Settings conflict; CALC2:TRAN:HIST:STATe OFF set"),
(-221, "Settings conflict; GATE:STAR:DEL:SOUR EVENts on channel 3; GATE:STAR:DEL:SOUR IMM set"),
(-221, "Settings conflict; GATE:STOP:HOLD:SOUR EVENts on channel 3; GATE:STOP:HOLD:SOUR TIM set"),
(-221, "Settings conflict; ROSC:EXT:CHEC can only be sent when ROSC:SOUR EXT selectd;AUTO OFF set"),
(-221, "Settings conflict; SAMP:COUN limited to 100,000 in time stamp function with SENS:TST:RATE SLOW; SAMP:COUN set to 100,000"),
(-221, "Settings conflict; SENS:FREQ:GATE:SOUR on measurement channel; SENS:FREQ:GATE:SOUR set to EXT"),
(-221, "Settings conflict; SENS:FREQ:MODE CONT only valid for frequency/period; SENS:FREQ:MODE AUTO set"),
(-221, "Settings conflict; SENS:GATE:EXT:SOUR on measurement channel; SENS:GATE:EXT:SOUR set to EXT"),
(-221, "Settings conflict; SENS:TINT:GATE:SOUR on measurement channel; SENS:TINT:GATE:SOUR set to EXT"),
(-221, "Settings conflict; SENS:TOT:GATE:SOUR on measurement channel; SENS:TOT:GATE:SOUR set to EXT"),
(-221, "Settings conflict; cannot auto-level input channel used as gate; INP:LEV set to 0V, auto-level off"),
(-221, "Settings conflict; cannot delete state selected and enabled for automatic power-on recall"),
(-221, "Settings conflict; cannot gate time interval-type measurement with baseband channel; SENS:GATE:EXT:SOUR set to BNC"),
(-221, "Settings conflict; cannot have immediate, no-holdoff gate stop for frequency or totalize meas- urements; GATE:STOP:HOLD:SOUR TIME set"),
(-221, "Settings conflict; cannot use READ? with continuous totalize"),
(-221, "Settings conflict; external gating not compatible with gate output; gate output disabled"),
(-221, "Settings conflict; histogram bin width is 0.0; CALC2:TRAN:HIST:RANG:AUTO ON set"),
(-221, "Settings conflict; histogram lower range > upper range; CALC2:TRAN:HIST:RANG:AUTO ON set"),
(-221, "Settings conflict; infinite stop holdoff time for frequency; SENS:FREQ:GATE:SOUR set to TIME"),
(-221, "Settings conflict; infinite stop holdoff time for time interval; SENS:TINT:GATE:SOUR set to IMM"),
(-221, "Settings conflict; input range not compatible with input probe factor; INP:RANG set to 50V range"),
(-221, "Settings conflict; input threshold voltage > input range; threshold clipped to range"),
(-221, "Settings conflict; low reference >= high reference"),
(-221, "Settings conflict; low reference >= high reference; reference values changed to defaults"),
(-221, "Settings conflict; lower limit > upper limit; CALC:LIM:UPP set to CALC:LIM:LOW value"),
(-221, "Settings conflict; lower reference and upper reference have different units"),
(-221, "Settings conflict; stop holdoff < minimum gate time for frequency or totalize; SENSe:GATE:STOP:HOLD:TIME set to minimum"),
(-221, "Settings conflict; trigger source is BUS"),
(-222, "Data out of range"),
(-222, "Data out of range; value clipped to lower limit"),
(-222, "Data out of range; value clipped to upper limit"),
(-223, "Too much data"),
(-224, "Illegal parameter value"),
(-225, "Out of memory; measurement data overrun"),
(-230, "Data corrupt or stale"),
(-240, "Hardware error; CPU board initialization failed"),
(-240, "Hardware error; GPIB interface failed"),
(-240, "Hardware error; cannot communicate with channel 3 hardware"),
(-240, "Hardware error; cannot communicate with measurement hardware"),
(-240, "Hardware error; channel 3 operation failed"),
(-240, "Hardware error; measurement hardware initialization failed"),
(-240, "Hardware error; measurement operation failed"),
(-240, "Hardware error; failed to program measurement FPGA security EEProm"),
(-241, "Hardware missing"),
(-250, "Mass storage error: file read/write error"),
(-252, "Missing media"),
(-254, "Media full"),
(-256, "File or folder name not found"),
(-257, "File name error; invalid character in name"),
(-257, "File name error; relative path not allowed"),
(-257, "File name error; path too long"),
(-257, "File name error; path is a folder name"),
(-257, "File name error; not a folder name"),
(-257, "File name error; drive name missing or not recognized"),
(-257, "File name error; path name missing"),
(-257, "File name error; file or folder already exists"),
(-257, "File name error; folder not empty"),
(-257, "File name error; folder is default folder"),
(-257, "File name error; access denied"),
(-257, "File name error"),
(-257, "File name error; file too large"),
(-257, "File name error; unknown file extension"),
// Device-Specific Errors
(-310, "System error; internal software error"),
(-310, "System error; software initialization failed"),
(-310, "System error; out of memory"),
(-310, "System error; failed to erase calibration data in PIC EEProm"),
(-310, "System error; failed to erase system information in PIC EEProm"),
(-310, "System error; failed to read calibration information from PIC EEProm"),
(-310, "System error; failed to read system information from PIC EEProm"),
(-310, "System error; failed to write calibration information to PIC EEProm"),
(-310, "System error; failed to write system data to PIC EEProm"),
(-310, "System error; I2C Comms Failure, PIC:Ac Power Detect"),
(-310, "System error; I2C Comms Failure, PIC:BatteryFuelGauge"),
(-310, "System error; I2C Comms Failure, PIC:BatteryInfo"),
(-310, "System error; I2C Comms Failure, PIC:OCXO"),
(-310, "System error; I2C Comms Failure, PIC:PwrCondition"),
(-310, "System error; I2C Comms Failure, PIC:PwrOverVolt"),
(-310, "System error; I2C Comms Failure, PIC:PwrUnderVolt"),
(-310, "System error; I2C Comms Failure, PIC:SetOCXOStanby"),
(-310, "System error; I2C Comms Failure, PIC:Temperature"),
(-310, "System error; I2C Comms Failure, PIC:clearPwrCondition"),
(-310, "System error; I2C Comms Failure, PIC:cyclePower"),
(-310, "System error; I2C Comms Failure, PIC:finishPowerdown"),
(-310, "System error; I2C Comms Failure, PIC:picCommunication"),
(-310, "System error; I2C Comms Failure, PIC:setBattStorage"),
(-310, "System error; I2C Comms Failure, PIC:setBatteryPresent"),
(-310, "System error; PIC EEProm access failed"),
(-310, "System error; PIC EEProm failed waiting for unbusy"),
(-311, "Internal software error"),
(-313, "Calibration memory lost"),
(-313, "Calibration memory lost; memory corruption detected"),
(-313, "Calibration memory lost; due to firmware revision change"),
(-314, "Save/recall memory lost; memory corruption detected"),
(-314, "Save/recall memory lost; due to firmware revision change"),
(-315, "Configuration memory lost; memory corruption detected"),
(-315, "Configuration memory lost; due to firmware revision change"),
(-330, "Self-test failed"),
(-350, "Error queue overflow"),
// Query Errors
(-410, "Query INTERRUPTED"),
(-420, "Query UNTERMINATED"),
(-430, "Query DEADLOCKED"),
(-440, "Query UNTERMINATED after indefinite response"),
// Instrument Errors
(100, "Network Error"),
(110, "LXI mDNS Error"),
(201, "Memory lost: stored state"),
(202, "Memory lost: power-on state"),
(203, "Memory lost: stored measurements"),
(263, "Not able to execute while instrument is measuring"),
(291, "Not able to recall state: it is empty"),
(292, "State file size error"),
(293, "State file corrupt"),
(294, "Preference file size error"),
(295, "Preference file corrupt"),
(301, "Input termination protection relay opened"),
(302, "Cannot reset input protection; high voltage present"),
(305, "Not able to perform requested operation"),
(310, "Channel 3 pulse width too short"),
(311, "Channel 3 pulse width too long"),
(312, "Channel 3 pulse width could not be measured"),
(313, "Channel 3 burst frequency could not be measured"),
(314, "Channel 3 pulse ended before gate closed"),
(315, "Channel 3 power too high for operation"),
(316, "Channel 3 power too low for operation"),
(317, "Channel 3 power changed during measurement"),
(318, "Channel 3 input is not pulsed signal"),
(319, "Channel 3 frequency shift detected during measurement"),
(320, "Input signal frequency shift caused internal counter overflow"),
(321, "Measurement timeout occurred"),
(322, "Measurement overflow occurred"),
(514, "Not allowed"),
(514, "Not allowed; Instrument locked by another I/O session"),
(521, "Communications: input buffer overflow"),
(522, "Communications: output buffer overflow"),
(532, "Not able to achieve requested resolution"),
(540, "Cannot use overload as math reference"),
(541, "Cannot use zero as math reference for PCT, PPM, or PPB scaling functions"),
(550, "Not able to execute command in local mode"),
(580, "No valid external timebase"),
(600, "Internal licensing error"),
(601, "License file corrupt or empty"),
(602, "No valid licenses found for this instrument"),
(603, "Some licenses could not be installed"),
(604, "License not found"),
(800, "Nonvolatile memory write failure"),
(810, "State has not been stored"),
(820, "Model and Serial Numbers not restored"),
(821, "Controller and measurement board model numbers do not match"),
(822, "Controller and measurement board serial numbers do not match"),
// Self-Test Errors
(901, "Self Test failed; auto-calibration failure"),
(902, "Self-test failed; main CPU power supply out of range"),
(903, "Self-test failed; real time clock settings lost"),
(904, "Self-test failed; main CPU error accessing boot environment"),
(905, "Self-test failed; failed to read FPGA revision"),
(906, "Self-test failed; FPGA revision is less than expected"),
(907, "Self-test failed; PIC communication failure"),
(908, "Self-test failed; battery test failed"),
(909, "Self-test failed; GPIB test failed"),
(910, "Self-test failed; channel 3 test failed"),
(911, "Self-test failed; front panel revision check failed"),
(912, "Self-test failed; measurement board test failed"),
// Calibration Errors
(701, "Calibration error; security defeated"),
(702, "Calibration error; calibration memory is secured"),
(703, "Calibration error; secure code provided was invalid"),
(704, "Calibration error: secure code too long"),
(705, "Calibration error; calibration aborted"),
(706, "Calibration error: provided value out of range"),
(707, "Calibration error: computed correction factor out of range"),
(708, "Calibration error: signal measurement out of range"),
(709, "Calibration error: no calibration for this function"),
(710, "Calibration error: full scale correction out of range"),
(711, "Calibration error: calibration string too long"),
(712, "Calibration failed"),
(713, "Channel 3 calibration signal not detected"),
(714, "Channel 3 calibration signal power level error"),
(715, "Channel 3 calibration signal frequency error"),
(716, "Channel 3 calibration signal is not CW"),
(717, "Channel 3 calibration timeout occurred"),
(720, "Auto-calibration failed; input signal detected"),
(740, "Calibration data lost: secure state"),
(740, "Calibration information lost: count, security state, security code, string"),
(741, "Calibration data lost: string data"),
(742, "Calibration data lost: corrections"),
(748, "Calibration memory write failure"),
(750, "Calibration data not restored"),
];
lazy_static! {
  static ref DEV_ERR_MAP: HashMap<i32, &'static str> = {
    let mut m = HashMap::with_capacity(DEV_ERR_LIST.len());
    // Several codes (e.g. -221, -240, -257, -310) appear more than once in
    // DEV_ERR_LIST with different detail strings; later entries overwrite
    // earlier ones, so the map keeps only the last message per code.
    for (code, text) in DEV_ERR_LIST.iter() {
      m.insert(*code, *text);
    }
    m.shrink_to_fit();
    m
  };
}
#[derive(Clone, Eq, PartialEq)]
pub struct KsDevErr {
code: i32,
}
impl KsDevErr {
pub fn new(code: i32) -> Option<Self> {
if DEV_ERR_MAP.contains_key(&code) {
Some(KsDevErr { code })
} else {
None
}
}
pub fn code(&self) -> i32 {
self.code
}
}
impl fmt::Debug for KsDevErr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}: '{}'", self.code, DEV_ERR_MAP.get(&self.code).unwrap())
}
}
impl fmt::Display for KsDevErr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", DEV_ERR_MAP.get(&self.code).unwrap())
}
}
impl error::Error for KsDevErr {}
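#[cfg(test)]
mod tests {
    use super::KsDevErr;
    // added example (not in the upstream file): codes present in DEV_ERR_LIST
    // resolve to an error value, unknown codes yield None
    #[test]
    fn known_and_unknown_codes() {
        let err = KsDevErr::new(-100).expect("-100 is a known command error");
        assert_eq!(err.code(), -100);
        assert_eq!(format!("{}", err), "Command error");
        assert!(KsDevErr::new(999_999).is_none());
    }
}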
| 50.096552 | 148 | 0.663684 |
793a8d9352b78b6dd2a382db2af7511c76acd099 | 7,373 | use core::{fmt, slice};
use managed::ManagedSlice;
use super::{Socket, SocketRef, AnySocket};
#[cfg(feature = "socket-tcp")]
use super::TcpState;
/// An item of a socket set.
///
/// The only reason this struct is public is to allow the socket set storage
/// to be allocated externally.
#[derive(Debug)]
pub struct Item<'a, 'b: 'a> {
socket: Socket<'a, 'b>,
refs: usize
}
/// A handle, identifying a socket in a set.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct Handle(usize);
impl fmt::Display for Handle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "#{}", self.0)
}
}
/// An extensible set of sockets.
///
/// The lifetimes `'b` and `'c` are used when storing a `Socket<'b, 'c>`.
#[derive(Debug)]
pub struct Set<'a, 'b: 'a, 'c: 'a + 'b> {
sockets: ManagedSlice<'a, Option<Item<'b, 'c>>>
}
impl<'a, 'b: 'a, 'c: 'a + 'b> Set<'a, 'b, 'c> {
/// Create a socket set using the provided storage.
pub fn new<SocketsT>(sockets: SocketsT) -> Set<'a, 'b, 'c>
where SocketsT: Into<ManagedSlice<'a, Option<Item<'b, 'c>>>> {
let sockets = sockets.into();
        Set { sockets }
}
/// Add a socket to the set with the reference count 1, and return its handle.
///
/// # Panics
/// This function panics if the storage is fixed-size (not a `Vec`) and is full.
pub fn add(&mut self, socket: Socket<'b, 'c>) -> Handle {
fn put<'b, 'c>(index: usize, slot: &mut Option<Item<'b, 'c>>,
mut socket: Socket<'b, 'c>) -> Handle {
net_trace!("[{}]: adding", index);
let handle = Handle(index);
socket.meta_mut().handle = handle;
            *slot = Some(Item { socket, refs: 1 });
handle
}
for (index, slot) in self.sockets.iter_mut().enumerate() {
if slot.is_none() {
return put(index, slot, socket)
}
}
match self.sockets {
ManagedSlice::Borrowed(_) => {
panic!("adding a socket to a full SocketSet")
}
#[cfg(any(feature = "std", feature = "alloc"))]
ManagedSlice::Owned(ref mut sockets) => {
sockets.push(None);
let index = sockets.len() - 1;
return put(index, &mut sockets[index], socket)
}
}
}
/// Get a socket from the set by its handle, as mutable.
///
/// # Panics
/// This function may panic if the handle does not belong to this socket set
/// or the socket has the wrong type.
pub fn get<T: AnySocket<'b, 'c>>(&mut self, handle: Handle) -> SocketRef<T> {
match self.sockets[handle.0].as_mut() {
Some(item) => {
T::downcast(SocketRef::new(&mut item.socket))
.expect("handle refers to a socket of a wrong type")
}
None => panic!("handle does not refer to a valid socket")
}
}
/// Remove a socket from the set, without changing its state.
///
/// # Panics
/// This function may panic if the handle does not belong to this socket set.
pub fn remove(&mut self, handle: Handle) -> Socket<'b, 'c> {
net_trace!("[{}]: removing", handle.0);
match self.sockets[handle.0].take() {
Some(item) => item.socket,
None => panic!("handle does not refer to a valid socket")
}
}
/// Increase reference count by 1.
///
/// # Panics
/// This function may panic if the handle does not belong to this socket set.
pub fn retain(&mut self, handle: Handle) {
self.sockets[handle.0]
.as_mut()
.expect("handle does not refer to a valid socket")
.refs += 1
}
/// Decrease reference count by 1.
///
/// # Panics
/// This function may panic if the handle does not belong to this socket set,
/// or if the reference count is already zero.
pub fn release(&mut self, handle: Handle) {
let refs = &mut self.sockets[handle.0]
.as_mut()
.expect("handle does not refer to a valid socket")
.refs;
if *refs == 0 { panic!("decreasing reference count past zero") }
*refs -= 1
}
/// Prune the sockets in this set.
///
/// Pruning affects sockets with reference count 0. Open sockets are closed.
/// Closed sockets are removed and dropped.
pub fn prune(&mut self) {
for (index, item) in self.sockets.iter_mut().enumerate() {
let mut may_remove = false;
if let &mut Some(Item { refs: 0, ref mut socket }) = item {
match socket {
#[cfg(feature = "socket-raw")]
&mut Socket::Raw(_) =>
may_remove = true,
#[cfg(all(feature = "socket-icmp", feature = "proto-ipv4"))]
&mut Socket::Icmp(_) =>
may_remove = true,
#[cfg(feature = "socket-udp")]
&mut Socket::Udp(_) =>
may_remove = true,
#[cfg(feature = "socket-tcp")]
&mut Socket::Tcp(ref mut socket) =>
if socket.state() == TcpState::Closed {
may_remove = true
} else {
socket.close()
},
&mut Socket::__Nonexhaustive(_) => unreachable!()
}
}
if may_remove {
net_trace!("[{}]: pruning", index);
*item = None
}
}
}
/// Iterate every socket in this set.
pub fn iter<'d>(&'d self) -> Iter<'d, 'b, 'c> {
Iter { lower: self.sockets.iter() }
}
/// Iterate every socket in this set, as SocketRef.
pub fn iter_mut<'d>(&'d mut self) -> IterMut<'d, 'b, 'c> {
IterMut { lower: self.sockets.iter_mut() }
}
}
/// Immutable socket set iterator.
///
/// This struct is created by the [iter](struct.SocketSet.html#method.iter)
/// on [socket sets](struct.SocketSet.html).
pub struct Iter<'a, 'b: 'a, 'c: 'a + 'b> {
lower: slice::Iter<'a, Option<Item<'b, 'c>>>
}
impl<'a, 'b: 'a, 'c: 'a + 'b> Iterator for Iter<'a, 'b, 'c> {
type Item = &'a Socket<'b, 'c>;
fn next(&mut self) -> Option<Self::Item> {
while let Some(item_opt) = self.lower.next() {
if let Some(item) = item_opt.as_ref() {
return Some(&item.socket)
}
}
None
}
}
/// Mutable socket set iterator.
///
/// This struct is created by the [iter_mut](struct.SocketSet.html#method.iter_mut)
/// on [socket sets](struct.SocketSet.html).
pub struct IterMut<'a, 'b: 'a, 'c: 'a + 'b> {
lower: slice::IterMut<'a, Option<Item<'b, 'c>>>,
}
impl<'a, 'b: 'a, 'c: 'a + 'b> Iterator for IterMut<'a, 'b, 'c> {
type Item = SocketRef<'a, Socket<'b, 'c>>;
fn next(&mut self) -> Option<Self::Item> {
while let Some(item_opt) = self.lower.next() {
if let Some(item) = item_opt.as_mut() {
return Some(SocketRef::new(&mut item.socket))
}
}
None
}
}
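#[cfg(test)]
mod tests {
    use super::Handle;
    // added example (not in the upstream file): Handle's Display impl prefixes
    // the socket index with '#'
    #[test]
    fn handle_display() {
        assert_eq!(format!("{}", Handle(42)), "#42");
        assert_eq!(format!("{}", Handle::default()), "#0");
    }
}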
| 33.821101 | 84 | 0.515394 |
39e9d011c7d549bc6e90fe909129cef23973e715 | 51,241 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
use std::fmt::Write;
/// See [`DeleteReportDefinitionInput`](crate::input::DeleteReportDefinitionInput)
pub mod delete_report_definition_input {
/// A builder for [`DeleteReportDefinitionInput`](crate::input::DeleteReportDefinitionInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) report_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>Required. ID of the report to delete.</p>
pub fn report_id(mut self, input: impl Into<std::string::String>) -> Self {
self.report_id = Some(input.into());
self
}
/// <p>Required. ID of the report to delete.</p>
pub fn set_report_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.report_id = input;
self
}
/// Consumes the builder and constructs a [`DeleteReportDefinitionInput`](crate::input::DeleteReportDefinitionInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DeleteReportDefinitionInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::DeleteReportDefinitionInput {
report_id: self.report_id,
})
}
}
}
#[doc(hidden)]
pub type DeleteReportDefinitionInputOperationOutputAlias = crate::operation::DeleteReportDefinition;
#[doc(hidden)]
pub type DeleteReportDefinitionInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl DeleteReportDefinitionInput {
/// Consumes the builder and constructs an Operation<[`DeleteReportDefinition`](crate::operation::DeleteReportDefinition)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::DeleteReportDefinition,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::DeleteReportDefinitionInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
let input_1 = &_input.report_id;
let input_1 = input_1.as_ref().ok_or(
aws_smithy_http::operation::BuildError::MissingField {
field: "report_id",
details: "cannot be empty or unset",
},
)?;
let report_id = aws_smithy_http::label::fmt_string(input_1, false);
if report_id.is_empty() {
return Err(aws_smithy_http::operation::BuildError::MissingField {
field: "report_id",
details: "cannot be empty or unset",
});
}
write!(output, "/reportDefinition/{reportId}", reportId = report_id)
.expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::DeleteReportDefinitionInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("DELETE").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from("");
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::DeleteReportDefinition::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"DeleteReportDefinition",
"applicationcostprofiler",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`DeleteReportDefinitionInput`](crate::input::DeleteReportDefinitionInput)
pub fn builder() -> crate::input::delete_report_definition_input::Builder {
crate::input::delete_report_definition_input::Builder::default()
}
}
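// Example (sketch): constructing the input by hand via the builder defined
// above; the report ID value is illustrative, and in application code this is
// usually driven through the generated client's fluent API instead.
//
//     let input = crate::input::DeleteReportDefinitionInput::builder()
//         .report_id("example-report-id")
//         .build()?;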
/// See [`GetReportDefinitionInput`](crate::input::GetReportDefinitionInput)
pub mod get_report_definition_input {
/// A builder for [`GetReportDefinitionInput`](crate::input::GetReportDefinitionInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) report_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>ID of the report to retrieve.</p>
pub fn report_id(mut self, input: impl Into<std::string::String>) -> Self {
self.report_id = Some(input.into());
self
}
/// <p>ID of the report to retrieve.</p>
pub fn set_report_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.report_id = input;
self
}
/// Consumes the builder and constructs a [`GetReportDefinitionInput`](crate::input::GetReportDefinitionInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetReportDefinitionInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::GetReportDefinitionInput {
report_id: self.report_id,
})
}
}
}
#[doc(hidden)]
pub type GetReportDefinitionInputOperationOutputAlias = crate::operation::GetReportDefinition;
#[doc(hidden)]
pub type GetReportDefinitionInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl GetReportDefinitionInput {
/// Consumes the builder and constructs an Operation<[`GetReportDefinition`](crate::operation::GetReportDefinition)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::GetReportDefinition,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::GetReportDefinitionInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
let input_2 = &_input.report_id;
let input_2 = input_2.as_ref().ok_or(
aws_smithy_http::operation::BuildError::MissingField {
field: "report_id",
details: "cannot be empty or unset",
},
)?;
let report_id = aws_smithy_http::label::fmt_string(input_2, false);
if report_id.is_empty() {
return Err(aws_smithy_http::operation::BuildError::MissingField {
field: "report_id",
details: "cannot be empty or unset",
});
}
write!(output, "/reportDefinition/{reportId}", reportId = report_id)
.expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::GetReportDefinitionInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("GET").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from("");
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::GetReportDefinition::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"GetReportDefinition",
"applicationcostprofiler",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`GetReportDefinitionInput`](crate::input::GetReportDefinitionInput)
pub fn builder() -> crate::input::get_report_definition_input::Builder {
crate::input::get_report_definition_input::Builder::default()
}
}
/// See [`ImportApplicationUsageInput`](crate::input::ImportApplicationUsageInput)
pub mod import_application_usage_input {
/// A builder for [`ImportApplicationUsageInput`](crate::input::ImportApplicationUsageInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) source_s3_location: std::option::Option<crate::model::SourceS3Location>,
}
impl Builder {
/// <p>Amazon S3 location to import application usage data from.</p>
pub fn source_s3_location(mut self, input: crate::model::SourceS3Location) -> Self {
self.source_s3_location = Some(input);
self
}
/// <p>Amazon S3 location to import application usage data from.</p>
pub fn set_source_s3_location(
mut self,
input: std::option::Option<crate::model::SourceS3Location>,
) -> Self {
self.source_s3_location = input;
self
}
/// Consumes the builder and constructs a [`ImportApplicationUsageInput`](crate::input::ImportApplicationUsageInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::ImportApplicationUsageInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::ImportApplicationUsageInput {
source_s3_location: self.source_s3_location,
})
}
}
}
#[doc(hidden)]
pub type ImportApplicationUsageInputOperationOutputAlias = crate::operation::ImportApplicationUsage;
#[doc(hidden)]
pub type ImportApplicationUsageInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl ImportApplicationUsageInput {
/// Consumes the builder and constructs an Operation<[`ImportApplicationUsage`](crate::operation::ImportApplicationUsage)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::ImportApplicationUsage,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::ImportApplicationUsageInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/importApplicationUsage").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::ImportApplicationUsageInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/json",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_import_application_usage(
&self,
)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::ImportApplicationUsage::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"ImportApplicationUsage",
"applicationcostprofiler",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`ImportApplicationUsageInput`](crate::input::ImportApplicationUsageInput)
pub fn builder() -> crate::input::import_application_usage_input::Builder {
crate::input::import_application_usage_input::Builder::default()
}
}
/// See [`ListReportDefinitionsInput`](crate::input::ListReportDefinitionsInput)
pub mod list_report_definitions_input {
/// A builder for [`ListReportDefinitionsInput`](crate::input::ListReportDefinitionsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) max_results: std::option::Option<i32>,
}
impl Builder {
/// <p>The token value from a previous call to access the next page of results.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
/// <p>The token value from a previous call to access the next page of results.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// <p>The maximum number of results to return.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.max_results = Some(input);
self
}
/// <p>The maximum number of results to return.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.max_results = input;
self
}
/// Consumes the builder and constructs a [`ListReportDefinitionsInput`](crate::input::ListReportDefinitionsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::ListReportDefinitionsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::ListReportDefinitionsInput {
next_token: self.next_token,
max_results: self.max_results,
})
}
}
}
#[doc(hidden)]
pub type ListReportDefinitionsInputOperationOutputAlias = crate::operation::ListReportDefinitions;
#[doc(hidden)]
pub type ListReportDefinitionsInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl ListReportDefinitionsInput {
/// Consumes the builder and constructs an Operation<[`ListReportDefinitions`](crate::operation::ListReportDefinitions)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::ListReportDefinitions,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::ListReportDefinitionsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/reportDefinition").expect("formatting should succeed");
Ok(())
}
fn uri_query(
_input: &crate::input::ListReportDefinitionsInput,
mut output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
let mut query = aws_smithy_http::query::Writer::new(&mut output);
if let Some(inner_3) = &_input.next_token {
query.push_kv("nextToken", &aws_smithy_http::query::fmt_string(&inner_3));
}
if let Some(inner_4) = &_input.max_results {
query.push_kv(
"maxResults",
aws_smithy_types::primitive::Encoder::from(*inner_4).encode(),
);
}
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::ListReportDefinitionsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
uri_query(input, &mut uri)?;
Ok(builder.method("GET").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from("");
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::ListReportDefinitions::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"ListReportDefinitions",
"applicationcostprofiler",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`ListReportDefinitionsInput`](crate::input::ListReportDefinitionsInput)
pub fn builder() -> crate::input::list_report_definitions_input::Builder {
crate::input::list_report_definitions_input::Builder::default()
}
}
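// Example (sketch): paging through report definitions by threading the
// `next_token` from each response into the next request. The surrounding
// response type and client wrapper are assumed, not shown in this file.
//
//     let input = crate::input::ListReportDefinitionsInput::builder()
//         .max_results(50)
//         .next_token(token_from_previous_page)
//         .build()?;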
/// See [`PutReportDefinitionInput`](crate::input::PutReportDefinitionInput)
pub mod put_report_definition_input {
/// A builder for [`PutReportDefinitionInput`](crate::input::PutReportDefinitionInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) report_id: std::option::Option<std::string::String>,
pub(crate) report_description: std::option::Option<std::string::String>,
pub(crate) report_frequency: std::option::Option<crate::model::ReportFrequency>,
pub(crate) format: std::option::Option<crate::model::Format>,
pub(crate) destination_s3_location: std::option::Option<crate::model::S3Location>,
}
impl Builder {
/// <p>Required. ID of the report. You can choose any valid string matching the pattern for the ID.</p>
pub fn report_id(mut self, input: impl Into<std::string::String>) -> Self {
self.report_id = Some(input.into());
self
}
/// <p>Required. ID of the report. You can choose any valid string matching the pattern for the ID.</p>
pub fn set_report_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.report_id = input;
self
}
/// <p>Required. Description of the report.</p>
pub fn report_description(mut self, input: impl Into<std::string::String>) -> Self {
self.report_description = Some(input.into());
self
}
/// <p>Required. Description of the report.</p>
pub fn set_report_description(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.report_description = input;
self
}
/// <p>Required. The cadence to generate the report.</p>
pub fn report_frequency(mut self, input: crate::model::ReportFrequency) -> Self {
self.report_frequency = Some(input);
self
}
/// <p>Required. The cadence to generate the report.</p>
pub fn set_report_frequency(
mut self,
input: std::option::Option<crate::model::ReportFrequency>,
) -> Self {
self.report_frequency = input;
self
}
/// <p>Required. The format to use for the generated report.</p>
pub fn format(mut self, input: crate::model::Format) -> Self {
self.format = Some(input);
self
}
/// <p>Required. The format to use for the generated report.</p>
pub fn set_format(mut self, input: std::option::Option<crate::model::Format>) -> Self {
self.format = input;
self
}
/// <p>Required. Amazon Simple Storage Service (Amazon S3) location where Application Cost Profiler uploads the report.</p>
pub fn destination_s3_location(mut self, input: crate::model::S3Location) -> Self {
self.destination_s3_location = Some(input);
self
}
/// <p>Required. Amazon Simple Storage Service (Amazon S3) location where Application Cost Profiler uploads the report.</p>
pub fn set_destination_s3_location(
mut self,
input: std::option::Option<crate::model::S3Location>,
) -> Self {
self.destination_s3_location = input;
self
}
/// Consumes the builder and constructs a [`PutReportDefinitionInput`](crate::input::PutReportDefinitionInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::PutReportDefinitionInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::PutReportDefinitionInput {
report_id: self.report_id,
report_description: self.report_description,
report_frequency: self.report_frequency,
format: self.format,
destination_s3_location: self.destination_s3_location,
})
}
}
}
#[doc(hidden)]
pub type PutReportDefinitionInputOperationOutputAlias = crate::operation::PutReportDefinition;
#[doc(hidden)]
pub type PutReportDefinitionInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl PutReportDefinitionInput {
/// Consumes the builder and constructs an Operation<[`PutReportDefinition`](crate::operation::PutReportDefinition)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::PutReportDefinition,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::PutReportDefinitionInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/reportDefinition").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::PutReportDefinitionInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/json",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_put_report_definition(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::PutReportDefinition::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"PutReportDefinition",
"applicationcostprofiler",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`PutReportDefinitionInput`](crate::input::PutReportDefinitionInput)
pub fn builder() -> crate::input::put_report_definition_input::Builder {
crate::input::put_report_definition_input::Builder::default()
}
}
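// Example (sketch): all five fields are required by the service, so a typical
// construction sets each one. The enum variant names and the `destination`
// value here are illustrative assumptions, not taken from this file.
//
//     let input = crate::input::PutReportDefinitionInput::builder()
//         .report_id("example-report-id")
//         .report_description("Monthly cost report")
//         .report_frequency(crate::model::ReportFrequency::Monthly)
//         .format(crate::model::Format::Csv)
//         .destination_s3_location(destination)
//         .build()?;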
/// See [`UpdateReportDefinitionInput`](crate::input::UpdateReportDefinitionInput)
pub mod update_report_definition_input {
/// A builder for [`UpdateReportDefinitionInput`](crate::input::UpdateReportDefinitionInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) report_id: std::option::Option<std::string::String>,
pub(crate) report_description: std::option::Option<std::string::String>,
pub(crate) report_frequency: std::option::Option<crate::model::ReportFrequency>,
pub(crate) format: std::option::Option<crate::model::Format>,
pub(crate) destination_s3_location: std::option::Option<crate::model::S3Location>,
}
impl Builder {
/// <p>Required. ID of the report to update.</p>
pub fn report_id(mut self, input: impl Into<std::string::String>) -> Self {
self.report_id = Some(input.into());
self
}
/// <p>Required. ID of the report to update.</p>
pub fn set_report_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.report_id = input;
self
}
/// <p>Required. Description of the report.</p>
pub fn report_description(mut self, input: impl Into<std::string::String>) -> Self {
self.report_description = Some(input.into());
self
}
/// <p>Required. Description of the report.</p>
pub fn set_report_description(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.report_description = input;
self
}
/// <p>Required. The cadence to generate the report.</p>
pub fn report_frequency(mut self, input: crate::model::ReportFrequency) -> Self {
self.report_frequency = Some(input);
self
}
/// <p>Required. The cadence to generate the report.</p>
pub fn set_report_frequency(
mut self,
input: std::option::Option<crate::model::ReportFrequency>,
) -> Self {
self.report_frequency = input;
self
}
/// <p>Required. The format to use for the generated report.</p>
pub fn format(mut self, input: crate::model::Format) -> Self {
self.format = Some(input);
self
}
/// <p>Required. The format to use for the generated report.</p>
pub fn set_format(mut self, input: std::option::Option<crate::model::Format>) -> Self {
self.format = input;
self
}
/// <p>Required. Amazon Simple Storage Service (Amazon S3) location where Application Cost Profiler uploads the report.</p>
pub fn destination_s3_location(mut self, input: crate::model::S3Location) -> Self {
self.destination_s3_location = Some(input);
self
}
/// <p>Required. Amazon Simple Storage Service (Amazon S3) location where Application Cost Profiler uploads the report.</p>
pub fn set_destination_s3_location(
mut self,
input: std::option::Option<crate::model::S3Location>,
) -> Self {
self.destination_s3_location = input;
self
}
/// Consumes the builder and constructs a [`UpdateReportDefinitionInput`](crate::input::UpdateReportDefinitionInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::UpdateReportDefinitionInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::UpdateReportDefinitionInput {
report_id: self.report_id,
report_description: self.report_description,
report_frequency: self.report_frequency,
format: self.format,
destination_s3_location: self.destination_s3_location,
})
}
}
}
#[doc(hidden)]
pub type UpdateReportDefinitionInputOperationOutputAlias = crate::operation::UpdateReportDefinition;
#[doc(hidden)]
pub type UpdateReportDefinitionInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl UpdateReportDefinitionInput {
/// Consumes the builder and constructs an Operation<[`UpdateReportDefinition`](crate::operation::UpdateReportDefinition)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::UpdateReportDefinition,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::UpdateReportDefinitionInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
let input_5 = &_input.report_id;
let input_5 = input_5.as_ref().ok_or(
aws_smithy_http::operation::BuildError::MissingField {
field: "report_id",
details: "cannot be empty or unset",
},
)?;
let report_id = aws_smithy_http::label::fmt_string(input_5, false);
if report_id.is_empty() {
return Err(aws_smithy_http::operation::BuildError::MissingField {
field: "report_id",
details: "cannot be empty or unset",
});
}
write!(output, "/reportDefinition/{reportId}", reportId = report_id)
.expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::UpdateReportDefinitionInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("PUT").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/json",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_update_report_definition(
&self,
)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::UpdateReportDefinition::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"UpdateReportDefinition",
"applicationcostprofiler",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`UpdateReportDefinitionInput`](crate::input::UpdateReportDefinitionInput)
pub fn builder() -> crate::input::update_report_definition_input::Builder {
crate::input::update_report_definition_input::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateReportDefinitionInput {
/// <p>Required. ID of the report to update.</p>
pub report_id: std::option::Option<std::string::String>,
/// <p>Required. Description of the report.</p>
pub report_description: std::option::Option<std::string::String>,
/// <p>Required. The cadence to generate the report.</p>
pub report_frequency: std::option::Option<crate::model::ReportFrequency>,
/// <p>Required. The format to use for the generated report.</p>
pub format: std::option::Option<crate::model::Format>,
/// <p>Required. Amazon Simple Storage Service (Amazon S3) location where Application Cost Profiler uploads the report.</p>
pub destination_s3_location: std::option::Option<crate::model::S3Location>,
}
impl UpdateReportDefinitionInput {
/// <p>Required. ID of the report to update.</p>
pub fn report_id(&self) -> std::option::Option<&str> {
self.report_id.as_deref()
}
/// <p>Required. Description of the report.</p>
pub fn report_description(&self) -> std::option::Option<&str> {
self.report_description.as_deref()
}
/// <p>Required. The cadence to generate the report.</p>
pub fn report_frequency(&self) -> std::option::Option<&crate::model::ReportFrequency> {
self.report_frequency.as_ref()
}
/// <p>Required. The format to use for the generated report.</p>
pub fn format(&self) -> std::option::Option<&crate::model::Format> {
self.format.as_ref()
}
/// <p>Required. Amazon Simple Storage Service (Amazon S3) location where Application Cost Profiler uploads the report.</p>
pub fn destination_s3_location(&self) -> std::option::Option<&crate::model::S3Location> {
self.destination_s3_location.as_ref()
}
}
impl std::fmt::Debug for UpdateReportDefinitionInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("UpdateReportDefinitionInput");
formatter.field("report_id", &self.report_id);
formatter.field("report_description", &self.report_description);
formatter.field("report_frequency", &self.report_frequency);
formatter.field("format", &self.format);
formatter.field("destination_s3_location", &self.destination_s3_location);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PutReportDefinitionInput {
/// <p>Required. ID of the report. You can choose any valid string matching the pattern for the ID.</p>
pub report_id: std::option::Option<std::string::String>,
/// <p>Required. Description of the report.</p>
pub report_description: std::option::Option<std::string::String>,
/// <p>Required. The cadence to generate the report.</p>
pub report_frequency: std::option::Option<crate::model::ReportFrequency>,
/// <p>Required. The format to use for the generated report.</p>
pub format: std::option::Option<crate::model::Format>,
/// <p>Required. Amazon Simple Storage Service (Amazon S3) location where Application Cost Profiler uploads the report.</p>
pub destination_s3_location: std::option::Option<crate::model::S3Location>,
}
impl PutReportDefinitionInput {
/// <p>Required. ID of the report. You can choose any valid string matching the pattern for the ID.</p>
pub fn report_id(&self) -> std::option::Option<&str> {
self.report_id.as_deref()
}
/// <p>Required. Description of the report.</p>
pub fn report_description(&self) -> std::option::Option<&str> {
self.report_description.as_deref()
}
/// <p>Required. The cadence to generate the report.</p>
pub fn report_frequency(&self) -> std::option::Option<&crate::model::ReportFrequency> {
self.report_frequency.as_ref()
}
/// <p>Required. The format to use for the generated report.</p>
pub fn format(&self) -> std::option::Option<&crate::model::Format> {
self.format.as_ref()
}
/// <p>Required. Amazon Simple Storage Service (Amazon S3) location where Application Cost Profiler uploads the report.</p>
pub fn destination_s3_location(&self) -> std::option::Option<&crate::model::S3Location> {
self.destination_s3_location.as_ref()
}
}
impl std::fmt::Debug for PutReportDefinitionInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PutReportDefinitionInput");
formatter.field("report_id", &self.report_id);
formatter.field("report_description", &self.report_description);
formatter.field("report_frequency", &self.report_frequency);
formatter.field("format", &self.format);
formatter.field("destination_s3_location", &self.destination_s3_location);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListReportDefinitionsInput {
/// <p>The token value from a previous call to access the next page of results.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>The maximum number of results to return.</p>
pub max_results: std::option::Option<i32>,
}
impl ListReportDefinitionsInput {
/// <p>The token value from a previous call to access the next page of results.</p>
pub fn next_token(&self) -> std::option::Option<&str> {
self.next_token.as_deref()
}
/// <p>The maximum number of results to return.</p>
pub fn max_results(&self) -> std::option::Option<i32> {
self.max_results
}
}
impl std::fmt::Debug for ListReportDefinitionsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ListReportDefinitionsInput");
formatter.field("next_token", &self.next_token);
formatter.field("max_results", &self.max_results);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ImportApplicationUsageInput {
/// <p>Amazon S3 location to import application usage data from.</p>
pub source_s3_location: std::option::Option<crate::model::SourceS3Location>,
}
impl ImportApplicationUsageInput {
/// <p>Amazon S3 location to import application usage data from.</p>
pub fn source_s3_location(&self) -> std::option::Option<&crate::model::SourceS3Location> {
self.source_s3_location.as_ref()
}
}
impl std::fmt::Debug for ImportApplicationUsageInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ImportApplicationUsageInput");
formatter.field("source_s3_location", &self.source_s3_location);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetReportDefinitionInput {
/// <p>ID of the report to retrieve.</p>
pub report_id: std::option::Option<std::string::String>,
}
impl GetReportDefinitionInput {
/// <p>ID of the report to retrieve.</p>
pub fn report_id(&self) -> std::option::Option<&str> {
self.report_id.as_deref()
}
}
impl std::fmt::Debug for GetReportDefinitionInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetReportDefinitionInput");
formatter.field("report_id", &self.report_id);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteReportDefinitionInput {
/// <p>Required. ID of the report to delete.</p>
pub report_id: std::option::Option<std::string::String>,
}
impl DeleteReportDefinitionInput {
/// <p>Required. ID of the report to delete.</p>
pub fn report_id(&self) -> std::option::Option<&str> {
self.report_id.as_deref()
}
}
impl std::fmt::Debug for DeleteReportDefinitionInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteReportDefinitionInput");
formatter.field("report_id", &self.report_id);
formatter.finish()
}
}
| 44.635017 | 132 | 0.613005 |
0ac6845bc92dbd43ed6e179f32fca00c1ee83b33 | 1,012 | /// Scans the memory of the current process for a pattern of bytes and returns the
/// address if there is any match.
///
/// `address` is the address to start scanning from.
/// `length` is the maximum amount of bytes to scan from the address.
/// `pattern` is the pattern of bytes to find; `None` entries act as wildcards.
pub unsafe fn pattern_scan(
    address: *const u8,
    length: usize,
    pattern: &[Option<u8>],
) -> Option<*const u8> {
    if pattern.is_empty() || pattern.len() > length {
        return None;
    }
    // Iterate only far enough that the whole pattern still fits inside the
    // region, so we never read past `address + length`.
    for address_index in 0..=(length - pattern.len()) {
        let addr_offset = address.add(address_index);
        let has_match = pattern.iter().enumerate().all(|(byte_index, &b)| match b {
            Some(byte) => std::ptr::read::<u8>(addr_offset.add(byte_index)) == byte,
            None => true, // wildcard: any byte matches
        });
        if has_match {
            return Some(addr_offset);
        }
    }
    None
}
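#[cfg(test)]
mod tests {
    use super::pattern_scan;

    // A minimal sketch of how the scanner is used: the local buffer stands in
    // for a mapped region of process memory.
    #[test]
    fn finds_pattern_with_wildcard() {
        let buf: [u8; 6] = [0x00, 0xDE, 0xAD, 0x42, 0xEF, 0x00];
        let pattern = [Some(0xDE), Some(0xAD), None, Some(0xEF)];
        let hit = unsafe { pattern_scan(buf.as_ptr(), buf.len(), &pattern) };
        assert_eq!(hit, Some(unsafe { buf.as_ptr().add(1) }));
    }
}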
| 32.645161 | 83 | 0.583992 |
1805bb5a1edc71239c25fbffc3fc39676061fa55 | 5,600 | extern crate regex;
use regex::Regex;
use std::collections::{HashMap, VecDeque};
use std::fs::File;
use std::io::BufRead;
use std::io::BufReader;
fn main() {
let f = File::open("./src/input.txt").unwrap();
    let file = BufReader::new(&f);
let re = Regex::new(r"^depth: (.*)$").unwrap();
let re2 = Regex::new(r"^target: (.*),(.*)$").unwrap();
let mut linenum = 0;
let mut depth = 0;
let mut targetx = 0;
let mut targety = 0;
for line in file.lines() {
let l = line.unwrap();
linenum += 1;
if linenum == 1 {
let caps = re.captures(&l).unwrap();
depth = caps[1].parse::<usize>().unwrap();
} else {
let caps = re2.captures(&l).unwrap();
targetx = caps[1].parse::<usize>().unwrap();
targety = caps[2].parse::<usize>().unwrap();
}
}
println!("{}, ({},{})", depth, targetx, targety);
let mut geoindex = Vec::with_capacity(targety + 6);
let mut levels = Vec::with_capacity(targety + 6);
for y in 0..targety + 6 {
geoindex.push(Vec::with_capacity(targetx + 6));
levels.push(Vec::with_capacity(targetx + 6));
        for _ in 0..targetx + 6 {
geoindex[y].push(0);
levels[y].push(0);
}
}
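    // Each cell of `geoindex` ends up holding the region's erosion level,
    // (geologic index + depth) % 20183; the region type (0 rocky, 1 wet,
    // 2 narrow) is that value mod 3, stored in `levels`.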
for y in 0..targety + 6 {
for x in 0..targetx + 6 {
if (x == 0 && y == 0) || (x == targetx && y == targety) {
geoindex[y][x] = depth % 20183;
} else if x == 0 {
geoindex[y][x] = (y * 48271 + depth) % 20183;
} else if y == 0 {
geoindex[y][x] = (x * 16807 + depth) % 20183;
} else {
geoindex[y][x] = (geoindex[y][x - 1] * geoindex[y - 1][x] + depth) % 20183;
}
levels[y][x] = geoindex[y][x] % 3;
}
}
let mut part1 = 0;
for y in 0..targety + 1 {
for x in 0..targetx + 1 {
part1 += levels[y][x];
if x == 0 && y == 0 {
print!("M");
continue;
}
if x == targetx && y == targety {
print!("T");
continue;
}
match levels[y][x] {
0 => print!("."),
1 => print!("="),
2 => print!("|"),
_ => panic!("xxx"),
}
}
println!("");
}
println!("part1: {}", part1);
println!(
"part2: {}",
shortest_path(&levels, (0, 0, 1, 0), (targetx as i32, targety as i32))
);
}
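// Shortest-path search over (x, y, tool) states using repeated relaxation (an
// SPFA-style queue rather than a binary heap). Moving costs 1 minute; a tool
// switch costs 7, and `get_next` folds switch-plus-move into one cost-8 step.
// Tool encoding: 0 = neither, 1 = torch, 2 = climbing gear.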
fn shortest_path(
levels: &Vec<Vec<usize>>,
source: (i32, i32, i32, i32),
target: (i32, i32),
) -> i32 {
let start = (source.0, source.1, source.2);
let end = (target.0, target.1, 1);
let mut state: HashMap<(i32, i32, i32), i32> = HashMap::new();
state.insert(start, source.3);
let mut q = VecDeque::new();
q.push_back(source);
while !q.is_empty() {
let (x, y, weapon, minute) = q.pop_front().unwrap();
let current = (x, y, weapon);
// println!("current {:?}, state {:?}", current, state.len());
let best = *state.get(¤t).unwrap_or(&0);
if best > 0 && best < minute {
continue;
}
for k in &[(0, 1), (1, 0), (0, -1), (-1, 0)] {
let next = (current.0 + k.0, current.1 + k.1);
if next.0 < 0 || next.1 < 0 || next.0 > target.0 + 5 || next.1 > target.1 + 5 {
continue;
}
if next.0 == 0 && next.1 == 0 {
continue;
}
for (next, v) in get_next(levels, current, next, target) {
let nminute = minute + v;
if next == start {
continue;
}
let best = *state.get(&next).unwrap_or(&0);
if best == 0 || nminute < best {
state.insert(next, nminute);
q.push_back((next.0, next.1, next.2, nminute));
}
}
}
}
return *state.get(&(target.0, target.1, 1)).unwrap();
}
fn get_next(
levels: &Vec<Vec<usize>>,
current: (i32, i32, i32),
next: (i32, i32),
target: (i32, i32),
) -> HashMap<(i32, i32, i32), i32> {
let mut r = HashMap::new();
let weapon = current.2;
if next == target {
if weapon != 1 {
r.insert((next.0, next.1, 1), 8);
} else {
r.insert((next.0, next.1, 1), 1);
}
return r;
}
match levels[next.1 as usize][next.0 as usize] {
0 => {
if weapon == 0 {
r.insert((next.0, next.1, 1), 8);
r.insert((next.0, next.1, 2), 8);
} else {
r.insert((next.0, next.1, weapon), 1);
}
}
1 => {
if weapon == 1 {
r.insert((next.0, next.1, 0), 8);
r.insert((next.0, next.1, 2), 8);
} else {
r.insert((next.0, next.1, weapon), 1);
}
}
2 => {
if weapon == 2 {
r.insert((next.0, next.1, 1), 8);
r.insert((next.0, next.1, 0), 8);
} else {
r.insert((next.0, next.1, weapon), 1);
}
}
_ => panic!("xxx"),
}
return r;
}
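#[cfg(test)]
mod tests {
    use super::get_next;

    // Sketch: in a rocky region (type 0) while holding nothing (tool 0), the
    // only options are to switch to the torch (1) or climbing gear (2), each
    // costing 7 minutes to switch plus 1 to move.
    #[test]
    fn rocky_region_forces_tool_switch() {
        let levels = vec![vec![0, 0], vec![0, 0]];
        let next = get_next(&levels, (0, 0, 0), (1, 0), (5, 5));
        assert_eq!(next.len(), 2);
        assert_eq!(next.get(&(1, 0, 1)), Some(&8));
        assert_eq!(next.get(&(1, 0, 2)), Some(&8));
    }
}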
| 30.939227 | 91 | 0.429821 |
283a3bf234b1e9bbdfaafb038d7d452f0671cfec | 15,555 | use lsp_types::*;
use rnix::{types::*, SyntaxNode, TextRange, TextSize, TokenAtOffset};
use std::{
collections::HashMap,
convert::TryFrom,
fmt::{Debug, Display, Formatter, Result},
path::PathBuf,
rc::Rc,
};
#[derive(Copy, Clone, PartialEq)]
pub enum Datatype {
Lambda,
Variable,
Attribute,
}
impl Display for Datatype {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
write!(
f,
"{}",
match self {
Self::Lambda => "Lambda",
Self::Variable => "Variable",
Self::Attribute => "Attribute",
}
)
}
}
impl Debug for Datatype {
fn fmt(&self, f: &mut Formatter) -> Result {
Display::fmt(self, f)
}
}
pub fn uri_path(uri: &Url) -> Option<PathBuf> {
if uri.scheme() != "file" || uri.has_host() {
return None;
}
Some(PathBuf::from(uri.path()))
}
pub fn lookup_pos(code: &str, pos: Position) -> Option<usize> {
let mut lines = code.split('\n');
let mut offset = 0;
for _ in 0..pos.line {
let line = lines.next()?;
offset += line.len() + 1;
}
lines.next().and_then(|line| {
Some(
offset
+ line
.chars()
.take(usize::try_from(pos.character).ok()?)
.map(char::len_utf8)
.sum::<usize>(),
)
})
}
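// Note: LSP positions count the character column in UTF-16 code units, hence
// `len_utf16` below, while `lookup_pos` above advances through UTF-8 bytes.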
pub fn offset_to_pos(code: &str, offset: usize) -> Position {
let start_of_line = code[..offset].rfind('\n').map_or(0, |n| n + 1);
Position {
line: code[..start_of_line].chars().filter(|&c| c == '\n').count() as u32,
character: code[start_of_line..offset]
.chars()
.map(|c| c.len_utf16() as u32)
.sum(),
}
}
pub fn range(code: &str, range: TextRange) -> Range {
Range {
start: offset_to_pos(code, usize::from(range.start())),
end: offset_to_pos(code, usize::from(range.end())),
}
}
pub struct CursorInfo {
pub path: Vec<String>,
pub ident: Ident,
pub name: String,
}
impl CursorInfo {
    pub fn new(path: Vec<String>, ident: Ident, name: Option<String>) -> CursorInfo {
        let name = name.unwrap_or_else(|| ident.as_str().into());
        CursorInfo { path, ident, name }
    }
}
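/// Finds the identifier at `offset` in `root`, together with the attribute
/// path leading up to it (for `a.b` with the cursor on `b`, the path is
/// `["a"]` and the name is `"b"`).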
pub fn ident_at(root: &SyntaxNode, offset: usize) -> Option<CursorInfo> {
let mut add = false;
let ident =
match root.token_at_offset(TextSize::try_from(offset).expect("aaah big number scary")) {
TokenAtOffset::None => None,
TokenAtOffset::Single(node) => Ident::cast(node.parent()),
TokenAtOffset::Between(left, right) => {
let result = Ident::cast(left.parent()).or_else(|| Ident::cast(right.parent()));
match result {
Some(_) => result,
None => {
if let Some(sel) = Select::cast(left.parent()) {
add = true;
if let Some(s) = sel.set().and_then(Select::cast) {
Ident::cast(s.index()?)
} else {
Ident::cast(sel.set()?)
}
} else {
None
}
}
}
}
}?;
let parent = ident.node().parent();
if let Some(node) = parent.clone().and_then(Inherit::cast) {
if let Some(node) = node.from() {
if let Some(tok) = node.inner() {
if Ident::cast(tok.clone()).is_some() {
return Some(CursorInfo::new(vec![tok.text().to_string()], ident, None));
} else if let Some(mut attr) = Select::cast(tok) {
let mut result = vec![attr.index()?.to_string()];
while let Some(new) = Select::cast(attr.set()?) {
result.push(Ident::cast(new.index()?)?.as_str().into());
attr = new;
}
result.push(Ident::cast(attr.set()?)?.as_str().into());
result.reverse();
return Some(CursorInfo::new(result, ident, None));
}
}
}
Some(CursorInfo::new(Vec::new(), ident, None))
} else if let Some(attr) = parent.clone().and_then(Key::cast) {
let mut path = Vec::new();
for item in attr.path() {
if item == *ident.node() {
return Some(CursorInfo::new(path, ident, None));
}
path.push(Ident::cast(item)?.as_str().into());
}
panic!("identifier at cursor is somehow not a child of its parent");
} else if let Some(mut index) = parent.and_then(Select::cast) {
let mut path = Vec::new();
while let Some(new) = Select::cast(index.set()?) {
path.push(Ident::cast(new.index()?)?.as_str().into());
index = new;
}
if index.set()? != *ident.node() {
// Only push if not the cursor ident, so that
// a . b
// ^
// is not [a] and a, but rather [] and a
path.push(Ident::cast(index.set()?)?.as_str().into());
}
path.reverse();
if add {
path.push(String::from(ident.as_str()));
}
Some(CursorInfo::new(
path,
ident,
match add {
true => Some(String::from("")),
false => None,
},
))
} else {
Some(CursorInfo::new(Vec::new(), ident, None))
}
}
#[derive(Debug)]
pub struct Var {
pub file: Rc<Url>,
pub set: SyntaxNode,
pub key: SyntaxNode,
pub value: Option<SyntaxNode>,
pub datatype: Datatype,
}
pub fn populate<T: EntryHolder>(
file: &Rc<Url>,
scope: &mut HashMap<String, Var>,
set: &T,
datatype: Datatype,
) -> Option<()> {
for entry in set.entries() {
let attr = entry.key()?;
let mut path = attr.path();
if let Some(ident) = path.next().and_then(Ident::cast) {
if !scope.contains_key(ident.as_str()) {
scope.insert(
ident.as_str().into(),
Var {
file: Rc::clone(file),
set: set.node().to_owned(),
key: ident.node().to_owned(),
value: Some(entry.value()?.to_owned()),
datatype,
},
);
}
}
}
Some(())
}
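/// Collects every binding visible from `node` by walking its ancestors:
/// let-in entries, legacy `let` entries, recursive attrset keys, and lambda
/// parameters (plain identifiers, patterns, and `@`-bindings).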
pub fn scope_for(file: &Rc<Url>, node: SyntaxNode) -> Option<HashMap<String, Var>> {
let mut scope = HashMap::new();
let mut current = Some(node);
while let Some(node) = current {
match ParsedType::try_from(node.clone()) {
Ok(ParsedType::LetIn(let_in)) => {
populate(file, &mut scope, &let_in, Datatype::Variable);
}
Ok(ParsedType::LegacyLet(let_)) => {
populate(file, &mut scope, &let_, Datatype::Variable);
}
Ok(ParsedType::AttrSet(set)) => {
if set.recursive() {
populate(file, &mut scope, &set, Datatype::Attribute);
}
}
Ok(ParsedType::Lambda(lambda)) => match ParsedType::try_from(lambda.arg()?) {
Ok(ParsedType::Ident(ident)) => {
if !scope.contains_key(ident.as_str()) {
scope.insert(
ident.as_str().into(),
Var {
file: Rc::clone(file),
set: lambda.node().clone(),
key: ident.node().clone(),
value: None,
datatype: Datatype::Lambda,
},
);
}
}
Ok(ParsedType::Pattern(pattern)) => {
for entry in pattern.entries() {
let ident = entry.name()?;
if !scope.contains_key(ident.as_str()) {
scope.insert(
ident.as_str().into(),
Var {
file: Rc::clone(file),
set: lambda.node().to_owned(),
key: ident.node().to_owned(),
value: None,
datatype: Datatype::Lambda,
},
);
}
}
if let Some(ident) = pattern.at() {
if !scope.contains_key(ident.as_str()) {
scope.insert(
ident.as_str().into(),
Var {
file: Rc::clone(file),
set: lambda.node().to_owned(),
key: ident.node().to_owned(),
value: None,
datatype: Datatype::Lambda,
},
);
}
}
}
_ => (),
},
_ => (),
}
current = node.parent();
}
Some(scope)
}
pub fn selection_ranges(root: &SyntaxNode, content: &str, pos: Position) -> Option<SelectionRange> {
let pos = lookup_pos(content, pos)?;
let node = root
.token_at_offset(TextSize::try_from(pos).expect("big number goes brrr"))
.left_biased()?;
let mut root = None;
let mut cursor = &mut root;
let mut last = None;
for parent in node.ancestors() {
// De-duplicate
if last.as_ref() == Some(&parent) {
continue;
}
let text_range = parent.text_range();
*cursor = Some(Box::new(SelectionRange {
range: range(content, text_range),
parent: None,
}));
cursor = &mut cursor.as_mut().unwrap().parent;
last = Some(parent);
}
root.map(|b| *b)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_offset_from_nix_expr() {
let expr = "let a = 1; in\nmap (x: a + x)\n[1 2 3 4]";
let start = range(expr, TextRange::new(TextSize::from(0), TextSize::from(1)));
assert_eq!(0, start.start.line);
assert_eq!(0, start.end.line);
assert_eq!(0, start.start.character);
assert_eq!(1, start.end.character);
let actual_pos = range(expr, TextRange::new(TextSize::from(15), TextSize::from(20)));
assert_eq!(1, actual_pos.start.line);
assert_eq!(1, actual_pos.end.line);
assert_eq!(1, actual_pos.start.character);
assert_eq!(6, actual_pos.end.character);
}
#[test]
fn test_offset_across_multiple_lines() {
let expr = "let a = 1; in\nbuiltins.trace a a";
let r = range(expr, TextRange::new(TextSize::from(8), TextSize::from(15)));
assert_eq!(0, r.start.line);
assert_eq!(1, r.end.line);
assert_eq!(8, r.start.character);
assert_eq!(1, r.end.character);
}
#[test]
#[should_panic]
fn test_offset_too_large() {
let expr = "let a = 1;in\na";
range(expr, TextRange::new(TextSize::from(50), TextSize::from(50)));
}
#[test]
fn test_lookup_pos_in_expr() {
let expr = "let a = 1;\nbuiltins.trace a 23";
let pos = lookup_pos(
expr,
Position {
line: 0,
character: 0,
},
);
assert_eq!(0, pos.expect("expected position to be not None!"));
}
#[test]
fn test_lookup_pos_out_of_range() {
let expr = "let a = 1;\na";
let pos_wrong_line = lookup_pos(
expr,
Position {
line: 5,
character: 23,
},
);
assert!(pos_wrong_line.is_none());
// if the character is greater than the length of a line, the offset of the last
// char of the line is returned.
let pos_char_out_of_range = lookup_pos(
expr,
Position {
line: 0,
character: 100,
},
);
assert_eq!(
10,
pos_char_out_of_range.expect("expected position to be not None!")
);
}
#[test]
fn test_populate_scope() {
let expr = "n@{ a, b, c, d }: let a = 1; obj.foo = {}; in a + b";
let root = rnix::parse(expr).node();
let scope = scope_for(
&Rc::new(Url::parse("file:///default.nix").unwrap()),
root.children().next().unwrap(),
);
assert!(scope.is_some());
let scope_entries = scope.unwrap();
assert_eq!(5, scope_entries.keys().len());
        assert!(scope_entries
            .values()
            .all(|x| x.datatype == Datatype::Lambda));
assert!(vec!["n", "a", "b", "c", "d"]
.into_iter()
.all(|x| scope_entries.contains_key(x)));
let mut iter = root.children().next().unwrap().children();
iter.next();
let scope_let = scope_for(
&Rc::new(Url::parse("file:///default.nix").unwrap()),
iter.next().unwrap(),
);
assert!(scope_let.is_some());
let scope_entries = scope_let.unwrap();
assert_eq!(6, scope_entries.keys().len());
assert_eq!(Datatype::Variable, scope_entries["a"].datatype);
}
#[test]
fn test_populate_scope_legacy_let() {
let expr = "let { a = 1; body = a; }";
let root = rnix::parse(expr).node();
let scope = scope_for(
&Rc::new(Url::parse("file:///default.nix").unwrap()),
root.children().next().unwrap(),
);
assert!(scope.is_some());
let scope_entries = scope.unwrap();
assert_eq!(2, scope_entries.keys().len());
assert!(vec!["a", "body"]
.into_iter()
.all(|x| scope_entries.contains_key(x)));
}
#[test]
fn test_find_ident() {
let expr = "let a = { b = 1; }; in a.b";
let root = rnix::parse(expr).node();
let ident = ident_at(&root, 26);
assert!(ident.is_some());
let ident_ = ident.unwrap();
assert_eq!(vec!["a"], ident_.path);
assert_eq!("b", ident_.name);
}
#[test]
fn test_inherit_ident() {
let expr = "let inherit (a) b; in b";
let root = rnix::parse(expr).node();
let ident = ident_at(&root, 17);
assert!(ident.is_some());
let ident_ = ident.unwrap();
assert_eq!(vec!["a"], ident_.path);
}
#[test]
fn test_ident_attr_path() {
let expr = "a.b";
let root = rnix::parse(expr).node();
let ident = ident_at(&root, 2);
assert!(ident.is_some());
let ident_ = ident.unwrap();
assert_eq!(vec!["a"], ident_.path);
}
}
| 32.072165 | 100 | 0.459724 |
f701e5a040f2e586d146ddfc76064a9aeb2f2fb1 | 2,310 | #![allow(unknown_lints)] // for clippy
use regex::RegexSet;
use rocket::{Outcome, Request};
use rocket::http::Status;
use rocket::request::{self, FromRequest};
use std::net::SocketAddr;
pub struct RequesterInfo<'a> {
pub remote: SocketAddr,
pub user_agent: Option<&'a str>,
pub uri: String,
}
impl<'a, 'r> FromRequest<'a, 'r> for RequesterInfo<'a> {
type Error = ();
fn from_request(req: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
let remote = if let Some(remote) = req.remote() {
remote
} else {
return Outcome::Failure((Status::InternalServerError, ()));
};
let user_agent = req.headers().get_one("User-Agent");
let request_info = RequesterInfo {
remote: remote,
user_agent: user_agent,
uri: req.uri().to_string(),
};
Outcome::Success(request_info)
}
}
/// `CliClient` checks if a known CLI client sends a "plain" text request
///
/// At least curl, httpie, wget send "Accept: */*" by default. This makes it difficult to dispatch the request. This
/// request guard tries to guess, if a request is plain text request by a CLI client. The the heuristic goes like this:
/// 1. Is this as known CLI client, cf. `KNOWN_CLI_CLIENTS`?
/// 2. If yes, is the default Accept header set, i.e., */* set?
/// 3. If yes, then this is a plain text request by a CLI client
/// 4. In any other case, the request is forwarded to higher ranked routes.
pub struct CliClientRequest<'a> {
pub user_agent_header: &'a str,
}
lazy_static! {
static ref KNOWN_CLI_CLIENTS: RegexSet = RegexSet::new(&[
r"curl",
r"HTTPie",
r"HTTP Library",
r"Wget",
]).unwrap();
}
#[allow(trivial_regex)]
impl<'a, 'r> FromRequest<'a, 'r> for CliClientRequest<'a> {
type Error = ();
fn from_request(req: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
let user_agent_header = req.headers().get_one("User-Agent");
let accept_header = req.headers().get_one("Accept");
match (user_agent_header, accept_header) {
(Some(uah), Some("*/*")) if KNOWN_CLI_CLIENTS.is_match(uah) => Outcome::Success(CliClientRequest { user_agent_header: uah }),
_ => Outcome::Forward(())
}
}
}
| 33 | 137 | 0.622078 |
0acf8ad9302a709b2fdebcf878037338d4f9010b | 1,031 | #[macro_use]
extern crate criterion;
extern crate futures;
extern crate rand;
extern crate schluesselwert;
extern crate tempdir;
extern crate tokio;
#[path = "../tests/common/mod.rs"]
mod common;
use schluesselwert::Client;
use common::*;
use criterion::Criterion;
use tokio::executor::current_thread;
use futures::{future, Future};
fn criterion_benchmark(c: &mut Criterion) {
c.bench_function("Set 50MB", |b| {
b.iter(move || {
let (nodes, listen_ports) = create_nodes(4, 20100);
let _nodes_map = setup_nodes(nodes, listen_ports.clone());
let nodes = listen_ports_to_socket_addrs(listen_ports);
let mut client = Client::new(nodes);
let data = generate_random_data_with_size(50 * 1024 * 1024);
current_thread::block_on_all(future::join_all(
data.iter().map(|(k, v)| client.set(k.clone(), v.clone())),
)).unwrap();
})
});
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| 25.146341 | 75 | 0.647915 |
f8eaf678f925d7bf8c63a0addc13ab611760f323 | 24,139 | use anyhow::{Context, Error};
use graph::blockchain::BlockchainKind;
use graph::data::subgraph::UnifiedMappingApiVersion;
use graph::firehose::endpoints::FirehoseNetworkEndpoints;
use graph::prelude::{
EthereumBlock, EthereumCallCache, LightEthereumBlock, LightEthereumBlockExt, StopwatchMetrics,
};
use graph::slog::debug;
use graph::{
blockchain::{
block_stream::{
BlockStreamEvent, BlockStreamMetrics, BlockWithTriggers, FirehoseError,
FirehoseMapper as FirehoseMapperTrait, TriggersAdapter as TriggersAdapterTrait,
},
firehose_block_stream::FirehoseBlockStream,
polling_block_stream::PollingBlockStream,
Block, BlockHash, BlockPtr, Blockchain, ChainHeadUpdateListener,
IngestorAdapter as IngestorAdapterTrait, IngestorError, TriggerFilter as _,
},
cheap_clone::CheapClone,
components::store::DeploymentLocator,
firehose::bstream,
log::factory::{ComponentLoggerConfig, ElasticComponentLoggerConfig},
prelude::{
async_trait, error, lazy_static, o, serde_json as json, web3::types::H256, BlockNumber,
ChainStore, EthereumBlockWithCalls, Future01CompatExt, Logger, LoggerFactory,
MetricsRegistry, NodeId, SubgraphStore,
},
};
use prost::Message;
use std::collections::HashSet;
use std::iter::FromIterator;
use std::sync::Arc;
use crate::data_source::DataSourceTemplate;
use crate::data_source::UnresolvedDataSourceTemplate;
use crate::RuntimeAdapter;
use crate::{
adapter::EthereumAdapter as _,
codec,
data_source::{DataSource, UnresolvedDataSource},
ethereum_adapter::{
blocks_with_triggers, get_calls, parse_block_triggers, parse_call_triggers,
parse_log_triggers,
},
SubgraphEthRpcMetrics, TriggerFilter,
};
use crate::{network::EthereumNetworkAdapters, EthereumAdapter};
use graph::blockchain::block_stream::{BlockStream, FirehoseCursor};
lazy_static! {
/// Maximum number of blocks to request in each chunk.
static ref MAX_BLOCK_RANGE_SIZE: BlockNumber = std::env::var("GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE")
.unwrap_or("2000".into())
.parse::<BlockNumber>()
.expect("invalid GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE");
/// Ideal number of triggers in a range. The range size will adapt to try to meet this.
static ref TARGET_TRIGGERS_PER_BLOCK_RANGE: u64 = std::env::var("GRAPH_ETHEREUM_TARGET_TRIGGERS_PER_BLOCK_RANGE")
.unwrap_or("100".into())
.parse::<u64>()
.expect("invalid GRAPH_ETHEREUM_TARGET_TRIGGERS_PER_BLOCK_RANGE");
}
/// Celo Mainnet: 42220, Testnet Alfajores: 44787, Testnet Baklava: 62320
const CELO_CHAIN_IDS: [u64; 3] = [42220, 44787, 62320];
pub struct Chain {
logger_factory: LoggerFactory,
name: String,
node_id: NodeId,
registry: Arc<dyn MetricsRegistry>,
firehose_endpoints: Arc<FirehoseNetworkEndpoints>,
eth_adapters: Arc<EthereumNetworkAdapters>,
ancestor_count: BlockNumber,
chain_store: Arc<dyn ChainStore>,
call_cache: Arc<dyn EthereumCallCache>,
subgraph_store: Arc<dyn SubgraphStore>,
chain_head_update_listener: Arc<dyn ChainHeadUpdateListener>,
reorg_threshold: BlockNumber,
pub is_ingestible: bool,
}
impl std::fmt::Debug for Chain {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "chain: ethereum")
}
}
impl Chain {
pub fn new(
logger_factory: LoggerFactory,
name: String,
node_id: NodeId,
registry: Arc<dyn MetricsRegistry>,
chain_store: Arc<dyn ChainStore>,
call_cache: Arc<dyn EthereumCallCache>,
subgraph_store: Arc<dyn SubgraphStore>,
firehose_endpoints: FirehoseNetworkEndpoints,
eth_adapters: EthereumNetworkAdapters,
chain_head_update_listener: Arc<dyn ChainHeadUpdateListener>,
ancestor_count: BlockNumber,
reorg_threshold: BlockNumber,
is_ingestible: bool,
) -> Self {
Chain {
logger_factory,
name,
node_id,
registry,
firehose_endpoints: Arc::new(firehose_endpoints),
eth_adapters: Arc::new(eth_adapters),
ancestor_count,
chain_store,
call_cache,
subgraph_store,
chain_head_update_listener,
reorg_threshold,
is_ingestible,
}
}
}
#[async_trait]
impl Blockchain for Chain {
const KIND: BlockchainKind = BlockchainKind::Ethereum;
type Block = BlockFinality;
type DataSource = DataSource;
type UnresolvedDataSource = UnresolvedDataSource;
type DataSourceTemplate = DataSourceTemplate;
type UnresolvedDataSourceTemplate = UnresolvedDataSourceTemplate;
type TriggersAdapter = TriggersAdapter;
type TriggerData = crate::trigger::EthereumTrigger;
type MappingTrigger = crate::trigger::MappingTrigger;
type TriggerFilter = crate::adapter::TriggerFilter;
type NodeCapabilities = crate::capabilities::NodeCapabilities;
type IngestorAdapter = IngestorAdapter;
type RuntimeAdapter = RuntimeAdapter;
fn triggers_adapter(
&self,
loc: &DeploymentLocator,
capabilities: &Self::NodeCapabilities,
unified_api_version: UnifiedMappingApiVersion,
stopwatch_metrics: StopwatchMetrics,
) -> Result<Arc<Self::TriggersAdapter>, Error> {
let logger = self
.logger_factory
.subgraph_logger(&loc)
.new(o!("component" => "BlockStream"));
let eth_adapter = if capabilities.traces && self.firehose_endpoints.len() > 0 {
debug!(logger, "Removing 'traces' capability requirement for adapter as FirehoseBlockStream will provide the traces");
let adjusted_capabilities = crate::capabilities::NodeCapabilities {
archive: capabilities.archive,
traces: false,
};
self.eth_adapters
.cheapest_with(&adjusted_capabilities)?
.clone()
} else {
self.eth_adapters.cheapest_with(capabilities)?.clone()
};
let ethrpc_metrics = Arc::new(SubgraphEthRpcMetrics::new(self.registry.clone(), &loc.hash));
let adapter = TriggersAdapter {
logger,
ethrpc_metrics,
eth_adapter,
stopwatch_metrics,
chain_store: self.chain_store.cheap_clone(),
unified_api_version,
};
Ok(Arc::new(adapter))
}
async fn new_firehose_block_stream(
&self,
deployment: DeploymentLocator,
start_blocks: Vec<BlockNumber>,
firehose_cursor: Option<String>,
filter: Arc<Self::TriggerFilter>,
metrics: Arc<BlockStreamMetrics>,
unified_api_version: UnifiedMappingApiVersion,
) -> Result<Box<dyn BlockStream<Self>>, Error> {
let requirements = filter.node_capabilities();
let adapter = self
.triggers_adapter(
&deployment,
&requirements,
unified_api_version.clone(),
metrics.stopwatch.clone(),
)
.expect(&format!(
"no adapter for network {} with capabilities {}",
self.name, requirements
));
let firehose_endpoint = match self.firehose_endpoints.random() {
Some(e) => e.clone(),
None => return Err(anyhow::format_err!("no firehose endpoint available",)),
};
let logger = self
.logger_factory
.subgraph_logger(&deployment)
.new(o!("component" => "FirehoseBlockStream"));
let firehose_mapper = Arc::new(FirehoseMapper {});
Ok(Box::new(FirehoseBlockStream::new(
firehose_endpoint,
firehose_cursor,
firehose_mapper,
adapter,
filter,
start_blocks,
logger,
)))
}
async fn new_polling_block_stream(
&self,
deployment: DeploymentLocator,
start_blocks: Vec<BlockNumber>,
subgraph_start_block: Option<BlockPtr>,
filter: Arc<Self::TriggerFilter>,
metrics: Arc<BlockStreamMetrics>,
unified_api_version: UnifiedMappingApiVersion,
) -> Result<Box<dyn BlockStream<Self>>, Error> {
let requirements = filter.node_capabilities();
let adapter = self
.triggers_adapter(
&deployment,
&requirements,
unified_api_version.clone(),
metrics.stopwatch.clone(),
)
.expect(&format!(
"no adapter for network {} with capabilities {}",
self.name, requirements
));
let logger = self
.logger_factory
.subgraph_logger(&deployment)
.new(o!("component" => "BlockStream"));
let chain_store = self.chain_store().clone();
let writable = self
.subgraph_store
.cheap_clone()
.writable(logger.clone(), deployment.id)
.await
.with_context(|| format!("no store for deployment `{}`", deployment.hash))?;
let chain_head_update_stream = self
.chain_head_update_listener
.subscribe(self.name.clone(), logger.clone());
// Special case: Detect Celo and set the threshold to 0, so that eth_getLogs is always used.
// This is ok because Celo blocks are always final. And we _need_ to do this because
// some events appear only in eth_getLogs but not in transaction receipts.
// See also ca0edc58-0ec5-4c89-a7dd-2241797f5e50.
let chain_id = self.eth_adapters.cheapest().unwrap().chain_id().await?;
let reorg_threshold = match CELO_CHAIN_IDS.contains(&chain_id) {
false => self.reorg_threshold,
true => 0,
};
Ok(Box::new(PollingBlockStream::new(
writable,
chain_store,
chain_head_update_stream,
adapter,
self.node_id.clone(),
deployment.hash,
filter,
start_blocks,
reorg_threshold,
logger,
metrics,
*MAX_BLOCK_RANGE_SIZE,
*TARGET_TRIGGERS_PER_BLOCK_RANGE,
unified_api_version,
subgraph_start_block,
)))
}
fn ingestor_adapter(&self) -> Arc<Self::IngestorAdapter> {
let eth_adapter = self.eth_adapters.cheapest().unwrap().clone();
let logger = self
.logger_factory
.component_logger(
"BlockIngestor",
Some(ComponentLoggerConfig {
elastic: Some(ElasticComponentLoggerConfig {
index: String::from("block-ingestor-logs"),
}),
}),
)
.new(o!("provider" => eth_adapter.provider().to_string()));
let adapter = IngestorAdapter {
eth_adapter,
logger,
ancestor_count: self.ancestor_count,
chain_store: self.chain_store.clone(),
};
Arc::new(adapter)
}
fn chain_store(&self) -> Arc<dyn ChainStore> {
self.chain_store.clone()
}
async fn block_pointer_from_number(
&self,
logger: &Logger,
number: BlockNumber,
) -> Result<BlockPtr, IngestorError> {
let eth_adapter = self
.eth_adapters
.cheapest()
.with_context(|| format!("no adapter for chain {}", self.name))?
.clone();
eth_adapter
.block_pointer_from_number(logger, number)
.compat()
.await
}
fn runtime_adapter(&self) -> Arc<Self::RuntimeAdapter> {
Arc::new(RuntimeAdapter {
eth_adapters: self.eth_adapters.cheap_clone(),
call_cache: self.call_cache.cheap_clone(),
})
}
fn is_firehose_supported(&self) -> bool {
self.firehose_endpoints.len() > 0
}
}
/// This is used in `EthereumAdapter::triggers_in_block`, called when re-processing a block for
/// newly created data sources. This allows the re-processing to be reorg safe without having to
/// always fetch the full block data.
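///
/// A minimal matching sketch (the `block` binding is assumed to come from the
/// block stream; it mirrors the `From<&BlockFinality> for BlockPtr` impl below):
///
/// ```rust,ignore
/// let ptr: BlockPtr = match &block {
///     // Final blocks only carry the light header data.
///     BlockFinality::Final(light) => BlockPtr::from(&**light),
///     // Non-final blocks carry the full block together with its calls.
///     BlockFinality::NonFinal(full) => BlockPtr::from(&full.ethereum_block),
/// };
/// ```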
#[derive(Clone, Debug)]
pub enum BlockFinality {
/// If a block is final, we only need the header and the triggers.
Final(Arc<LightEthereumBlock>),
    /// If a block may still be reorged, we need to work with more local data.
NonFinal(EthereumBlockWithCalls),
}
impl BlockFinality {
pub(crate) fn light_block(&self) -> Arc<LightEthereumBlock> {
match self {
BlockFinality::Final(block) => block.cheap_clone(),
BlockFinality::NonFinal(block) => block.ethereum_block.block.cheap_clone(),
}
}
}
impl<'a> From<&'a BlockFinality> for BlockPtr {
fn from(block: &'a BlockFinality) -> BlockPtr {
match block {
BlockFinality::Final(b) => BlockPtr::from(&**b),
BlockFinality::NonFinal(b) => BlockPtr::from(&b.ethereum_block),
}
}
}
impl Block for BlockFinality {
fn ptr(&self) -> BlockPtr {
match self {
BlockFinality::Final(block) => block.block_ptr(),
BlockFinality::NonFinal(block) => block.ethereum_block.block.block_ptr(),
}
}
fn parent_ptr(&self) -> Option<BlockPtr> {
match self {
BlockFinality::Final(block) => block.parent_ptr(),
BlockFinality::NonFinal(block) => block.ethereum_block.block.parent_ptr(),
}
}
fn data(&self) -> Result<json::Value, json::Error> {
// The serialization here very delicately depends on how the
// `ChainStore`'s `blocks` and `ancestor_block` return the data we
// store here. This should be fixed in a better way to ensure we
// serialize/deserialize appropriately.
//
// Commit #d62e9846 inadvertently introduced a variation in how
// chain stores store ethereum blocks in that they now sometimes
// store an `EthereumBlock` that has a `block` field with a
// `LightEthereumBlock`, and sometimes they just store the
// `LightEthereumBlock` directly. That causes issues because the
// code reading from the chain store always expects the JSON data to
// have the form of an `EthereumBlock`.
//
// Even though this bug is fixed now and we always use the
// serialization of an `EthereumBlock`, there are still chain stores
// in existence that used the old serialization form, and we need to
// deal with that when deserializing
//
// see also 7736e440-4c6b-11ec-8c4d-b42e99f52061
match self {
BlockFinality::Final(block) => {
let eth_block = EthereumBlock {
block: block.clone(),
transaction_receipts: vec![],
};
json::to_value(eth_block)
}
BlockFinality::NonFinal(block) => json::to_value(&block.ethereum_block),
}
}
}
pub struct DummyDataSourceTemplate;
pub struct TriggersAdapter {
logger: Logger,
ethrpc_metrics: Arc<SubgraphEthRpcMetrics>,
stopwatch_metrics: StopwatchMetrics,
chain_store: Arc<dyn ChainStore>,
eth_adapter: Arc<EthereumAdapter>,
unified_api_version: UnifiedMappingApiVersion,
}
#[async_trait]
impl TriggersAdapterTrait<Chain> for TriggersAdapter {
async fn scan_triggers(
&self,
from: BlockNumber,
to: BlockNumber,
filter: &TriggerFilter,
) -> Result<Vec<BlockWithTriggers<Chain>>, Error> {
blocks_with_triggers(
self.eth_adapter.clone(),
self.logger.clone(),
self.chain_store.clone(),
self.ethrpc_metrics.clone(),
self.stopwatch_metrics.clone(),
from,
to,
filter,
self.unified_api_version.clone(),
)
.await
}
async fn triggers_in_block(
&self,
logger: &Logger,
block: BlockFinality,
filter: &TriggerFilter,
) -> Result<BlockWithTriggers<Chain>, Error> {
let block = get_calls(
self.eth_adapter.as_ref(),
logger.clone(),
self.ethrpc_metrics.clone(),
filter.requires_traces(),
block,
)
.await?;
match &block {
BlockFinality::Final(_) => {
let block_number = block.number() as BlockNumber;
let blocks = blocks_with_triggers(
self.eth_adapter.clone(),
logger.clone(),
self.chain_store.clone(),
self.ethrpc_metrics.clone(),
self.stopwatch_metrics.clone(),
block_number,
block_number,
filter,
self.unified_api_version.clone(),
)
.await?;
assert!(blocks.len() == 1);
Ok(blocks.into_iter().next().unwrap())
}
BlockFinality::NonFinal(full_block) => {
let mut triggers = Vec::new();
triggers.append(&mut parse_log_triggers(
&filter.log,
&full_block.ethereum_block,
));
triggers.append(&mut parse_call_triggers(&filter.call, &full_block)?);
triggers.append(&mut parse_block_triggers(&filter.block, &full_block));
Ok(BlockWithTriggers::new(block, triggers))
}
}
}
async fn is_on_main_chain(&self, ptr: BlockPtr) -> Result<bool, Error> {
self.eth_adapter
.is_on_main_chain(&self.logger, ptr.clone())
.await
}
fn ancestor_block(
&self,
ptr: BlockPtr,
offset: BlockNumber,
) -> Result<Option<BlockFinality>, Error> {
let block: Option<EthereumBlock> = self
.chain_store
.ancestor_block(ptr, offset)?
.map(json::from_value)
.transpose()?;
Ok(block.map(|block| {
BlockFinality::NonFinal(EthereumBlockWithCalls {
ethereum_block: block,
calls: None,
})
}))
}
async fn parent_ptr(&self, block: &BlockPtr) -> Result<Option<BlockPtr>, Error> {
use futures::stream::Stream;
use graph::prelude::LightEthereumBlockExt;
let blocks = self
.eth_adapter
.load_blocks(
self.logger.cheap_clone(),
self.chain_store.cheap_clone(),
HashSet::from_iter(Some(block.hash_as_h256())),
)
.collect()
.compat()
.await?;
assert_eq!(blocks.len(), 1);
Ok(blocks[0].parent_ptr())
}
}
pub struct FirehoseMapper {}
#[async_trait]
impl FirehoseMapperTrait<Chain> for FirehoseMapper {
async fn to_block_stream_event(
&self,
logger: &Logger,
response: &bstream::BlockResponseV2,
adapter: &TriggersAdapter,
filter: &TriggerFilter,
) -> Result<BlockStreamEvent<Chain>, FirehoseError> {
let step = bstream::ForkStep::from_i32(response.step).unwrap_or_else(|| {
panic!(
"unknown step i32 value {}, maybe you forgot update & re-regenerate the protobuf definitions?",
response.step
)
});
let any_block = response
.block
.as_ref()
.expect("block payload information should always be present");
        // Right now, this is done in all cases, but with how BlockStreamEvent::Revert is currently
        // defined, only the block hash and block number are necessary. However, this information
        // is not part of the actual bstream::BlockResponseV2 payload. As such, we need to decode the
        // full block, which is wasteful.
        //
        // Consider adding basic information about the block to bstream::BlockResponseV2, or maybe
        // define a slimmed-down struct that would decode only a few fields and ignore all the rest.
let block = codec::Block::decode(any_block.value.as_ref())?;
match step {
bstream::ForkStep::StepNew => {
let ethereum_block: EthereumBlockWithCalls = (&block).into();
let block_with_triggers = adapter
.triggers_in_block(logger, BlockFinality::NonFinal(ethereum_block), filter)
.await?;
Ok(BlockStreamEvent::ProcessBlock(
block_with_triggers,
FirehoseCursor::Some(response.cursor.clone()),
))
}
bstream::ForkStep::StepUndo => Ok(BlockStreamEvent::Revert(
BlockPtr {
hash: BlockHash::from(block.hash),
number: block.number as i32,
},
FirehoseCursor::Some(response.cursor.clone()),
Some(BlockPtr {
hash: BlockHash::from(block.header.unwrap().parent_hash),
number: (block.number.checked_sub(1).unwrap() as i32), // Will never receive undo on blocknum 0
}),
)),
bstream::ForkStep::StepIrreversible => {
unreachable!("irreversible step is not handled and should not be requested in the Firehose request")
}
bstream::ForkStep::StepUnknown => {
unreachable!("unknown step should not happen in the Firehose response")
}
}
}
}
pub struct IngestorAdapter {
logger: Logger,
ancestor_count: i32,
eth_adapter: Arc<EthereumAdapter>,
chain_store: Arc<dyn ChainStore>,
}
#[async_trait]
impl IngestorAdapterTrait<Chain> for IngestorAdapter {
fn logger(&self) -> &Logger {
&self.logger
}
fn ancestor_count(&self) -> BlockNumber {
self.ancestor_count
}
async fn latest_block(&self) -> Result<BlockPtr, IngestorError> {
self.eth_adapter
.latest_block_header(&self.logger)
.compat()
.await
.map(|block| block.into())
}
async fn ingest_block(
&self,
block_hash: &BlockHash,
) -> Result<Option<BlockHash>, IngestorError> {
// TODO: H256::from_slice can panic
let block_hash = H256::from_slice(block_hash.as_slice());
// Get the fully populated block
let block = self
.eth_adapter
.block_by_hash(&self.logger, block_hash)
.compat()
.await?
.ok_or_else(|| IngestorError::BlockUnavailable(block_hash))?;
let ethereum_block = self
.eth_adapter
.load_full_block(&self.logger, block)
.await?;
// We need something that implements `Block` to store the block; the
// store does not care whether the block is final or not
let ethereum_block = BlockFinality::NonFinal(EthereumBlockWithCalls {
ethereum_block,
calls: None,
});
// Store it in the database and try to advance the chain head pointer
self.chain_store
.upsert_block(Arc::new(ethereum_block))
.await?;
self.chain_store
.cheap_clone()
.attempt_chain_head_update(self.ancestor_count)
.await
.map(|missing| missing.map(|h256| h256.into()))
.map_err(|e| {
error!(self.logger, "failed to update chain head");
IngestorError::Unknown(e)
})
}
fn chain_head_ptr(&self) -> Result<Option<BlockPtr>, Error> {
self.chain_store.chain_head_ptr()
}
fn cleanup_cached_blocks(&self) -> Result<Option<(i32, usize)>, Error> {
self.chain_store.cleanup_cached_blocks(self.ancestor_count)
}
}
| 34.484286 | 130 | 0.598244 |
26991711c9753eb80234351b427e70944b297763 | 7,727 | use std::{sync::{atomic::{AtomicBool, Ordering}, Arc}, thread, fmt::Display};
use itertools::Itertools;
#[derive(Clone)]
struct Digit{
d: [i64;14],
dset: [SetState;14],
input: Vec<Instruction>,
}
#[derive(Clone, Copy)]
enum SetState {
Set,
Unset
}
impl SetState {
fn val(&self) -> u8 {
match self {
SetState::Set => 1,
SetState::Unset => 0,
}
}
}
impl Digit {
fn new(d: [i64;14], input: Vec<Instruction>) -> Self {
        Self { d, dset: [SetState::Unset; 14], input }
}
    fn tryout(&mut self, i: usize) {
        // Save the current digit so it can be restored after probing all values.
        let g = self.d[i];
        for f in 0..9 {
            self.d[i] = f;
            let mut alu = ALU::new(self.d);
            if let Ok(r) = alu.execute_all(&self.input) {
                println!("{} -> {}", f, r);
            }
        }
        self.d[i] = g;
    }
fn set(&mut self, i: usize) {
let mut min = i64::max_value();
let mut mc = 0;
let mut e = 0;
for f in 0..9 {
self.d[i] = f;
let mut alu = ALU::new(self.d);
            if let Ok(r) = alu.execute_all(&self.input) {
if r < min {
min = r;
mc = 0;
e = f;
} else if r == min {
mc += 1;
}
}
}
if mc == 0 {
self.dset[i] = SetState::Set;
self.d[i] = e;
} else {
self.d[i] = 8;
}
}
fn set_all(&mut self) {
let mut i = 14;
while i > 0 {
self.set(i-1);
i -= 1
}
}
fn increment(&mut self) -> bool {
let mut i = 13;
loop {
match self.dset[i] {
SetState::Set if i == 0 => {
return false;
},
SetState::Set => {
i -= 1;
continue;
},
_ => ()
}
self.d[i] -= 1;
if self.d[i] < 0 {
self.d[i] = 8;
i -= 1;
} else {
break;
}
}
true
}
fn calc(&self) -> Option<i64> {
let mut alu = ALU::new(self.d);
alu.execute_all(&self.input).ok()
}
}
impl Display for Digit {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}{}{}{}{}{}{}{}{}{}{}{}{}{}\n{}{}{}{}{}{}{}{}{}{}{}{}{}{}",
self.d[0]+1,
self.d[1]+1,
self.d[2]+1,
self.d[3]+1,
self.d[4]+1,
self.d[5]+1,
self.d[6]+1,
self.d[7]+1,
self.d[8]+1,
self.d[9]+1,
self.d[10]+1,
self.d[11]+1,
self.d[12]+1,
self.d[13]+1,
self.dset[0].val(),
self.dset[1].val(),
self.dset[2].val(),
self.dset[3].val(),
self.dset[4].val(),
self.dset[5].val(),
self.dset[6].val(),
self.dset[7].val(),
self.dset[8].val(),
self.dset[9].val(),
self.dset[10].val(),
self.dset[11].val(),
self.dset[12].val(),
self.dset[13].val(),)
}
}
#[derive(Clone, Copy)]
pub enum Instruction {
Inp(Reference),
Add(Reference, Reference),
Mul(Reference, Reference),
Div(Reference, Reference),
Mod(Reference, Reference),
Eql(Reference, Reference),
}
impl Instruction {
fn parse(s: &str) -> Instruction {
let mut split = s.trim().split_whitespace();
let instr = split.next().unwrap();
match instr {
"inp" => Instruction::Inp(Reference::parse(split.next().unwrap())),
"add" => Instruction::Add(Reference::parse(split.next().unwrap()),Reference::parse(split.next().unwrap())),
"mul" => Instruction::Mul(Reference::parse(split.next().unwrap()),Reference::parse(split.next().unwrap())),
"div" => Instruction::Div(Reference::parse(split.next().unwrap()),Reference::parse(split.next().unwrap())),
"mod" => Instruction::Mod(Reference::parse(split.next().unwrap()),Reference::parse(split.next().unwrap())),
"eql" => Instruction::Eql(Reference::parse(split.next().unwrap()),Reference::parse(split.next().unwrap())),
_ => panic!("impossibru")
}
}
}
#[derive(Clone, Copy)]
pub enum Reference{
W,
X,
Y,
Z,
Num(i64)
}
type Result<T> = std::result::Result<T, usize>;
impl Reference {
fn parse(s: &str) -> Reference {
match s {
"w" => Reference::W,
"x" => Reference::X,
"y" => Reference::Y,
"z" => Reference::Z,
x => Reference::Num(i64::from_str_radix(x, 10).unwrap()),
}
}
}
pub struct ALU {
w: i64,
x: i64,
y: i64,
z: i64,
buf: [i64;14],
index: usize,
}
impl ALU {
fn new(buf: [i64;14]) -> Self {
Self{w: 0, x: 0, y: 0, z: 0, buf, index: 0}
}
fn getn(&self, r: &Reference) -> i64{
match r {
Reference::W => self.w,
Reference::X => self.x,
Reference::Y => self.y,
Reference::Z => self.z,
Reference::Num(n) => *n
}
}
fn setn(&mut self, r: &Reference, n: i64) -> Result<()>{
match r {
Reference::W => self.w = n,
Reference::X => self.x = n,
Reference::Y => self.y = n,
Reference::Z => self.z = n,
Reference::Num(_) => return Err(1)
}
Ok(())
}
fn execute_instr(&mut self, instr: &Instruction) -> Result<()>{
match instr {
Instruction::Inp(r) => {
let i = self.buf[self.index]+1;
self.index += 1;
self.setn(r, i)?
},
Instruction::Add(a, b) => {
let i = self.getn(a) + self.getn(b);
self.setn(a, i)?;
},
Instruction::Mul(a, b) => {
let i = self.getn(a) * self.getn(b);
self.setn(a, i)?;
},
            Instruction::Div(a, b) => {
                // Integer division truncates toward zero per the ALU spec; the old
                // float round-trip floored only `b` (a no-op) and could lose precision.
                let i = self.getn(a).checked_div(self.getn(b)).ok_or(2usize)?;
                self.setn(a, i)?;
            },
Instruction::Mod(a, b) => {
let i = self.getn(a).checked_rem(self.getn(b)).ok_or(2usize)?;
self.setn(a, i)?;
},
Instruction::Eql(a, b) => {
let i = (self.getn(a) == self.getn(b)) as i64;
self.setn(a, i)?;
},
}
Ok(())
}
fn execute_all(&mut self, instructions: &[Instruction]) -> Result<i64> {
for inst in instructions {
self.execute_instr(inst)?
}
Ok(self.z)
}
}
pub fn input_generator(input: &str) -> Vec<Instruction> {
input
.lines()
.map(|l| Instruction::parse(l))
.collect()
}
pub fn solve_part1(input: &[Instruction]) -> i64 {
let mut d = Digit::new([8,8,8,8,8,8,8,8,8,8,8,8,8,8],input.to_owned());
println!("{}", d);
d.tryout(1);
// d.set_all();
// loop {
// // println!("{}\n", d);
// match d.calc() {
// Some(0) => {
// println!("{}", d);
// return 0;
// },
// Some(x) => {
// // println!("{} -> {}", d, x)
// }
// None => ()
// }
// if !d.increment() {
// break;
// }
// }
1337
}
pub fn solve_part2(_input: &[Instruction]) -> usize {
0
}
| 26.644828 | 119 | 0.413873 |
f549daa0f2ebe5dad7821242e8d2ae93dd046856 | 1,177 | use log::{Level, Metadata, Record};
use std::time::Instant;
pub struct Logger {
level: Level,
start_time: Instant,
}
impl Logger {
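    /// A minimal usage sketch (the printed line is illustrative; the prefix is the
    /// elapsed milliseconds since init plus the short level name):
    ///
    /// ```rust,ignore
    /// Logger::init_with_level(Level::Debug);
    /// log::info!("ready"); // prints something like "[3 I] ready"
    /// ```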
pub fn init_with_level(level: Level) {
let logger = Logger {
level,
start_time: Instant::now(),
};
log::set_boxed_logger(Box::new(logger)).unwrap();
log::set_max_level(level.to_level_filter());
log::debug!("Logger initialized");
}
    pub fn short_level_name(level: Level) -> &'static str {
        match level {
            Level::Error => "E",
            Level::Warn => "W",
            Level::Info => "I",
            Level::Debug => "D",
            Level::Trace => "T",
        }
    }
}
impl log::Log for Logger {
fn enabled(&self, metadata: &Metadata) -> bool {
metadata.level() <= self.level
}
fn log(&self, record: &Record) {
if self.enabled(record.metadata()) {
println!(
"[{} {}] {}",
self.start_time.elapsed().as_millis(),
Logger::short_level_name(record.level()),
record.args()
);
}
}
fn flush(&self) {}
}
| 24.020408 | 59 | 0.496177 |
18265e2958b47a1c4e12a16c991d736d28652ac1 | 150 | // Copyright 2021 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
pub mod balance;
pub mod client_builder;
pub mod full_node_api;
pub mod mqtt;
| 18.75 | 38 | 0.773333 |
7240275c8dc7b4e39b0dd36d9bcfb23889a4fd07 | 790 | // errors4.rs
// Make this test pass! Execute `rustlings hint errors4` for hints :)
#[derive(PartialEq, Debug)]
struct PositiveNonzeroInteger(u64);
#[derive(PartialEq, Debug)]
enum CreationError {
Negative,
Zero,
}
impl PositiveNonzeroInteger {
fn new(value: i64) -> Result<PositiveNonzeroInteger, CreationError> {
if value > 0 {
Ok(PositiveNonzeroInteger(value as u64))
} else if value == 0 {
Err(CreationError::Zero)
        } else {
            Err(CreationError::Negative)
        }
}
}
#[test]
fn test_creation() {
assert!(PositiveNonzeroInteger::new(10).is_ok());
assert_eq!(
Err(CreationError::Negative),
PositiveNonzeroInteger::new(-10)
);
assert_eq!(Err(CreationError::Zero), PositiveNonzeroInteger::new(0));
}
| 24.6875 | 73 | 0.650633 |
9b3b31c6cddeeaf12c410555044f7c9c3cbec97b | 1,085 | use deribit::models::{HeartbeatType, SetHeartbeatRequest, SubscriptionParams, TestRequest};
use deribit::DeribitBuilder;
use dotenv::dotenv;
use env_logger::init;
use failure::Fallible;
use futures::StreamExt;
#[tokio::main]
async fn main() -> Fallible<()> {
let _ = dotenv();
init();
let drb = DeribitBuilder::default().testnet(true).build().unwrap();
let (mut client, mut subscription) = drb.connect().await?;
let resp = client.call(SetHeartbeatRequest::with_interval(10)).await?;
println!("Hearbet response {:?}", resp.await?);
while let Some(Ok(sub)) = subscription.next().await {
if sub.is_heartbeat() {
match sub.params {
SubscriptionParams::Heartbeat { r#type: ty } => match ty {
HeartbeatType::TestRequest => {
println!("Test Requested");
client.call(TestRequest::default()).await?;
}
_ => println!("Heartbeat"),
},
_ => {}
}
}
}
Ok(())
}
| 29.324324 | 91 | 0.55023 |
4b72e4e903bd820c010722e87374ff60c0314849 | 14,500 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::FS_HCCHAR2 {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct MPSIZR {
bits: u16,
}
impl MPSIZR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct EPNUMR {
bits: u8,
}
impl EPNUMR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct EPDIRR {
bits: bool,
}
impl EPDIRR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct LSDEVR {
bits: bool,
}
impl LSDEVR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct EPTYPR {
bits: u8,
}
impl EPTYPR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct MCNTR {
bits: u8,
}
impl MCNTR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct DADR {
bits: u8,
}
impl DADR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct ODDFRMR {
bits: bool,
}
impl ODDFRMR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct CHDISR {
bits: bool,
}
impl CHDISR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct CHENAR {
bits: bool,
}
impl CHENAR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Proxy"]
pub struct _MPSIZW<'a> {
w: &'a mut W,
}
impl<'a> _MPSIZW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 2047;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _EPNUMW<'a> {
w: &'a mut W,
}
impl<'a> _EPNUMW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 15;
const OFFSET: u8 = 11;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _EPDIRW<'a> {
w: &'a mut W,
}
impl<'a> _EPDIRW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 15;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _LSDEVW<'a> {
w: &'a mut W,
}
impl<'a> _LSDEVW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 17;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _EPTYPW<'a> {
w: &'a mut W,
}
impl<'a> _EPTYPW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 3;
const OFFSET: u8 = 18;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _MCNTW<'a> {
w: &'a mut W,
}
impl<'a> _MCNTW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 3;
const OFFSET: u8 = 20;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _DADW<'a> {
w: &'a mut W,
}
impl<'a> _DADW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 127;
const OFFSET: u8 = 22;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _ODDFRMW<'a> {
w: &'a mut W,
}
impl<'a> _ODDFRMW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 29;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CHDISW<'a> {
w: &'a mut W,
}
impl<'a> _CHDISW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 30;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CHENAW<'a> {
w: &'a mut W,
}
impl<'a> _CHENAW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 31;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:10 - Maximum packet size"]
#[inline]
pub fn mpsiz(&self) -> MPSIZR {
let bits = {
const MASK: u16 = 2047;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
MPSIZR { bits }
}
#[doc = "Bits 11:14 - Endpoint number"]
#[inline]
pub fn epnum(&self) -> EPNUMR {
let bits = {
const MASK: u8 = 15;
const OFFSET: u8 = 11;
((self.bits >> OFFSET) & MASK as u32) as u8
};
EPNUMR { bits }
}
#[doc = "Bit 15 - Endpoint direction"]
#[inline]
pub fn epdir(&self) -> EPDIRR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 15;
((self.bits >> OFFSET) & MASK as u32) != 0
};
EPDIRR { bits }
}
#[doc = "Bit 17 - Low-speed device"]
#[inline]
pub fn lsdev(&self) -> LSDEVR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 17;
((self.bits >> OFFSET) & MASK as u32) != 0
};
LSDEVR { bits }
}
#[doc = "Bits 18:19 - Endpoint type"]
#[inline]
pub fn eptyp(&self) -> EPTYPR {
let bits = {
const MASK: u8 = 3;
const OFFSET: u8 = 18;
((self.bits >> OFFSET) & MASK as u32) as u8
};
EPTYPR { bits }
}
#[doc = "Bits 20:21 - Multicount"]
#[inline]
pub fn mcnt(&self) -> MCNTR {
let bits = {
const MASK: u8 = 3;
const OFFSET: u8 = 20;
((self.bits >> OFFSET) & MASK as u32) as u8
};
MCNTR { bits }
}
#[doc = "Bits 22:28 - Device address"]
#[inline]
pub fn dad(&self) -> DADR {
let bits = {
const MASK: u8 = 127;
const OFFSET: u8 = 22;
((self.bits >> OFFSET) & MASK as u32) as u8
};
DADR { bits }
}
#[doc = "Bit 29 - Odd frame"]
#[inline]
pub fn oddfrm(&self) -> ODDFRMR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 29;
((self.bits >> OFFSET) & MASK as u32) != 0
};
ODDFRMR { bits }
}
#[doc = "Bit 30 - Channel disable"]
#[inline]
pub fn chdis(&self) -> CHDISR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 30;
((self.bits >> OFFSET) & MASK as u32) != 0
};
CHDISR { bits }
}
#[doc = "Bit 31 - Channel enable"]
#[inline]
pub fn chena(&self) -> CHENAR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 31;
((self.bits >> OFFSET) & MASK as u32) != 0
};
CHENAR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:10 - Maximum packet size"]
#[inline]
pub fn mpsiz(&mut self) -> _MPSIZW {
_MPSIZW { w: self }
}
#[doc = "Bits 11:14 - Endpoint number"]
#[inline]
pub fn epnum(&mut self) -> _EPNUMW {
_EPNUMW { w: self }
}
#[doc = "Bit 15 - Endpoint direction"]
#[inline]
pub fn epdir(&mut self) -> _EPDIRW {
_EPDIRW { w: self }
}
#[doc = "Bit 17 - Low-speed device"]
#[inline]
pub fn lsdev(&mut self) -> _LSDEVW {
_LSDEVW { w: self }
}
#[doc = "Bits 18:19 - Endpoint type"]
#[inline]
pub fn eptyp(&mut self) -> _EPTYPW {
_EPTYPW { w: self }
}
#[doc = "Bits 20:21 - Multicount"]
#[inline]
pub fn mcnt(&mut self) -> _MCNTW {
_MCNTW { w: self }
}
#[doc = "Bits 22:28 - Device address"]
#[inline]
pub fn dad(&mut self) -> _DADW {
_DADW { w: self }
}
#[doc = "Bit 29 - Odd frame"]
#[inline]
pub fn oddfrm(&mut self) -> _ODDFRMW {
_ODDFRMW { w: self }
}
#[doc = "Bit 30 - Channel disable"]
#[inline]
pub fn chdis(&mut self) -> _CHDISW {
_CHDISW { w: self }
}
#[doc = "Bit 31 - Channel enable"]
#[inline]
pub fn chena(&mut self) -> _CHENAW {
_CHENAW { w: self }
}
}
| 25.663717 | 60 | 0.470138 |
f84a089910e8a072f63c236bc586697997171ff6 | 30,371 | //!
//! Cargo compile currently does the following steps:
//!
//! All configurations are already injected as environment variables via the
//! main cargo command
//!
//! 1. Read the manifest
//! 2. Shell out to `cargo-resolve` with a list of dependencies and sources as
//! stdin
//!
//! a. Shell out to `--do update` and `--do list` for each source
//! b. Resolve dependencies and return a list of name/version/source
//!
//! 3. Shell out to `--do download` for each source
//! 4. Shell out to `--do get` for each source, and build up the list of paths
//! to pass to rustc -L
//! 5. Call `cargo-rustc` with the results of the resolver zipped together with
//! the results of the `get`
//!
//! a. Topologically sort the dependencies
//! b. Compile each dependency in order, passing in the -L's pointing at each
//! previously compiled dependency
//!
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
use std::sync::Arc;
use core::compiler::{BuildConfig, BuildContext, Compilation, Context, DefaultExecutor, Executor};
use core::compiler::{CompileMode, Kind, Unit};
use core::profiles::{ProfileFor, Profiles};
use core::resolver::{Method, Resolve};
use core::{Package, Source, Target};
use core::{PackageId, PackageIdSpec, TargetKind, Workspace};
use ops;
use util::config::Config;
use util::{errors::WorkspaceMemberError, lev_distance, profile, CargoResult};
/// Contains information about how a package should be compiled.
#[derive(Debug)]
pub struct CompileOptions<'a> {
pub config: &'a Config,
/// Configuration information for a rustc build
pub build_config: BuildConfig,
/// Extra features to build for the root package
pub features: Vec<String>,
/// Flag whether all available features should be built for the root package
pub all_features: bool,
/// Flag if the default feature should be built for the root package
pub no_default_features: bool,
/// A set of packages to build.
pub spec: Packages,
/// Filter to apply to the root package to select which targets will be
/// built.
pub filter: CompileFilter,
/// Extra arguments to be passed to rustdoc (single target only)
pub target_rustdoc_args: Option<Vec<String>>,
/// The specified target will be compiled with all the available arguments,
/// note that this only accounts for the *final* invocation of rustc
pub target_rustc_args: Option<Vec<String>>,
/// Extra arguments passed to all selected targets for rustdoc.
pub local_rustdoc_args: Option<Vec<String>>,
    /// The directory to copy final artifacts to. Note that even if `out_dir` is
    /// set, a copy of the artifacts can still be found at `target/(debug|release)`
    /// as usual.
// Note that, although the cmd-line flag name is `out-dir`, in code we use
// `export_dir`, to avoid confusion with out dir at `target/debug/deps`.
pub export_dir: Option<PathBuf>,
}
impl<'a> CompileOptions<'a> {
pub fn new(config: &'a Config, mode: CompileMode) -> CargoResult<CompileOptions<'a>> {
Ok(CompileOptions {
config,
build_config: BuildConfig::new(config, None, &None, mode)?,
features: Vec::new(),
all_features: false,
no_default_features: false,
spec: ops::Packages::Packages(Vec::new()),
filter: CompileFilter::Default {
required_features_filterable: false,
},
target_rustdoc_args: None,
target_rustc_args: None,
local_rustdoc_args: None,
export_dir: None,
})
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Packages {
Default,
All,
OptOut(Vec<String>),
Packages(Vec<String>),
}
impl Packages {
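    /// A minimal mapping sketch (the flag values are illustrative, e.g. as parsed
    /// from `cargo build -p foo -p bar`):
    ///
    /// ```rust,ignore
    /// let spec = Packages::from_flags(false, vec![], vec!["foo".into(), "bar".into()])?;
    /// assert_eq!(spec, Packages::Packages(vec!["foo".into(), "bar".into()]));
    /// ```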
pub fn from_flags(all: bool, exclude: Vec<String>, package: Vec<String>) -> CargoResult<Self> {
Ok(match (all, exclude.len(), package.len()) {
(false, 0, 0) => Packages::Default,
(false, 0, _) => Packages::Packages(package),
(false, _, _) => bail!("--exclude can only be used together with --all"),
(true, 0, _) => Packages::All,
(true, _, _) => Packages::OptOut(exclude),
})
}
pub fn to_package_id_specs(&self, ws: &Workspace) -> CargoResult<Vec<PackageIdSpec>> {
let specs = match *self {
Packages::All => ws.members()
.map(Package::package_id)
.map(PackageIdSpec::from_package_id)
.collect(),
Packages::OptOut(ref opt_out) => ws.members()
.map(Package::package_id)
.map(PackageIdSpec::from_package_id)
.filter(|p| opt_out.iter().position(|x| *x == p.name()).is_none())
.collect(),
Packages::Packages(ref packages) if packages.is_empty() => {
vec![PackageIdSpec::from_package_id(ws.current()?.package_id())]
}
Packages::Packages(ref packages) => packages
.iter()
.map(|p| PackageIdSpec::parse(p))
.collect::<CargoResult<Vec<_>>>()?,
Packages::Default => ws.default_members()
.map(Package::package_id)
.map(PackageIdSpec::from_package_id)
.collect(),
};
if specs.is_empty() {
if ws.is_virtual() {
bail!(
"manifest path `{}` contains no package: The manifest is virtual, \
and the workspace has no members.",
ws.root().display()
)
}
bail!("no packages to compile")
}
Ok(specs)
}
pub fn get_packages<'ws>(&self, ws: &'ws Workspace) -> CargoResult<Vec<&'ws Package>> {
let packages: Vec<_> = match self {
Packages::Default => ws.default_members().collect(),
Packages::All => ws.members().collect(),
Packages::OptOut(ref opt_out) => ws
.members()
.filter(|pkg| !opt_out.iter().any(|name| pkg.name().as_str() == name))
.collect(),
Packages::Packages(ref pkgs) => pkgs
.iter()
.map(|name| {
ws.members()
.find(|pkg| pkg.name().as_str() == name)
.ok_or_else(|| {
format_err!("package `{}` is not a member of the workspace", name)
})
}).collect::<CargoResult<Vec<_>>>()?,
};
Ok(packages)
}
}
#[derive(Debug)]
pub enum FilterRule {
All,
Just(Vec<String>),
}
#[derive(Debug)]
pub enum CompileFilter {
Default {
/// Flag whether targets can be safely skipped when required-features are not satisfied.
required_features_filterable: bool,
},
Only {
all_targets: bool,
lib: bool,
bins: FilterRule,
examples: FilterRule,
tests: FilterRule,
benches: FilterRule,
},
}
pub fn compile<'a>(
ws: &Workspace<'a>,
options: &CompileOptions<'a>,
) -> CargoResult<Compilation<'a>> {
let exec: Arc<Executor> = Arc::new(DefaultExecutor);
compile_with_exec(ws, options, &exec)
}
/// Like `compile` but allows specifying a custom `Executor` that will be able to intercept build
/// calls and add custom logic. `compile` uses `DefaultExecutor` which just passes calls through.
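///
/// A minimal calling sketch (`ws` and `options` are assumed to be constructed
/// elsewhere; `LoggingExecutor` is a hypothetical `Executor` implementation):
///
/// ```rust,ignore
/// let exec: Arc<Executor> = Arc::new(LoggingExecutor);
/// let compilation = compile_with_exec(&ws, &options, &exec)?;
/// ```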
pub fn compile_with_exec<'a>(
ws: &Workspace<'a>,
options: &CompileOptions<'a>,
exec: &Arc<Executor>,
) -> CargoResult<Compilation<'a>> {
ws.emit_warnings()?;
compile_ws(ws, None, options, exec).map_err(|err| {
// Add member manifest path if possible
let err_string = format!("{}", err);
if let Some(member) = ws
.members()
.find(|m| err_string.contains(&format!("`{}`", m.package_id())))
{
WorkspaceMemberError::new(err, member.manifest_path().into()).into()
} else {
err
}
})
}
pub fn compile_ws<'a>(
ws: &Workspace<'a>,
source: Option<Box<Source + 'a>>,
options: &CompileOptions<'a>,
exec: &Arc<Executor>,
) -> CargoResult<Compilation<'a>> {
let CompileOptions {
config,
ref build_config,
ref spec,
ref features,
all_features,
no_default_features,
ref filter,
ref target_rustdoc_args,
ref target_rustc_args,
ref local_rustdoc_args,
ref export_dir,
} = *options;
let default_arch_kind = if build_config.requested_target.is_some() {
Kind::Target
} else {
Kind::Host
};
let specs = spec.to_package_id_specs(ws)?;
let features = Method::split_features(features);
let method = Method::Required {
dev_deps: ws.require_optional_deps() || filter.need_dev_deps(build_config.mode),
features: &features,
all_features,
uses_default_features: !no_default_features,
};
let resolve = ops::resolve_ws_with_method(ws, source, method, &specs)?;
let (packages, resolve_with_overrides) = resolve;
let to_build_ids = specs.iter()
.map(|s| s.query(resolve_with_overrides.iter()))
.collect::<CargoResult<Vec<_>>>()?;
let mut to_builds = packages.get_many(to_build_ids)?;
// The ordering here affects some error messages coming out of cargo, so
// let's be test and CLI friendly by always printing in the same order if
// there's an error.
to_builds.sort_by_key(|p| p.package_id());
for pkg in to_builds.iter() {
pkg.manifest().print_teapot(ws.config());
}
let (extra_args, extra_args_name) = match (target_rustc_args, target_rustdoc_args) {
(&Some(ref args), _) => (Some(args.clone()), "rustc"),
(_, &Some(ref args)) => (Some(args.clone()), "rustdoc"),
_ => (None, ""),
};
if extra_args.is_some() && to_builds.len() != 1 {
panic!(
"`{}` should not accept multiple `-p` flags",
extra_args_name
);
}
let profiles = ws.profiles();
profiles.validate_packages(&mut config.shell(), &packages)?;
let units = generate_targets(
ws,
profiles,
&to_builds,
filter,
default_arch_kind,
&resolve_with_overrides,
build_config,
)?;
let mut extra_compiler_args = HashMap::new();
if let Some(args) = extra_args {
if units.len() != 1 {
bail!(
"extra arguments to `{}` can only be passed to one \
target, consider filtering\nthe package by passing \
e.g. `--lib` or `--bin NAME` to specify a single target",
extra_args_name
);
}
extra_compiler_args.insert(units[0], args);
}
if let Some(args) = local_rustdoc_args {
for unit in &units {
if unit.mode.is_doc() {
extra_compiler_args.insert(*unit, args.clone());
}
}
}
let ret = {
let _p = profile::start("compiling");
let bcx = BuildContext::new(
ws,
&resolve_with_overrides,
&packages,
config,
&build_config,
profiles,
extra_compiler_args,
)?;
let cx = Context::new(config, &bcx)?;
cx.compile(&units, export_dir.clone(), &exec)?
};
Ok(ret)
}
impl FilterRule {
pub fn new(targets: Vec<String>, all: bool) -> FilterRule {
if all {
FilterRule::All
} else {
FilterRule::Just(targets)
}
}
fn matches(&self, target: &Target) -> bool {
match *self {
FilterRule::All => true,
FilterRule::Just(ref targets) => targets.iter().any(|x| *x == target.name()),
}
}
fn is_specific(&self) -> bool {
match *self {
FilterRule::All => true,
FilterRule::Just(ref targets) => !targets.is_empty(),
}
}
pub fn try_collect(&self) -> Option<Vec<String>> {
match *self {
FilterRule::All => None,
FilterRule::Just(ref targets) => Some(targets.clone()),
}
}
}
impl CompileFilter {
pub fn new(
lib_only: bool,
bins: Vec<String>,
all_bins: bool,
tsts: Vec<String>,
all_tsts: bool,
exms: Vec<String>,
all_exms: bool,
bens: Vec<String>,
all_bens: bool,
all_targets: bool,
) -> CompileFilter {
let rule_bins = FilterRule::new(bins, all_bins);
let rule_tsts = FilterRule::new(tsts, all_tsts);
let rule_exms = FilterRule::new(exms, all_exms);
let rule_bens = FilterRule::new(bens, all_bens);
if all_targets {
CompileFilter::Only {
all_targets: true,
lib: true,
bins: FilterRule::All,
examples: FilterRule::All,
benches: FilterRule::All,
tests: FilterRule::All,
}
} else if lib_only || rule_bins.is_specific() || rule_tsts.is_specific()
|| rule_exms.is_specific() || rule_bens.is_specific()
{
CompileFilter::Only {
all_targets: false,
lib: lib_only,
bins: rule_bins,
examples: rule_exms,
benches: rule_bens,
tests: rule_tsts,
}
} else {
CompileFilter::Default {
required_features_filterable: true,
}
}
}
pub fn need_dev_deps(&self, mode: CompileMode) -> bool {
match mode {
CompileMode::Test | CompileMode::Doctest | CompileMode::Bench => true,
CompileMode::Build | CompileMode::Doc { .. } | CompileMode::Check { .. } => match *self
{
CompileFilter::Default { .. } => false,
CompileFilter::Only {
ref examples,
ref tests,
ref benches,
..
} => examples.is_specific() || tests.is_specific() || benches.is_specific(),
},
CompileMode::RunCustomBuild => panic!("Invalid mode"),
}
}
    // This selects targets for "cargo run". For logic to select targets for
    // other subcommands, see `generate_targets` and `filter_default_targets`.
pub fn target_run(&self, target: &Target) -> bool {
match *self {
CompileFilter::Default { .. } => true,
CompileFilter::Only {
lib,
ref bins,
ref examples,
ref tests,
ref benches,
..
} => {
let rule = match *target.kind() {
TargetKind::Bin => bins,
TargetKind::Test => tests,
TargetKind::Bench => benches,
TargetKind::ExampleBin | TargetKind::ExampleLib(..) => examples,
TargetKind::Lib(..) => return lib,
TargetKind::CustomBuild => return false,
};
rule.matches(target)
}
}
}
pub fn is_specific(&self) -> bool {
match *self {
CompileFilter::Default { .. } => false,
CompileFilter::Only { .. } => true,
}
}
}
/// A proposed target.
///
/// Proposed targets are later filtered into actual Units based on whether or
/// not the target requires its features to be present.
#[derive(Debug)]
struct Proposal<'a> {
pkg: &'a Package,
target: &'a Target,
/// Indicates whether or not all required features *must* be present. If
/// false, and the features are not available, then it will be silently
/// skipped. Generally, targets specified by name (`--bin foo`) are
/// required, all others can be silently skipped if features are missing.
requires_features: bool,
mode: CompileMode,
}
/// Generates all the base targets for the packages the user has requested to
/// compile. Dependencies for these targets are computed later in
/// `unit_dependencies`.
fn generate_targets<'a>(
ws: &Workspace,
profiles: &Profiles,
packages: &[&'a Package],
filter: &CompileFilter,
default_arch_kind: Kind,
resolve: &Resolve,
build_config: &BuildConfig,
) -> CargoResult<Vec<Unit<'a>>> {
// Helper for creating a Unit struct.
let new_unit = |pkg: &'a Package, target: &'a Target, target_mode: CompileMode| {
let profile_for = if build_config.mode.is_any_test() {
// NOTE: The ProfileFor here is subtle. If you have a profile
// with `panic` set, the `panic` flag is cleared for
// tests/benchmarks and their dependencies. If we left this
// as an "Any" profile, then the lib would get compiled three
// times (once with panic, once without, and once with
// --test).
//
// This would cause a problem for Doc tests, which would fail
// because `rustdoc` would attempt to link with both libraries
// at the same time. Also, it's probably not important (or
// even desirable?) for rustdoc to link with a lib with
// `panic` set.
//
// As a consequence, Examples and Binaries get compiled
// without `panic` set. This probably isn't a bad deal.
//
// Forcing the lib to be compiled three times during `cargo
// test` is probably also not desirable.
ProfileFor::TestDependency
} else {
ProfileFor::Any
};
let target_mode = match target_mode {
CompileMode::Test => {
if target.is_example() && !filter.is_specific() && !target.tested() {
// Examples are included as regular binaries to verify
// that they compile.
CompileMode::Build
} else {
CompileMode::Test
}
}
CompileMode::Build => match *target.kind() {
TargetKind::Test => CompileMode::Test,
TargetKind::Bench => CompileMode::Bench,
_ => CompileMode::Build,
},
_ => target_mode,
};
// Plugins or proc-macro should be built for the host.
let kind = if target.for_host() {
Kind::Host
} else {
default_arch_kind
};
let profile = profiles.get_profile(
pkg.package_id(),
ws.is_member(pkg),
profile_for,
target_mode,
build_config.release,
);
// Once the profile has been selected for benchmarks, we don't need to
// distinguish between benches and tests. Switching the mode allows
// de-duplication of units that are essentially identical. For
// example, `cargo build --all-targets --release` creates the units
// (lib profile:bench, mode:test) and (lib profile:bench, mode:bench)
// and since these are the same, we want them to be de-duped in
// `unit_dependencies`.
let target_mode = match target_mode {
CompileMode::Bench => CompileMode::Test,
_ => target_mode,
};
Unit {
pkg,
target,
profile,
kind,
mode: target_mode,
}
};
// Create a list of proposed targets.
let mut proposals: Vec<Proposal> = Vec::new();
match *filter {
CompileFilter::Default {
required_features_filterable,
} => {
for pkg in packages {
let default = filter_default_targets(pkg.targets(), build_config.mode);
proposals.extend(default.into_iter().map(|target| Proposal {
pkg,
target,
requires_features: !required_features_filterable,
mode: build_config.mode,
}));
if build_config.mode == CompileMode::Test {
if let Some(t) = pkg
.targets()
.iter()
.find(|t| t.is_lib() && t.doctested() && t.doctestable())
{
proposals.push(Proposal {
pkg,
target: t,
requires_features: false,
mode: CompileMode::Doctest,
});
}
}
}
}
CompileFilter::Only {
all_targets,
lib,
ref bins,
ref examples,
ref tests,
ref benches,
} => {
if lib {
let mut libs = Vec::new();
for pkg in packages {
for target in pkg.targets().iter().filter(|t| t.is_lib()) {
if build_config.mode == CompileMode::Doctest && !target.doctestable() {
ws.config()
.shell()
.warn(format!(
"doc tests are not supported for crate type(s) `{}` in package `{}`",
target.rustc_crate_types().join(", "),
pkg.name()
))?;
} else {
libs.push(Proposal {
pkg,
target,
requires_features: false,
mode: build_config.mode,
});
}
}
}
if !all_targets && libs.is_empty() {
let names = packages.iter().map(|pkg| pkg.name()).collect::<Vec<_>>();
if names.len() == 1 {
bail!("no library targets found in package `{}`", names[0]);
} else {
bail!("no library targets found in packages: {}", names.join(", "));
}
}
proposals.extend(libs);
}
// If --tests was specified, add all targets that would be
// generated by `cargo test`.
let test_filter = match *tests {
FilterRule::All => Target::tested,
FilterRule::Just(_) => Target::is_test,
};
let test_mode = match build_config.mode {
CompileMode::Build => CompileMode::Test,
CompileMode::Check { .. } => CompileMode::Check { test: true },
_ => build_config.mode,
};
// If --benches was specified, add all targets that would be
// generated by `cargo bench`.
let bench_filter = match *benches {
FilterRule::All => Target::benched,
FilterRule::Just(_) => Target::is_bench,
};
let bench_mode = match build_config.mode {
CompileMode::Build => CompileMode::Bench,
CompileMode::Check { .. } => CompileMode::Check { test: true },
_ => build_config.mode,
};
proposals.extend(list_rule_targets(
packages,
bins,
"bin",
Target::is_bin,
build_config.mode,
)?);
proposals.extend(list_rule_targets(
packages,
examples,
"example",
Target::is_example,
build_config.mode,
)?);
proposals.extend(list_rule_targets(
packages,
tests,
"test",
test_filter,
test_mode,
)?);
proposals.extend(list_rule_targets(
packages,
benches,
"bench",
bench_filter,
bench_mode,
)?);
}
}
// Only include targets that are libraries or have all required
// features available.
let mut features_map = HashMap::new();
let mut units = HashSet::new();
    for Proposal { pkg, target, requires_features, mode } in proposals {
let unavailable_features = match target.required_features() {
Some(rf) => {
let features = features_map
.entry(pkg)
.or_insert_with(|| resolve_all_features(resolve, pkg.package_id()));
rf.iter().filter(|f| !features.contains(*f)).collect()
}
None => Vec::new(),
};
if target.is_lib() || unavailable_features.is_empty() {
let unit = new_unit(pkg, target, mode);
units.insert(unit);
} else if requires_features {
let required_features = target.required_features().unwrap();
let quoted_required_features: Vec<String> = required_features
.iter()
.map(|s| format!("`{}`", s))
.collect();
bail!(
"target `{}` in package `{}` requires the features: {}\n\
Consider enabling them by passing e.g. `--features=\"{}\"`",
target.name(),
pkg.name(),
quoted_required_features.join(", "),
required_features.join(" ")
);
}
// else, silently skip target.
}
Ok(units.into_iter().collect())
}
fn resolve_all_features(
resolve_with_overrides: &Resolve,
package_id: &PackageId,
) -> HashSet<String> {
let mut features = resolve_with_overrides.features(package_id).clone();
// Include features enabled for use by dependencies so targets can also use them with the
// required-features field when deciding whether to be built or skipped.
for (dep, _) in resolve_with_overrides.deps(package_id) {
for feature in resolve_with_overrides.features(dep) {
features.insert(dep.name().to_string() + "/" + feature);
}
}
features
}
/// Given a list of all targets for a package, filters it down to only the
/// targets that are automatically included when the user doesn't specify any targets.
fn filter_default_targets(targets: &[Target], mode: CompileMode) -> Vec<&Target> {
match mode {
CompileMode::Bench => targets.iter().filter(|t| t.benched()).collect(),
CompileMode::Test => targets
.iter()
.filter(|t| t.tested() || t.is_example())
.collect(),
CompileMode::Build | CompileMode::Check { .. } => targets
.iter()
.filter(|t| t.is_bin() || t.is_lib())
.collect(),
CompileMode::Doc { .. } => {
// `doc` does lib and bins (bin with same name as lib is skipped).
targets
.iter()
.filter(|t| {
t.documented()
&& (!t.is_bin()
|| !targets.iter().any(|l| l.is_lib() && l.name() == t.name()))
})
.collect()
}
CompileMode::Doctest | CompileMode::RunCustomBuild => panic!("Invalid mode {:?}", mode),
}
}
/// Returns a list of targets based on command-line target selection flags.
/// The return value is a list of `Proposal` structs; each proposal's
/// `requires_features` field indicates whether or not all required features
/// *must* be present.
fn list_rule_targets<'a>(
packages: &[&'a Package],
rule: &FilterRule,
target_desc: &'static str,
is_expected_kind: fn(&Target) -> bool,
mode: CompileMode,
) -> CargoResult<Vec<Proposal<'a>>> {
let mut result = Vec::new();
match *rule {
FilterRule::All => {
for pkg in packages {
for target in pkg.targets() {
if is_expected_kind(target) {
result.push(Proposal {
pkg,
target,
requires_features: false,
mode,
});
}
}
}
}
FilterRule::Just(ref names) => {
for name in names {
result.extend(find_named_targets(
packages,
name,
target_desc,
is_expected_kind,
mode,
)?);
}
}
}
Ok(result)
}
/// Finds the targets matching a specific target name.
fn find_named_targets<'a>(
packages: &[&'a Package],
target_name: &str,
target_desc: &'static str,
is_expected_kind: fn(&Target) -> bool,
mode: CompileMode,
) -> CargoResult<Vec<Proposal<'a>>> {
let mut result = Vec::new();
for pkg in packages {
for target in pkg.targets() {
if target.name() == target_name && is_expected_kind(target) {
result.push(Proposal {
pkg,
target,
requires_features: true,
mode,
});
}
}
}
if result.is_empty() {
let suggestion = packages
.iter()
.flat_map(|pkg| {
pkg.targets()
.iter()
.filter(|target| is_expected_kind(target))
}).map(|target| (lev_distance(target_name, target.name()), target))
.filter(|&(d, _)| d < 4)
.min_by_key(|t| t.0)
.map(|t| t.1);
match suggestion {
Some(s) => bail!(
"no {} target named `{}`\n\nDid you mean `{}`?",
target_desc,
target_name,
s.name()
),
None => bail!("no {} target named `{}`", target_desc, target_name),
}
}
Ok(result)
}
| 35.356228 | 101 | 0.52155 |
22460b8c40c4add52afbc503d5e8e1b7fd7ea155 | 1,550 | use std::fmt::{self, Formatter};
#[derive(Debug)]
pub enum Error {
Qr(qr_code::types::QrError),
Address(bitcoin::util::address::Error),
Secp256k1(bitcoin::secp256k1::Error),
Miniscript(miniscript::Error),
Bmp(qr_code::bmp_monochrome::BmpError),
InvalidAddressType,
MissingChecksum,
MissingMappedKey(String),
OnlyPkh,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Error::Qr(e) => write!(f, "{:?}", e),
Error::Address(e) => write!(f, "{:?}", e),
Error::Miniscript(e) => write!(f, "{:?}", e),
Error::Secp256k1(e) => write!(f, "{:?}", e),
Error::Bmp(e) => write!(f, "{:?}", e),
Error::InvalidAddressType => write!(f, "Valid values: wpkh, wsh, pkh, shwpkh"),
Error::MissingMappedKey(s) => write!(f, "Missing mapped key for alias {}", s),
Error::OnlyPkh => write!(f, "Only *pkh address: wpkh, pkh, shwpkh"),
Error::MissingChecksum => write!(f, "Missing checksum"),
}
}
}
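// A minimal sketch of the human-readable output produced by the Display impl
// above; the alias string "x1" is illustrative only.
#[cfg(test)]
mod display_example {
    use super::*;

    #[test]
    fn human_readable_variants() {
        assert_eq!(Error::MissingChecksum.to_string(), "Missing checksum");
        assert_eq!(
            Error::MissingMappedKey("x1".to_string()).to_string(),
            "Missing mapped key for alias x1"
        );
    }
}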
macro_rules! impl_error {
( $from:ty, $to:ident ) => {
impl std::convert::From<$from> for Error {
fn from(err: $from) -> Self {
Error::$to(err)
}
}
};
}
impl_error!(bitcoin::util::address::Error, Address);
impl_error!(miniscript::Error, Miniscript);
impl_error!(bitcoin::secp256k1::Error, Secp256k1);
impl_error!(qr_code::types::QrError, Qr);
impl_error!(qr_code::bmp_monochrome::BmpError, Bmp);
| 32.978723 | 91 | 0.566452 |
28cc7c8a9e4052ec3f1844d3c371dffbeae47acd | 7,310 | use netinfo::{Inode, Connection, TransportType};
use std::collections::{HashMap};
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
use std::io::Cursor;
use std::net::{SocketAddr, Ipv4Addr, Ipv6Addr, IpAddr};
use std::str::FromStr;
use byteorder::*;
use netinfo::error::*;
/// This structure uses the tables in `/proc/net/tcp`, `/proc/net/tcp6`, `/proc/net/udp` and `/proc/net/udp6` files to generate a
/// `Connection -> Inode`-HashMap.
#[derive(Debug)]
pub struct ConnToInodeMap {
    /// Each transport type (tcp, udp) has its own `Connection -> Inode` mapping.
conn_to_inode_map: HashMap<(TransportType, Connection), Inode>,
}
impl ConnToInodeMap {
/// Constructor for `ConnToInodeMap`.
pub fn new() -> ConnToInodeMap {
ConnToInodeMap { conn_to_inode_map: HashMap::new() }
}
    /// This function parses an address of the form "DDCCBBAA:XXXX" or "IIHHGGEEDDCCBBAA:XXXX".
    /// `bytes.len()` is the number of bytes in the address (4 for IPv4; 16 for IPv6) and `bytes`
/// is used for output. The returned value is the port (the XXXX in the input).
///
/// Because the network format is big endian, the order of the bytes has to be reversed afterwards.
fn parse_ip_addr_to_bytes(s: &str, bytes: &mut [u8]) -> Result<u16> {
if s.len() != bytes.len() * 2 + 1 + 4 { return Err(ErrorKind::ProcNetFileHasWrongFormat)?; }
if s.chars().nth(bytes.len() * 2) != Some(':') { return Err(ErrorKind::ProcNetFileHasWrongFormat)?; }
for (i, byte) in bytes.iter_mut().enumerate() {
*byte = u8::from_str_radix(&s[i*2..i*2 + 2], 16).map_err(|_| ErrorKind::ProcNetFileHasWrongFormat)?;
}
let port_start = bytes.len() * 2 + 1;
let port = u16::from_str_radix(&s[port_start..port_start+4], 16).map_err(|_| ErrorKind::ProcNetFileHasWrongFormat)?;
Ok(port)
}
    /// Fix endianness for every 4-byte chunk (network -> host; u32 big endian -> u32 native endian).
fn fix_endianness(bytes: &mut [u8]) -> Result<()> {
assert!(bytes.len() % 4 == 0);
for i in 0..bytes.len() / 4 {
let host = Cursor::new(&mut bytes[i*4..(i+1)*4]).read_u32::<NetworkEndian>()?;
Cursor::new(&mut bytes[i*4..(i+1)*4]).write_u32::<NativeEndian>(host)?;
}
Ok(())
}
    /// This function parses an address of the form "DDCCBBAA:XXXX" or "IIHHGGEEDDCCBBAA:XXXX" to
/// a `SocketAddr`. See `parse_ip_addr_to_bytes` for more details.
fn parse_ip_addr(s: &str) -> Result<SocketAddr> {
if s.len() == 4 * 2 + 1 + 4 {
let mut addr = [0u8; 4];
let port = Self::parse_ip_addr_to_bytes(s, &mut addr[..])?;
Self::fix_endianness(&mut addr[..])?;
Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::from(addr)), port))
} else if s.len() == 16 * 2 + 1 + 4 {
let mut addr = [0u8; 16];
            let port = Self::parse_ip_addr_to_bytes(s, &mut addr[..])?;
            Self::fix_endianness(&mut addr[..])?;
Ok(SocketAddr::new(IpAddr::V6(Ipv6Addr::from(addr)), port))
} else {
Err(ErrorKind::ProcNetFileHasWrongFormat)?
}
}
/// Parse files like /proc/net/tcp, /proc/net/tcp6, /proc/net/udp, /proc/net/udp6 and return a "connection -> inode" hashmap.
fn parse_net_file(&mut self, path: String) -> Result<HashMap<Connection, Inode>> {
let file = File::open(path.clone()).map_err(|e| ErrorKind::ProcNetFileError(path, e))?;
let reader = BufReader::new(&file);
let mut hash_map = HashMap::new();
for line_res in reader.lines().skip(1) {
let line = line_res?;
let words: Vec<_> = line.split_whitespace().collect();
let local_addr_str = words[1];
let remote_addr_str = words[2];
let local_addr = Self::parse_ip_addr(local_addr_str)?;
let remote_addr = Self::parse_ip_addr(remote_addr_str)?;
let inode = Inode::from_str(words[9]).chain_err(|| ErrorKind::ProcNetFileHasWrongFormat)?;
hash_map.insert(Connection::new(local_addr, remote_addr), inode);
}
Ok(hash_map)
}
    /// Insert a `(transport type, connection) -> inode` entry into the map.
fn add_conninode(&mut self, transport_type: TransportType, connection: Connection, inode: Inode) {
self.conn_to_inode_map.insert((transport_type, connection), inode);
}
/// Discard current HashMap and rebuild from `/proc/net/tcp*`
pub fn refresh(&mut self) -> Result<()> {
self.conn_to_inode_map.clear();
let tcp4_hash_map = self.parse_net_file("/proc/net/tcp".to_string())?;
let tcp6_hash_map = self.parse_net_file("/proc/net/tcp6".to_string())?;
let udp4_hash_map = self.parse_net_file("/proc/net/udp".to_string())?;
let udp6_hash_map = self.parse_net_file("/proc/net/udp6".to_string())?;
for (connection, inode) in tcp4_hash_map.into_iter().chain(tcp6_hash_map.into_iter()).filter(|&(_, inode)| inode != 0) {
self.add_conninode(TransportType::Tcp, connection, inode);
}
for (connection, inode) in udp4_hash_map.into_iter().chain(udp6_hash_map.into_iter()).filter(|&(_, inode)| inode != 0) {
self.add_conninode(TransportType::Udp, connection, inode);
}
Ok(())
}
/// Lookup connection in HashMap and return associated inode when found.
fn find_inode_tcp(&self, tt: TransportType, c: Connection) -> Option<Inode> {
self.conn_to_inode_map.get(&(tt, c)).map(|&x| x)
}
/// Lookup connection in HashMap and return associated inode when found.
/// UDP "connections" do not seem to have a remote adress in /proc/net/udp (its always 0.0.0.0:0) -> onesided.
/// UDP "connections" might claim a port but do not have an IP (0.0.0.0:53241) -> port_only.
fn find_inode_udp(&self, tt: TransportType, mut c: Connection, onesided: bool, port_only: bool) -> Option<Inode> {
if onesided { c = c.get_resetted_remote(); }
if port_only { c = c.get_resetted_ip(); }
self.conn_to_inode_map.get(&(tt, c)).map(|&x| x)
}
/// Lookup connection in HashMap and return associated inode when found
pub fn find_inode(&self, tt: TransportType, c: Connection) -> Option<Inode> {
match tt {
TransportType::Udp => {
                // try progressively less strict versions until we find an inode
                None
                    .or_else(|| self.find_inode_udp(tt, c, false, false)) // case 1: remote addr was always 0.0.0.0:0 in /proc/net/udp* so this will probably not work
                    .or_else(|| self.find_inode_udp(tt, c, true, false)) // case 2: remote is zero, but local has ip and port: this case really happens
                    .or_else(|| self.find_inode_udp(tt, c, false, true)) // case 3: ip is zero for both remote and local, but port is non-zero: this will probably not happen (see case 1)
                    .or_else(|| self.find_inode_udp(tt, c, true, true)) // case 4: we only compare local port => this will give the right inode in most cases, but a port can be claimed by two processes (can be ambiguous)
}
TransportType::Tcp => {
self.find_inode_tcp(tt, c)
}
}
}
}
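// A hedged, minimal example of the /proc/net address format handled above.
// The kernel prints addresses in native byte order, so this test assumes a
// little-endian host, where "0100007F:0050" denotes 127.0.0.1:80.
#[cfg(test)]
mod proc_net_addr_example {
    use super::*;
    use std::net::{IpAddr, Ipv4Addr};

    #[test]
    #[cfg(target_endian = "little")]
    fn parses_ipv4_hex_address() {
        let addr = ConnToInodeMap::parse_ip_addr("0100007F:0050").unwrap();
        assert_eq!(addr.ip(), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
        assert_eq!(addr.port(), 80);
    }
}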
| 47.777778 | 221 | 0.61751 |
8703e1bf5ba0c3eb705fe3e0caa5620894e48104 | 2,239 | use {
rayon::iter::{IntoParallelIterator, ParallelIterator},
solana_metrics::MovingStat,
solana_sdk::{transaction::VersionedTransaction, transport::Result as TransportResult},
std::{
net::SocketAddr,
sync::{atomic::AtomicU64, Arc},
},
};
#[derive(Default)]
pub struct ClientStats {
pub total_connections: AtomicU64,
pub connection_reuse: AtomicU64,
pub connection_errors: AtomicU64,
pub zero_rtt_accepts: AtomicU64,
pub zero_rtt_rejects: AtomicU64,
// these will be the last values of these stats
pub congestion_events: MovingStat,
pub tx_streams_blocked_uni: MovingStat,
pub tx_data_blocked: MovingStat,
pub tx_acks: MovingStat,
}
pub trait TpuConnection {
fn new(tpu_addr: SocketAddr) -> Self;
fn tpu_addr(&self) -> &SocketAddr;
fn serialize_and_send_transaction(
&self,
transaction: &VersionedTransaction,
stats: &ClientStats,
) -> TransportResult<()> {
let wire_transaction =
bincode::serialize(transaction).expect("serialize Transaction in send_batch");
self.send_wire_transaction(&wire_transaction, stats)
}
fn send_wire_transaction<T>(
&self,
wire_transaction: T,
stats: &ClientStats,
) -> TransportResult<()>
where
T: AsRef<[u8]>;
fn send_wire_transaction_async(
&self,
wire_transaction: Vec<u8>,
stats: Arc<ClientStats>,
) -> TransportResult<()>;
fn par_serialize_and_send_transaction_batch(
&self,
transactions: &[VersionedTransaction],
stats: &ClientStats,
) -> TransportResult<()> {
let buffers = transactions
.into_par_iter()
.map(|tx| bincode::serialize(&tx).expect("serialize Transaction in send_batch"))
.collect::<Vec<_>>();
self.send_wire_transaction_batch(&buffers, stats)
}
fn send_wire_transaction_batch<T>(
&self,
buffers: &[T],
stats: &ClientStats,
) -> TransportResult<()>
where
T: AsRef<[u8]>;
fn send_wire_transaction_batch_async(
&self,
buffers: Vec<Vec<u8>>,
stats: Arc<ClientStats>,
) -> TransportResult<()>;
}
| 27.304878 | 92 | 0.632872 |
3ac75b9e31c64e094a3d3559e4c9fe1739c4e5bd | 523 | #![warn(missing_docs)]
//! # Optimization Framework
//!
//! - [core]: main elements used by every optimization problem
//! - [metaheuristics]: implementations of consolidated metaheuristics
//! - [analysis]: tools to gather and visualize metrics to improve understanding of a solver's behavior.
//! - [components]: common building blocks to implement the problem-dependent code. Stuff here is very unstable right now.
//!
#[allow(missing_docs)]
pub mod components;
pub mod analysis;
pub mod core;
pub mod metaheuristics;
| 32.6875 | 122 | 0.749522 |
1cf23305d697db2f9d4d4be84ba9450c538ea958 | 9,086 | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
pub mod fixture;
mod util;
use criterion::measurement::Measurement;
use crate::util::scan_bencher::ScanBencher;
use crate::util::store::*;
use crate::util::BenchCase;
const ROWS: usize = 5000;
/// 1 interested column, which is PK (which is in the key)
///
/// This kind of scanner is used in SQLs like `SELECT COUNT(*)`.
fn bench_table_scan_primary_key<M>(b: &mut criterion::Bencher<'_, M>, input: &Input<M>)
where
M: Measurement,
{
let (table, store) = fixture::table_with_2_columns(ROWS);
input.0.bench(
b,
&[table["id"].as_column_info()],
&[table.get_record_range_all()],
&store,
(),
);
}
/// 1 interested column, at the front of each row. Each row contains 100 columns.
///
/// This kind of scanner is used in SQLs like `SELECT COUNT(column)`.
fn bench_table_scan_datum_front<M>(b: &mut criterion::Bencher<'_, M>, input: &Input<M>)
where
M: Measurement,
{
let (table, store) = fixture::table_with_multi_columns(ROWS, 100);
input.0.bench(
b,
&[table["col0"].as_column_info()],
&[table.get_record_range_all()],
&store,
(),
);
}
/// 2 interested columns, at the front of each row. Each row contains 100 columns.
fn bench_table_scan_datum_multi_front<M>(b: &mut criterion::Bencher<'_, M>, input: &Input<M>)
where
M: Measurement,
{
let (table, store) = fixture::table_with_multi_columns(ROWS, 100);
input.0.bench(
b,
&[
table["col0"].as_column_info(),
table["col1"].as_column_info(),
],
&[table.get_record_range_all()],
&store,
(),
);
}
/// 1 interested column, at the end of each row. Each row contains 100 columns.
fn bench_table_scan_datum_end<M>(b: &mut criterion::Bencher<'_, M>, input: &Input<M>)
where
M: Measurement,
{
let (table, store) = fixture::table_with_multi_columns(ROWS, 100);
input.0.bench(
b,
&[table["col99"].as_column_info()],
&[table.get_record_range_all()],
&store,
(),
);
}
/// 100 interested columns, all columns in the row are interested (i.e. there are 100 columns
/// in total in the row).
fn bench_table_scan_datum_all<M>(b: &mut criterion::Bencher<'_, M>, input: &Input<M>)
where
M: Measurement,
{
let (table, store) = fixture::table_with_multi_columns(ROWS, 100);
input.0.bench(
b,
&table.columns_info(),
&[table.get_record_range_all()],
&store,
(),
);
}
/// 3 columns in the row and the last column is very long but only PK is interested.
fn bench_table_scan_long_datum_primary_key<M>(b: &mut criterion::Bencher<'_, M>, input: &Input<M>)
where
M: Measurement,
{
let (table, store) = fixture::table_with_long_column(ROWS);
input.0.bench(
b,
&[table["id"].as_column_info()],
&[table.get_record_range_all()],
&store,
(),
);
}
/// 3 columns in the row and the last column is very long but a short column is interested.
fn bench_table_scan_long_datum_normal<M>(b: &mut criterion::Bencher<'_, M>, input: &Input<M>)
where
M: Measurement,
{
let (table, store) = fixture::table_with_long_column(ROWS);
input.0.bench(
b,
&[table["foo"].as_column_info()],
&[table.get_record_range_all()],
&store,
(),
);
}
/// 3 columns in the row and the last column is very long and the long column is interested.
fn bench_table_scan_long_datum_long<M>(b: &mut criterion::Bencher<'_, M>, input: &Input<M>)
where
M: Measurement,
{
let (table, store) = fixture::table_with_long_column(ROWS);
input.0.bench(
b,
&[table["bar"].as_column_info()],
&[table.get_record_range_all()],
&store,
(),
);
}
/// 3 columns in the row and the last column is very long and all columns are interested.
fn bench_table_scan_long_datum_all<M>(b: &mut criterion::Bencher<'_, M>, input: &Input<M>)
where
M: Measurement,
{
let (table, store) = fixture::table_with_long_column(ROWS);
input.0.bench(
b,
&[
table["id"].as_column_info(),
table["foo"].as_column_info(),
table["bar"].as_column_info(),
],
&[table.get_record_range_all()],
&store,
(),
);
}
/// 1 interested column, but the column is missing from each row (i.e. its default value is
/// used instead). Each row contains 10 columns in total.
fn bench_table_scan_datum_absent<M>(b: &mut criterion::Bencher<'_, M>, input: &Input<M>)
where
M: Measurement,
{
let (table, store) = fixture::table_with_missing_column(ROWS, 10);
input.0.bench(
b,
&[table["col0"].as_column_info()],
&[table.get_record_range_all()],
&store,
(),
);
}
/// 1 interested column, but the column is missing from each row (i.e. its default value is
/// used instead). Each row contains 100 columns in total.
fn bench_table_scan_datum_absent_large_row<M>(b: &mut criterion::Bencher<'_, M>, input: &Input<M>)
where
M: Measurement,
{
let (table, store) = fixture::table_with_missing_column(ROWS, 100);
input.0.bench(
b,
&[table["col0"].as_column_info()],
&[table.get_record_range_all()],
&store,
(),
);
}
/// 1 interested column, which is PK. However, the ranges given are point ranges.
fn bench_table_scan_point_range<M>(b: &mut criterion::Bencher<'_, M>, input: &Input<M>)
where
M: Measurement,
{
let (table, store) = fixture::table_with_2_columns(ROWS);
let mut ranges = vec![];
for i in 0..=1024 {
ranges.push(table.get_record_range_one(i));
}
input
.0
.bench(b, &[table["id"].as_column_info()], &ranges, &store, ());
}
#[derive(Clone)]
struct Input<M>(Box<dyn ScanBencher<util::TableScanParam, M>>)
where
M: Measurement + 'static;
impl<M> Input<M>
where
M: Measurement + 'static,
{
pub fn new<T: ScanBencher<util::TableScanParam, M> + 'static>(b: T) -> Self {
Self(Box::new(b))
}
}
impl<M> std::fmt::Display for Input<M>
where
M: Measurement + 'static,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0.name())
}
}
pub fn bench<M>(c: &mut criterion::Criterion<M>)
where
M: Measurement + 'static,
{
let mut inputs = vec![
Input::new(util::BatchTableScanNext1024Bencher::<MemStore>::new()),
Input::new(util::TableScanDAGBencher::<RocksStore>::new(false, ROWS)),
Input::new(util::TableScanDAGBencher::<RocksStore>::new(true, ROWS)),
];
if crate::util::bench_level() >= 2 {
let mut additional_inputs = vec![
Input::new(util::BatchTableScanNext1024Bencher::<RocksStore>::new()),
Input::new(util::TableScanDAGBencher::<MemStore>::new(false, ROWS)),
Input::new(util::TableScanDAGBencher::<MemStore>::new(true, ROWS)),
];
inputs.append(&mut additional_inputs);
}
let mut cases = vec![
BenchCase::new("table_scan_primary_key", bench_table_scan_primary_key),
BenchCase::new("table_scan_long_datum_all", bench_table_scan_long_datum_all),
BenchCase::new(
"table_scan_datum_absent_large_row",
bench_table_scan_datum_absent_large_row,
),
];
if crate::util::bench_level() >= 1 {
let mut additional_cases = vec![
BenchCase::new("table_scan_datum_front", bench_table_scan_datum_front),
BenchCase::new("table_scan_datum_all", bench_table_scan_datum_all),
BenchCase::new("table_scan_point_range", bench_table_scan_point_range),
];
cases.append(&mut additional_cases);
}
if crate::util::bench_level() >= 2 {
let mut additional_cases = vec![
BenchCase::new(
"table_scan_datum_multi_front",
bench_table_scan_datum_multi_front,
),
BenchCase::new("table_scan_datum_end", bench_table_scan_datum_end),
BenchCase::new(
"table_scan_long_datum_primary_key",
bench_table_scan_long_datum_primary_key,
),
BenchCase::new(
"table_scan_long_datum_normal",
bench_table_scan_long_datum_normal,
),
BenchCase::new(
"table_scan_long_datum_long",
bench_table_scan_long_datum_long,
),
BenchCase::new("table_scan_datum_absent", bench_table_scan_datum_absent),
];
cases.append(&mut additional_cases);
}
cases.sort();
for case in cases {
let mut group = c.benchmark_group(case.get_name());
for input in inputs.iter() {
group.bench_with_input(
criterion::BenchmarkId::from_parameter(input),
input,
case.get_fn(),
); // TODO: add parameter for each bench
}
group.finish();
}
}
| 29.888158 | 98 | 0.61116 |
d951006c4f5715167252cff11813aa8c41633572 | 45,938 | // Copyright (C) 2020, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//! Quiche application utilities.
//!
//! This module provides some utility functions that are common to quiche
//! applications.
#[macro_use]
extern crate log;
use std::io::prelude::*;
use std::collections::HashMap;
use std::net;
use std::path;
use quiche::h3::NameValue;
const MAX_JSON_DUMP_PAYLOAD: usize = 10000;
/// Returns a String containing a pretty printed version of the `buf` slice.
pub fn hex_dump(buf: &[u8]) -> String {
let vec: Vec<String> = buf.iter().map(|b| format!("{:02x}", b)).collect();
vec.join("")
}
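// Minimal sanity example for the helper above: bytes are rendered as
// lowercase hex with no separators.
#[cfg(test)]
mod hex_dump_example {
    use super::*;

    #[test]
    fn formats_bytes_as_lowercase_hex() {
        assert_eq!(hex_dump(&[0xde, 0xad, 0xbe, 0xef]), "deadbeef");
    }
}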
/// ALPN helpers.
///
/// This module contains constants and functions for working with ALPN.
pub mod alpns {
pub const HTTP_09: [&str; 4] = ["hq-29", "hq-28", "hq-27", "http/0.9"];
pub const HTTP_3: [&str; 3] = ["h3-29", "h3-28", "h3-27"];
pub const SIDUCK: [&str; 2] = ["siduck", "siduck-00"];
pub fn length_prefixed(alpns: &[&str]) -> Vec<u8> {
let mut out = Vec::new();
for s in alpns {
out.push(s.len() as u8);
out.extend_from_slice(s.as_bytes());
}
out
}
}
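// A small sketch of the wire format produced by `length_prefixed`: each ALPN
// entry becomes a one-byte length followed by the raw bytes, matching the TLS
// ALPN extension encoding.
#[cfg(test)]
mod alpn_wire_format_example {
    use super::*;

    #[test]
    fn encodes_length_prefixed_entries() {
        assert_eq!(
            alpns::length_prefixed(&["h3-29"]),
            vec![5, b'h', b'3', b'-', b'2', b'9']
        );
    }
}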
pub trait Args {
fn with_docopt(docopt: &docopt::Docopt) -> Self;
}
/// Contains commons arguments for creating a quiche QUIC connection.
pub struct CommonArgs {
pub alpns: Vec<u8>,
pub max_data: u64,
pub max_stream_data: u64,
pub max_streams_bidi: u64,
pub max_streams_uni: u64,
pub dump_packet_path: Option<String>,
pub no_grease: bool,
pub cc_algorithm: String,
pub disable_hystart: bool,
pub dgrams_enabled: bool,
pub dgram_count: u64,
pub dgram_data: String,
}
/// Creates a new `CommonArgs` structure using the provided [`Docopt`].
///
/// The `Docopt` usage String needs to include the following:
///
/// --http-version VERSION HTTP version to use.
/// --max-data BYTES Connection-wide flow control limit.
/// --max-stream-data BYTES Per-stream flow control limit.
/// --max-streams-bidi STREAMS Number of allowed concurrent streams.
/// --max-streams-uni STREAMS Number of allowed concurrent streams.
/// --dump-packets PATH Dump the incoming packets in PATH.
/// --no-grease Don't send GREASE.
/// --cc-algorithm NAME Set a congestion control algorithm.
/// --disable-hystart Disable HyStart++.
/// --dgram-proto PROTO DATAGRAM application protocol.
/// --dgram-count COUNT Number of DATAGRAMs to send.
/// --dgram-data DATA DATAGRAM data to send.
///
/// [`Docopt`]: https://docs.rs/docopt/1.1.0/docopt/
impl Args for CommonArgs {
fn with_docopt(docopt: &docopt::Docopt) -> Self {
let args = docopt.parse().unwrap_or_else(|e| e.exit());
let http_version = args.get_str("--http-version");
let dgram_proto = args.get_str("--dgram-proto");
let (alpns, dgrams_enabled) = match (http_version, dgram_proto) {
("HTTP/0.9", "none") =>
(alpns::length_prefixed(&alpns::HTTP_09), false),
("HTTP/0.9", _) =>
panic!("Unsupported HTTP version and DATAGRAM protocol."),
("HTTP/3", "none") => (alpns::length_prefixed(&alpns::HTTP_3), false),
("HTTP/3", "oneway") =>
(alpns::length_prefixed(&alpns::HTTP_3), true),
("all", "none") => (
[
alpns::length_prefixed(&alpns::HTTP_3),
alpns::length_prefixed(&alpns::HTTP_09),
]
.concat(),
false,
),
            // SiDuck is its own application protocol.
(_, "siduck") => (alpns::length_prefixed(&alpns::SIDUCK), true),
(..) => panic!("Unsupported HTTP version and DATAGRAM protocol."),
};
let dgram_count = args.get_str("--dgram-count");
let dgram_count = u64::from_str_radix(dgram_count, 10).unwrap();
let dgram_data = args.get_str("--dgram-data").to_string();
let max_data = args.get_str("--max-data");
let max_data = u64::from_str_radix(max_data, 10).unwrap();
let max_stream_data = args.get_str("--max-stream-data");
let max_stream_data = u64::from_str_radix(max_stream_data, 10).unwrap();
let max_streams_bidi = args.get_str("--max-streams-bidi");
let max_streams_bidi = u64::from_str_radix(max_streams_bidi, 10).unwrap();
let max_streams_uni = args.get_str("--max-streams-uni");
let max_streams_uni = u64::from_str_radix(max_streams_uni, 10).unwrap();
let dump_packet_path = if args.get_str("--dump-packets") != "" {
Some(args.get_str("--dump-packets").to_string())
} else {
None
};
let no_grease = args.get_bool("--no-grease");
let cc_algorithm = args.get_str("--cc-algorithm");
let disable_hystart = args.get_bool("--disable-hystart");
CommonArgs {
alpns,
max_data,
max_stream_data,
max_streams_bidi,
max_streams_uni,
dump_packet_path,
no_grease,
cc_algorithm: cc_algorithm.to_string(),
disable_hystart,
dgrams_enabled,
dgram_count,
dgram_data,
}
}
}
pub struct PartialRequest {
pub req: Vec<u8>,
}
pub struct PartialResponse {
pub headers: Option<Vec<quiche::h3::Header>>,
pub body: Vec<u8>,
pub written: usize,
}
pub struct Client {
pub conn: std::pin::Pin<Box<quiche::Connection>>,
pub http_conn: Option<Box<dyn crate::HttpConn>>,
pub siduck_conn: Option<SiDuckConn>,
pub app_proto_selected: bool,
pub partial_requests: std::collections::HashMap<u64, PartialRequest>,
pub partial_responses: std::collections::HashMap<u64, PartialResponse>,
}
pub type ClientMap = HashMap<Vec<u8>, (net::SocketAddr, Client)>;
/// Makes a buffered writer for a resource with a target URL.
///
/// The file will have the same name as the resource's last path segment value.
/// Multiple requests for the same URL are indicated by the value of `cardinal`;
/// any value "N" greater than 1 will cause ".N" to be appended to the
/// filename.
fn make_resource_writer(
url: &url::Url, target_path: &Option<String>, cardinal: u64,
) -> Option<std::io::BufWriter<std::fs::File>> {
if let Some(tp) = target_path {
let resource =
url.path_segments().map(|c| c.collect::<Vec<_>>()).unwrap();
let mut path = format!("{}/{}", tp, resource.iter().last().unwrap());
if cardinal > 1 {
path = format!("{}.{}", path, cardinal);
}
match std::fs::File::create(&path) {
Ok(f) => return Some(std::io::BufWriter::new(f)),
Err(e) => panic!(
"Error creating file for {}, attempted path was {}: {}",
url, path, e
),
}
}
None
}
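// Hedged example: without a `target_path` the helper deliberately returns
// `None`, so response bodies are printed rather than written to disk. The URL
// below is illustrative only; no file is touched.
#[cfg(test)]
mod resource_writer_example {
    use super::*;

    #[test]
    fn no_target_path_means_no_writer() {
        let url = url::Url::parse("https://example.org/static/file.bin").unwrap();
        assert!(make_resource_writer(&url, &None, 1).is_none());
    }
}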
fn autoindex(path: path::PathBuf, index: &str) -> path::PathBuf {
if let Some(path_str) = path.to_str() {
if path_str.ends_with('/') {
let path_str = format!("{}{}", path_str, index);
return path::PathBuf::from(&path_str);
}
}
path
}
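// Minimal example of the directory-index rewrite above, using illustrative
// paths: a trailing slash gets the index file appended, anything else passes
// through unchanged.
#[cfg(test)]
mod autoindex_example {
    use super::*;

    #[test]
    fn appends_index_only_to_directories() {
        let dir = autoindex(path::PathBuf::from("/www/"), "index.html");
        assert_eq!(dir, path::PathBuf::from("/www/index.html"));

        let file = autoindex(path::PathBuf::from("/www/page.html"), "index.html");
        assert_eq!(file, path::PathBuf::from("/www/page.html"));
    }
}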
/// Makes a buffered writer for a qlog.
pub fn make_qlog_writer(
dir: &std::ffi::OsStr, role: &str, id: &str,
) -> std::io::BufWriter<std::fs::File> {
let mut path = std::path::PathBuf::from(dir);
let filename = format!("{}-{}.qlog", role, id);
path.push(filename);
match std::fs::File::create(&path) {
Ok(f) => std::io::BufWriter::new(f),
Err(e) => panic!(
"Error creating qlog file attempted path was {:?}: {}",
path, e
),
}
}
fn dump_json(reqs: &[Http3Request]) {
println!("{{");
println!(" \"entries\": [");
let mut reqs = reqs.iter().peekable();
while let Some(req) = reqs.next() {
println!(" {{");
println!(" \"request\":{{");
println!(" \"headers\":[");
let mut req_hdrs = req.hdrs.iter().peekable();
while let Some(h) = req_hdrs.next() {
println!(" {{");
println!(" \"name\": \"{}\",", h.name());
println!(" \"value\": \"{}\"", h.value());
if req_hdrs.peek().is_some() {
println!(" }},");
} else {
println!(" }}");
}
}
println!(" ]}},");
println!(" \"response\":{{");
println!(" \"headers\":[");
let mut response_hdrs = req.response_hdrs.iter().peekable();
while let Some(h) = response_hdrs.next() {
println!(" {{");
println!(" \"name\": \"{}\",", h.name());
println!(
" \"value\": \"{}\"",
h.value().replace("\"", "\\\"")
);
if response_hdrs.peek().is_some() {
println!(" }},");
} else {
println!(" }}");
}
}
println!(" ],");
println!(" \"body\": {:?}", req.response_body);
println!(" }}");
if reqs.peek().is_some() {
println!("}},");
} else {
println!("}}");
}
}
println!("]");
println!("}}");
}
pub trait HttpConn {
fn send_requests(
&mut self, conn: &mut quiche::Connection, target_path: &Option<String>,
);
fn handle_responses(
&mut self, conn: &mut quiche::Connection, buf: &mut [u8],
req_start: &std::time::Instant,
);
fn report_incomplete(&self, start: &std::time::Instant);
fn handle_requests(
&mut self, conn: &mut std::pin::Pin<Box<quiche::Connection>>,
partial_requests: &mut HashMap<u64, PartialRequest>,
partial_responses: &mut HashMap<u64, PartialResponse>, root: &str,
index: &str, buf: &mut [u8],
) -> quiche::h3::Result<()>;
fn handle_writable(
&mut self, conn: &mut std::pin::Pin<Box<quiche::Connection>>,
partial_responses: &mut HashMap<u64, PartialResponse>, stream_id: u64,
);
}
pub struct SiDuckConn {
quacks_to_make: u64,
quack_contents: String,
quacks_sent: u64,
quacks_acked: u64,
}
impl SiDuckConn {
pub fn new(quacks_to_make: u64, quack_contents: String) -> Self {
Self {
quacks_to_make,
quack_contents,
quacks_sent: 0,
quacks_acked: 0,
}
}
pub fn send_quacks(&mut self, conn: &mut quiche::Connection) {
trace!("sending quacks");
let mut quacks_done = 0;
for _ in self.quacks_sent..self.quacks_to_make {
info!("sending QUIC DATAGRAM with data {:?}", self.quack_contents);
match conn.dgram_send(self.quack_contents.as_bytes()) {
Ok(v) => v,
Err(e) => {
error!("failed to send dgram {:?}", e);
break;
},
}
quacks_done += 1;
}
self.quacks_sent += quacks_done;
}
pub fn handle_quacks(
&mut self, conn: &mut quiche::Connection, buf: &mut [u8],
) -> quiche::h3::Result<()> {
loop {
match conn.dgram_recv(buf) {
Ok(len) => {
let data =
unsafe { std::str::from_utf8_unchecked(&buf[..len]) };
info!("Received DATAGRAM data {:?}", data);
// TODO
if data != "quack" {
match conn.close(true, 0x101, b"only quacks echo") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => panic!("error closing conn: {:?}", e),
}
break;
}
match conn.dgram_send(format!("{}-ack", data).as_bytes()) {
Ok(v) => v,
Err(quiche::Error::Done) => (),
Err(e) => {
error!("failed to send quack ack {:?}", e);
return Err(From::from(e));
},
}
},
Err(quiche::Error::Done) => break,
Err(e) => {
error!("failure receiving DATAGRAM failure {:?}", e);
return Err(From::from(e));
},
}
}
Ok(())
}
pub fn handle_quack_acks(
&mut self, conn: &mut quiche::Connection, buf: &mut [u8],
start: &std::time::Instant,
) {
trace!("handle_quack_acks");
loop {
match conn.dgram_recv(buf) {
Ok(len) => {
let data =
unsafe { std::str::from_utf8_unchecked(&buf[..len]) };
info!("Received DATAGRAM data {:?}", data);
self.quacks_acked += 1;
debug!(
"{}/{} quacks acked",
self.quacks_acked, self.quacks_to_make
);
if self.quacks_acked == self.quacks_to_make {
info!(
"{}/{} dgrams(s) received in {:?}, closing...",
self.quacks_acked,
self.quacks_to_make,
start.elapsed()
);
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => panic!("error closing conn: {:?}", e),
}
break;
}
},
Err(quiche::Error::Done) => {
break;
},
Err(e) => {
error!("failure receiving DATAGRAM failure {:?}", e);
break;
},
}
}
}
pub fn report_incomplete(&self, start: &std::time::Instant) {
if self.quacks_acked != self.quacks_to_make {
error!(
"connection timed out after {:?} and only received {}/{} quack-acks",
start.elapsed(),
self.quacks_acked,
self.quacks_to_make
);
}
}
}
/// Represents an HTTP/0.9 formatted request.
pub struct Http09Request {
url: url::Url,
cardinal: u64,
request_line: String,
stream_id: Option<u64>,
response_writer: Option<std::io::BufWriter<std::fs::File>>,
}
/// Represents an HTTP/3 formatted request.
struct Http3Request {
url: url::Url,
cardinal: u64,
stream_id: Option<u64>,
hdrs: Vec<quiche::h3::Header>,
response_hdrs: Vec<quiche::h3::Header>,
response_body: Vec<u8>,
response_writer: Option<std::io::BufWriter<std::fs::File>>,
}
#[derive(Default)]
pub struct Http09Conn {
stream_id: u64,
reqs_sent: usize,
reqs_complete: usize,
reqs: Vec<Http09Request>,
}
impl Http09Conn {
pub fn with_urls(urls: &[url::Url], reqs_cardinal: u64) -> Box<dyn HttpConn> {
let mut reqs = Vec::new();
for url in urls {
for i in 1..=reqs_cardinal {
let request_line = format!("GET {}\r\n", url.path());
reqs.push(Http09Request {
url: url.clone(),
cardinal: i,
request_line,
stream_id: None,
response_writer: None,
});
}
}
let h_conn = Http09Conn {
stream_id: 0,
reqs_sent: 0,
reqs_complete: 0,
reqs,
};
Box::new(h_conn)
}
}
impl HttpConn for Http09Conn {
fn send_requests(
&mut self, conn: &mut quiche::Connection, target_path: &Option<String>,
) {
let mut reqs_done = 0;
for req in self.reqs.iter_mut().skip(self.reqs_sent) {
match conn.stream_send(
self.stream_id,
req.request_line.as_bytes(),
true,
) {
Ok(v) => v,
Err(quiche::Error::StreamLimit) => {
debug!("not enough stream credits, retry later...");
break;
},
Err(e) => {
error!("failed to send request {:?}", e);
break;
},
};
debug!("sending HTTP request {:?}", req.request_line);
req.stream_id = Some(self.stream_id);
req.response_writer =
make_resource_writer(&req.url, target_path, req.cardinal);
self.stream_id += 4;
reqs_done += 1;
}
self.reqs_sent += reqs_done;
}
fn handle_responses(
&mut self, conn: &mut quiche::Connection, buf: &mut [u8],
req_start: &std::time::Instant,
) {
// Process all readable streams.
for s in conn.readable() {
while let Ok((read, fin)) = conn.stream_recv(s, buf) {
trace!("received {} bytes", read);
let stream_buf = &buf[..read];
trace!(
"stream {} has {} bytes (fin? {})",
s,
stream_buf.len(),
fin
);
let req = self
.reqs
.iter_mut()
.find(|r| r.stream_id == Some(s))
.unwrap();
match &mut req.response_writer {
Some(rw) => {
rw.write_all(&buf[..read]).ok();
},
None => {
print!("{}", unsafe {
std::str::from_utf8_unchecked(&stream_buf)
});
},
}
                // The server reported that it has no more data to send on a
                // client-initiated bidirectional stream, which means we got
                // the full response. If all responses are received then close
                // the connection.
if &s % 4 == 0 && fin {
self.reqs_complete += 1;
let reqs_count = self.reqs.len();
debug!(
"{}/{} responses received",
self.reqs_complete, reqs_count
);
if self.reqs_complete == reqs_count {
info!(
"{}/{} response(s) received in {:?}, closing...",
self.reqs_complete,
reqs_count,
req_start.elapsed()
);
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => panic!("error closing conn: {:?}", e),
}
break;
}
}
}
}
}
fn report_incomplete(&self, start: &std::time::Instant) {
if self.reqs_complete != self.reqs.len() {
error!(
"connection timed out after {:?} and only completed {}/{} requests",
start.elapsed(),
self.reqs_complete,
self.reqs.len()
);
}
}
fn handle_requests(
&mut self, conn: &mut std::pin::Pin<Box<quiche::Connection>>,
partial_requests: &mut HashMap<u64, PartialRequest>,
partial_responses: &mut HashMap<u64, PartialResponse>, root: &str,
index: &str, buf: &mut [u8],
) -> quiche::h3::Result<()> {
// Process all readable streams.
for s in conn.readable() {
while let Ok((read, fin)) = conn.stream_recv(s, buf) {
trace!("{} received {} bytes", conn.trace_id(), read);
let stream_buf = &buf[..read];
trace!(
"{} stream {} has {} bytes (fin? {})",
conn.trace_id(),
s,
stream_buf.len(),
fin
);
let stream_buf =
if let Some(partial) = partial_requests.get_mut(&s) {
partial.req.extend_from_slice(stream_buf);
if !partial.req.ends_with(b"\r\n") {
return Ok(());
}
&partial.req
} else {
if !stream_buf.ends_with(b"\r\n") {
let request = PartialRequest {
req: stream_buf.to_vec(),
};
partial_requests.insert(s, request);
return Ok(());
}
stream_buf
};
if stream_buf.starts_with(b"GET ") {
let uri = &stream_buf[4..stream_buf.len() - 2];
let uri = String::from_utf8(uri.to_vec()).unwrap();
let uri = String::from(uri.lines().next().unwrap());
let uri = path::Path::new(&uri);
let mut path = path::PathBuf::from(root);
partial_requests.remove(&s);
for c in uri.components() {
if let path::Component::Normal(v) = c {
path.push(v)
}
}
path = autoindex(path, index);
info!(
"{} got GET request for {:?} on stream {}",
conn.trace_id(),
path,
s
);
let body = std::fs::read(path.as_path())
.unwrap_or_else(|_| b"Not Found!\r\n".to_vec());
info!(
"{} sending response of size {} on stream {}",
conn.trace_id(),
body.len(),
s
);
let written = match conn.stream_send(s, &body, true) {
Ok(v) => v,
Err(quiche::Error::Done) => 0,
Err(e) => {
error!(
"{} stream send failed {:?}",
conn.trace_id(),
e
);
return Err(From::from(e));
},
};
if written < body.len() {
let response = PartialResponse {
headers: None,
body,
written,
};
partial_responses.insert(s, response);
}
}
}
}
Ok(())
}
fn handle_writable(
&mut self, conn: &mut std::pin::Pin<Box<quiche::Connection>>,
partial_responses: &mut HashMap<u64, PartialResponse>, stream_id: u64,
) {
trace!("{} stream {} is writable", conn.trace_id(), stream_id);
if !partial_responses.contains_key(&stream_id) {
return;
}
let resp = partial_responses.get_mut(&stream_id).unwrap();
let body = &resp.body[resp.written..];
let written = match conn.stream_send(stream_id, &body, true) {
Ok(v) => v,
Err(quiche::Error::Done) => 0,
Err(e) => {
error!("{} stream send failed {:?}", conn.trace_id(), e);
return;
},
};
resp.written += written;
if resp.written == resp.body.len() {
partial_responses.remove(&stream_id);
}
}
}
pub struct Http3DgramSender {
dgram_count: u64,
pub dgram_content: String,
pub flow_id: u64,
pub dgrams_sent: u64,
}
impl Http3DgramSender {
pub fn new(dgram_count: u64, dgram_content: String, flow_id: u64) -> Self {
Self {
dgram_count,
dgram_content,
flow_id,
dgrams_sent: 0,
}
}
}
pub struct Http3Conn {
h3_conn: quiche::h3::Connection,
reqs_sent: usize,
reqs_complete: usize,
reqs: Vec<Http3Request>,
body: Option<Vec<u8>>,
dump_json: bool,
dgram_sender: Option<Http3DgramSender>,
}
impl Http3Conn {
#[allow(clippy::too_many_arguments)]
pub fn with_urls(
conn: &mut quiche::Connection, urls: &[url::Url], reqs_cardinal: u64,
req_headers: &[String], body: &Option<Vec<u8>>, method: &str,
dump_json: bool, dgram_sender: Option<Http3DgramSender>,
) -> Box<dyn HttpConn> {
let mut reqs = Vec::new();
for url in urls {
for i in 1..=reqs_cardinal {
let authority = match url.port() {
Some(port) => format!("{}:{}", url.host_str().unwrap(), port),
None => url.host_str().unwrap().to_string(),
};
let mut hdrs = vec![
quiche::h3::Header::new(":method", &method),
quiche::h3::Header::new(":scheme", url.scheme()),
quiche::h3::Header::new(":authority", &authority),
quiche::h3::Header::new(
":path",
&url[url::Position::BeforePath..],
),
quiche::h3::Header::new("user-agent", "quiche"),
];
// Add custom headers to the request.
for header in req_headers {
let header_split: Vec<&str> =
header.splitn(2, ": ").collect();
if header_split.len() != 2 {
panic!("malformed header provided - \"{}\"", header);
}
hdrs.push(quiche::h3::Header::new(
header_split[0],
header_split[1],
));
}
if body.is_some() {
hdrs.push(quiche::h3::Header::new(
"content-length",
&body.as_ref().unwrap().len().to_string(),
));
}
reqs.push(Http3Request {
url: url.clone(),
cardinal: i,
hdrs,
response_hdrs: Vec::new(),
response_body: Vec::new(),
stream_id: None,
response_writer: None,
});
}
}
let h_conn = Http3Conn {
h3_conn: quiche::h3::Connection::with_transport(
conn,
&quiche::h3::Config::new().unwrap(),
)
.unwrap(),
reqs_sent: 0,
reqs_complete: 0,
reqs,
body: body.as_ref().map(|b| b.to_vec()),
dump_json,
dgram_sender,
};
Box::new(h_conn)
}
pub fn with_conn(
conn: &mut quiche::Connection, dgram_sender: Option<Http3DgramSender>,
) -> Box<dyn HttpConn> {
let h_conn = Http3Conn {
h3_conn: quiche::h3::Connection::with_transport(
conn,
&quiche::h3::Config::new().unwrap(),
)
.unwrap(),
reqs_sent: 0,
reqs_complete: 0,
reqs: Vec::new(),
body: None,
dump_json: false,
dgram_sender,
};
Box::new(h_conn)
}
/// Builds an HTTP/3 response given a request.
fn build_h3_response(
root: &str, index: &str, request: &[quiche::h3::Header],
) -> (Vec<quiche::h3::Header>, Vec<u8>, String) {
let mut file_path = path::PathBuf::from(root);
let mut scheme = "";
let mut host = "";
let mut path = "";
let mut method = "";
let mut priority = "";
// Parse some of the request headers.
for hdr in request {
match hdr.name() {
":scheme" => {
scheme = hdr.value();
},
":authority" | "host" => {
host = hdr.value();
},
":path" => {
path = hdr.value();
},
":method" => {
method = hdr.value();
},
"priority" => {
priority = hdr.value();
},
_ => (),
}
}
if scheme != "http" && scheme != "https" {
let headers = vec![
quiche::h3::Header::new(":status", &"400".to_string()),
quiche::h3::Header::new("server", "quiche"),
];
return (headers, b"Invalid scheme".to_vec(), priority.to_string());
}
let url = format!("{}://{}{}", scheme, host, path);
let url = url::Url::parse(&url).unwrap();
let pathbuf = path::PathBuf::from(url.path());
let pathbuf = autoindex(pathbuf, index);
// Priority query string takes precedence over the header.
// So replace the header with one built here.
let mut query_priority = "".to_string();
for param in url.query_pairs() {
if param.0 == "u" {
query_priority.push_str(&format!("{}={},", param.0, param.1));
}
if param.0 == "i" && param.1 == "1" {
query_priority.push_str("i,");
}
}
if !query_priority.is_empty() {
priority = &query_priority;
}
let (status, body) = match method {
"GET" => {
for c in pathbuf.components() {
if let path::Component::Normal(v) = c {
file_path.push(v)
}
}
match std::fs::read(file_path.as_path()) {
Ok(data) => (200, data),
Err(_) => (404, b"Not Found!".to_vec()),
}
},
_ => (405, Vec::new()),
};
let headers = vec![
quiche::h3::Header::new(":status", &status.to_string()),
quiche::h3::Header::new("server", "quiche"),
quiche::h3::Header::new("content-length", &body.len().to_string()),
quiche::h3::Header::new("priority", &priority),
];
(headers, body, priority.to_string())
}
}
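// A hedged sketch of the early-exit path in `build_h3_response`: any scheme
// other than http/https yields a 400 response before the filesystem is
// consulted, so the root and index arguments here are illustrative only.
#[cfg(test)]
mod h3_response_example {
    use super::*;

    #[test]
    fn rejects_non_http_schemes() {
        let req = vec![
            quiche::h3::Header::new(":method", "GET"),
            quiche::h3::Header::new(":scheme", "ftp"),
            quiche::h3::Header::new(":authority", "example.org"),
            quiche::h3::Header::new(":path", "/"),
        ];
        let (headers, body, priority) =
            Http3Conn::build_h3_response("/tmp/www", "index.html", &req);
        assert_eq!(headers[0].value(), "400");
        assert_eq!(body, b"Invalid scheme".to_vec());
        assert!(priority.is_empty());
    }
}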
impl HttpConn for Http3Conn {
fn send_requests(
&mut self, conn: &mut quiche::Connection, target_path: &Option<String>,
) {
let mut reqs_done = 0;
for req in self.reqs.iter_mut().skip(self.reqs_sent) {
let s = match self.h3_conn.send_request(
conn,
&req.hdrs,
self.body.is_none(),
) {
Ok(v) => v,
Err(quiche::h3::Error::TransportError(
quiche::Error::StreamLimit,
)) => {
debug!("not enough stream credits, retry later...");
break;
},
Err(quiche::h3::Error::StreamBlocked) => {
debug!("stream is blocked, retry later...");
break;
},
Err(e) => {
error!("failed to send request {:?}", e);
break;
},
};
debug!("sending HTTP request {:?}", req.hdrs);
req.stream_id = Some(s);
req.response_writer =
make_resource_writer(&req.url, target_path, req.cardinal);
if let Some(body) = &self.body {
if let Err(e) = self.h3_conn.send_body(conn, s, body, true) {
error!("failed to send request body {:?}", e);
break;
}
}
reqs_done += 1;
}
self.reqs_sent += reqs_done;
if let Some(ds) = self.dgram_sender.as_mut() {
let mut dgrams_done = 0;
for _ in ds.dgrams_sent..ds.dgram_count {
info!(
"sending HTTP/3 DATAGRAM on flow_id={} with data {:?}",
ds.flow_id,
ds.dgram_content.as_bytes()
);
match self.h3_conn.send_dgram(
conn,
0,
ds.dgram_content.as_bytes(),
) {
Ok(v) => v,
Err(e) => {
error!("failed to send dgram {:?}", e);
break;
},
}
dgrams_done += 1;
}
ds.dgrams_sent += dgrams_done;
}
}
fn handle_responses(
&mut self, conn: &mut quiche::Connection, buf: &mut [u8],
req_start: &std::time::Instant,
) {
loop {
match self.h3_conn.poll(conn) {
Ok((stream_id, quiche::h3::Event::Headers { list, .. })) => {
debug!(
"got response headers {:?} on stream id {}",
list, stream_id
);
let req = self
.reqs
.iter_mut()
.find(|r| r.stream_id == Some(stream_id))
.unwrap();
req.response_hdrs = list;
},
Ok((stream_id, quiche::h3::Event::Data)) => {
if let Ok(read) = self.h3_conn.recv_body(conn, stream_id, buf)
{
debug!(
"got {} bytes of response data on stream {}",
read, stream_id
);
let req = self
.reqs
.iter_mut()
.find(|r| r.stream_id == Some(stream_id))
.unwrap();
let len = std::cmp::min(
read,
MAX_JSON_DUMP_PAYLOAD - req.response_body.len(),
);
req.response_body.extend_from_slice(&buf[..len]);
match &mut req.response_writer {
Some(rw) => {
rw.write_all(&buf[..read]).ok();
},
None =>
if !self.dump_json {
print!("{}", unsafe {
std::str::from_utf8_unchecked(
&buf[..read],
)
});
},
}
}
},
Ok((_stream_id, quiche::h3::Event::Finished)) => {
self.reqs_complete += 1;
let reqs_count = self.reqs.len();
debug!(
"{}/{} responses received",
self.reqs_complete, reqs_count
);
if self.reqs_complete == reqs_count {
info!(
"{}/{} response(s) received in {:?}, closing...",
self.reqs_complete,
reqs_count,
req_start.elapsed()
);
if self.dump_json {
dump_json(&self.reqs);
}
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => panic!("error closing conn: {:?}", e),
}
break;
}
},
Ok((_flow_id, quiche::h3::Event::Datagram)) => {
let (len, flow_id, flow_id_len) =
self.h3_conn.recv_dgram(conn, buf).unwrap();
info!(
"Received DATAGRAM flow_id={} len={} data={:?}",
flow_id,
len,
buf[flow_id_len..len].to_vec()
);
},
Err(quiche::h3::Error::Done) => {
break;
},
Err(e) => {
error!("HTTP/3 processing failed: {:?}", e);
break;
},
}
}
}
fn report_incomplete(&self, start: &std::time::Instant) {
if self.reqs_complete != self.reqs.len() {
error!(
"connection timed out after {:?} and only completed {}/{} requests",
start.elapsed(),
self.reqs_complete,
self.reqs.len()
);
if self.dump_json {
dump_json(&self.reqs);
}
}
}
fn handle_requests(
&mut self, conn: &mut std::pin::Pin<Box<quiche::Connection>>,
_partial_requests: &mut HashMap<u64, PartialRequest>,
partial_responses: &mut HashMap<u64, PartialResponse>, root: &str,
index: &str, buf: &mut [u8],
) -> quiche::h3::Result<()> {
// Process HTTP events.
loop {
match self.h3_conn.poll(conn) {
Ok((stream_id, quiche::h3::Event::Headers { list, .. })) => {
info!(
"{} got request {:?} on stream id {}",
conn.trace_id(),
&list,
stream_id
);
// We decide the response based on headers alone, so
// stop reading the request stream so that any body
// is ignored and pointless Data events are not
// generated.
conn.stream_shutdown(stream_id, quiche::Shutdown::Read, 0)
.unwrap();
let (headers, body, priority) =
Http3Conn::build_h3_response(root, index, &list);
match self.h3_conn.send_response_with_priority(
conn, stream_id, &headers, &priority, false,
) {
Ok(v) => v,
Err(quiche::h3::Error::StreamBlocked) => {
let response = PartialResponse {
headers: Some(headers),
body,
written: 0,
};
partial_responses.insert(stream_id, response);
continue;
},
Err(e) => {
error!(
"{} stream send failed {:?}",
conn.trace_id(),
e
);
break;
},
}
let written = match self
.h3_conn
.send_body(conn, stream_id, &body, true)
{
Ok(v) => v,
Err(quiche::h3::Error::Done) => 0,
Err(e) => {
error!(
"{} stream send failed {:?}",
conn.trace_id(),
e
);
break;
},
};
if written < body.len() {
let response = PartialResponse {
headers: None,
body,
written,
};
partial_responses.insert(stream_id, response);
}
},
Ok((stream_id, quiche::h3::Event::Data)) => {
info!(
"{} got data on stream id {}",
conn.trace_id(),
stream_id
);
},
Ok((_stream_id, quiche::h3::Event::Finished)) => (),
Ok((_, quiche::h3::Event::Datagram)) => {
let (len, flow_id, flow_id_len) =
self.h3_conn.recv_dgram(conn, buf).unwrap();
info!(
"Received DATAGRAM flow_id={} data={:?}",
flow_id,
&buf[flow_id_len..len].to_vec()
);
},
Err(quiche::h3::Error::Done) => {
break;
},
Err(e) => {
error!("{} HTTP/3 error {:?}", conn.trace_id(), e);
return Err(e);
},
}
}
if let Some(ds) = self.dgram_sender.as_mut() {
let mut dgrams_done = 0;
for _ in ds.dgrams_sent..ds.dgram_count {
info!(
"sending HTTP/3 DATAGRAM on flow_id={} with data {:?}",
ds.flow_id,
ds.dgram_content.as_bytes()
);
match self.h3_conn.send_dgram(
conn,
0,
ds.dgram_content.as_bytes(),
) {
Ok(v) => v,
Err(e) => {
error!("failed to send dgram {:?}", e);
break;
},
}
dgrams_done += 1;
}
ds.dgrams_sent += dgrams_done;
}
Ok(())
}
fn handle_writable(
&mut self, conn: &mut std::pin::Pin<Box<quiche::Connection>>,
partial_responses: &mut HashMap<u64, PartialResponse>, stream_id: u64,
) {
debug!("{} stream {} is writable", conn.trace_id(), stream_id);
if !partial_responses.contains_key(&stream_id) {
return;
}
let resp = partial_responses.get_mut(&stream_id).unwrap();
if let Some(ref headers) = resp.headers {
match self.h3_conn.send_response(conn, stream_id, &headers, false) {
Ok(_) => (),
Err(quiche::h3::Error::StreamBlocked) => {
return;
},
Err(e) => {
error!("{} stream send failed {:?}", conn.trace_id(), e);
return;
},
}
}
resp.headers = None;
let body = &resp.body[resp.written..];
let written = match self.h3_conn.send_body(conn, stream_id, body, true) {
Ok(v) => v,
Err(quiche::h3::Error::Done) => {
return;
},
Err(e) => {
error!("{} stream send failed {:?}", conn.trace_id(), e);
return;
},
};
resp.written += written;
if resp.written == resp.body.len() {
partial_responses.remove(&stream_id);
}
}
}
| 31.421341 | 85 | 0.434303 |
87316fd5aa9817629cd098fdf6d72076bd2e8976 | 918 | use std::iter::Enumerate;
use std::iter::Peekable;
pub struct TemplateLoop<I>
where
I: Iterator,
{
iter: Peekable<Enumerate<I>>,
}
impl<I> TemplateLoop<I>
where
I: Iterator,
{
#[inline]
pub fn new(iter: I) -> Self {
TemplateLoop {
iter: iter.enumerate().peekable(),
}
}
}
impl<I> Iterator for TemplateLoop<I>
where
I: Iterator,
{
type Item = (<I as Iterator>::Item, LoopItem);
#[inline]
fn next(&mut self) -> Option<(<I as Iterator>::Item, LoopItem)> {
self.iter.next().map(|(index, item)| {
(
item,
LoopItem {
index,
first: index == 0,
last: self.iter.peek().is_none(),
},
)
})
}
}
#[derive(Copy, Clone)]
pub struct LoopItem {
pub index: usize,
pub first: bool,
pub last: bool,
}
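// A minimal usage sketch: the wrapper tags every item with its index plus
// first/last flags, which is what template loop bodies typically consume.
#[cfg(test)]
mod template_loop_example {
    use super::*;

    #[test]
    fn flags_first_and_last_items() {
        let items: Vec<_> = TemplateLoop::new(["a", "b", "c"].iter()).collect();
        assert_eq!(items.len(), 3);
        assert!(items[0].1.first && !items[0].1.last);
        assert_eq!(items[1].1.index, 1);
        assert!(!items[2].1.first && items[2].1.last);
    }
}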
| 18.36 | 69 | 0.492375 |
3ae44e4a1b5ce8872195fef24f9000717803258f | 19,130 | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Buffering wrappers for I/O traits
use cmp;
use container::Container;
use io::{Reader, Writer, Stream, Buffer, DEFAULT_BUF_SIZE, IoResult};
use iter::ExactSize;
use ops::Drop;
use option::{Some, None, Option};
use result::{Ok, Err};
use vec::{OwnedVector, ImmutableVector, MutableVector};
use vec;
/// Wraps a Reader and buffers input from it
///
/// It can be excessively inefficient to work directly with a `Reader` or
/// `Writer`. Every call to `read` or `write` on `TcpStream` results in a
/// system call, for example. This module provides structures that wrap
/// `Readers`, `Writers`, and `Streams` and buffer input and output to them.
///
/// # Example
///
/// ```rust
/// use std::io::{BufferedReader, File};
///
/// let file = File::open(&Path::new("message.txt"));
/// let mut reader = BufferedReader::new(file);
///
/// let mut buf = [0, ..100];
/// match reader.read(buf) {
/// Ok(nread) => println!("Read {} bytes", nread),
/// Err(e) => println!("error reading: {}", e)
/// }
/// ```
pub struct BufferedReader<R> {
priv inner: R,
priv buf: ~[u8],
priv pos: uint,
priv cap: uint,
priv eof: bool,
}
impl<R: Reader> BufferedReader<R> {
/// Creates a new `BufferedReader` with the specified buffer capacity
pub fn with_capacity(cap: uint, inner: R) -> BufferedReader<R> {
// It's *much* faster to create an uninitialized buffer than it is to
// fill everything in with 0. This buffer is entirely an implementation
// detail and is never exposed, so we're safe to not initialize
// everything up-front. This allows creation of BufferedReader instances
// to be very cheap (large mallocs are not nearly as expensive as large
// callocs).
let mut buf = vec::with_capacity(cap);
unsafe { buf.set_len(cap); }
BufferedReader {
inner: inner,
buf: buf,
pos: 0,
cap: 0,
eof: false,
}
}
/// Creates a new `BufferedReader` with a default buffer capacity
pub fn new(inner: R) -> BufferedReader<R> {
BufferedReader::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Gets a reference to the underlying reader.
///
/// This type does not expose the ability to get a mutable reference to the
/// underlying reader because that could possibly corrupt the buffer.
pub fn get_ref<'a>(&'a self) -> &'a R { &self.inner }
/// Unwraps this buffer, returning the underlying reader.
///
/// Note that any leftover data in the internal buffer is lost.
pub fn unwrap(self) -> R { self.inner }
}
impl<R: Reader> Buffer for BufferedReader<R> {
fn fill<'a>(&'a mut self) -> IoResult<&'a [u8]> {
if self.pos == self.cap {
self.cap = try!(self.inner.read(self.buf));
self.pos = 0;
}
Ok(self.buf.slice(self.pos, self.cap))
}
fn consume(&mut self, amt: uint) {
self.pos += amt;
assert!(self.pos <= self.cap);
}
}
impl<R: Reader> Reader for BufferedReader<R> {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
let nread = {
let available = try!(self.fill());
let nread = cmp::min(available.len(), buf.len());
vec::bytes::copy_memory(buf, available.slice_to(nread));
nread
};
self.pos += nread;
Ok(nread)
}
}
/// Wraps a Writer and buffers output to it
///
/// This writer will be flushed when it is dropped.
///
/// # Example
///
/// ```rust
/// # #[allow(unused_must_use)];
/// use std::io::{BufferedWriter, File};
///
/// let file = File::open(&Path::new("message.txt"));
/// let mut writer = BufferedWriter::new(file);
///
/// writer.write_str("hello, world");
/// writer.flush();
/// ```
pub struct BufferedWriter<W> {
priv inner: Option<W>,
priv buf: ~[u8],
priv pos: uint
}
impl<W: Writer> BufferedWriter<W> {
/// Creates a new `BufferedWriter` with the specified buffer capacity
pub fn with_capacity(cap: uint, inner: W) -> BufferedWriter<W> {
// See comments in BufferedReader for why this uses unsafe code.
let mut buf = vec::with_capacity(cap);
unsafe { buf.set_len(cap); }
BufferedWriter {
inner: Some(inner),
buf: buf,
pos: 0
}
}
/// Creates a new `BufferedWriter` with a default buffer capacity
pub fn new(inner: W) -> BufferedWriter<W> {
BufferedWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
}
fn flush_buf(&mut self) -> IoResult<()> {
if self.pos != 0 {
let ret = self.inner.get_mut_ref().write(self.buf.slice_to(self.pos));
self.pos = 0;
ret
} else {
Ok(())
}
}
/// Gets a reference to the underlying writer.
///
/// This type does not expose the ability to get a mutable reference to the
/// underlying reader because that could possibly corrupt the buffer.
pub fn get_ref<'a>(&'a self) -> &'a W { self.inner.get_ref() }
/// Unwraps this buffer, returning the underlying writer.
///
/// The buffer is flushed before returning the writer.
pub fn unwrap(mut self) -> W {
// FIXME(#12628): is failing the right thing to do if flushing fails?
self.flush_buf().unwrap();
self.inner.take_unwrap()
}
}
impl<W: Writer> Writer for BufferedWriter<W> {
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
if self.pos + buf.len() > self.buf.len() {
try!(self.flush_buf());
}
if buf.len() > self.buf.len() {
self.inner.get_mut_ref().write(buf)
} else {
let dst = self.buf.mut_slice_from(self.pos);
vec::bytes::copy_memory(dst, buf);
self.pos += buf.len();
Ok(())
}
}
fn flush(&mut self) -> IoResult<()> {
self.flush_buf().and_then(|()| self.inner.get_mut_ref().flush())
}
}
#[unsafe_destructor]
impl<W: Writer> Drop for BufferedWriter<W> {
fn drop(&mut self) {
if self.inner.is_some() {
// FIXME(#12628): should this error be ignored?
let _ = self.flush_buf();
}
}
}
/// Wraps a Writer and buffers output to it, flushing whenever a newline (`0x0a`,
/// `'\n'`) is detected.
///
/// This writer will be flushed when it is dropped.
pub struct LineBufferedWriter<W> {
priv inner: BufferedWriter<W>,
}
impl<W: Writer> LineBufferedWriter<W> {
/// Creates a new `LineBufferedWriter`
pub fn new(inner: W) -> LineBufferedWriter<W> {
// Lines typically aren't that long, don't use a giant buffer
LineBufferedWriter {
inner: BufferedWriter::with_capacity(1024, inner)
}
}
/// Gets a reference to the underlying writer.
///
/// This type does not expose the ability to get a mutable reference to the
/// underlying reader because that could possibly corrupt the buffer.
pub fn get_ref<'a>(&'a self) -> &'a W { self.inner.get_ref() }
/// Unwraps this buffer, returning the underlying writer.
///
/// The internal buffer is flushed before returning the writer.
pub fn unwrap(self) -> W { self.inner.unwrap() }
}
impl<W: Writer> Writer for LineBufferedWriter<W> {
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
match buf.iter().rposition(|&b| b == '\n' as u8) {
Some(i) => {
try!(self.inner.write(buf.slice_to(i + 1)));
try!(self.inner.flush());
try!(self.inner.write(buf.slice_from(i + 1)));
Ok(())
}
None => self.inner.write(buf),
}
}
fn flush(&mut self) -> IoResult<()> { self.inner.flush() }
}
struct InternalBufferedWriter<W>(BufferedWriter<W>);
impl<W> InternalBufferedWriter<W> {
fn get_mut_ref<'a>(&'a mut self) -> &'a mut BufferedWriter<W> {
let InternalBufferedWriter(ref mut w) = *self;
return w;
}
}
impl<W: Reader> Reader for InternalBufferedWriter<W> {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
self.get_mut_ref().inner.get_mut_ref().read(buf)
}
}
/// Wraps a Stream and buffers input and output to and from it.
///
/// The output half will be flushed when this stream is dropped.
///
/// # Example
///
/// ```rust
/// # #[allow(unused_must_use)];
/// use std::io::{BufferedStream, File};
///
/// let file = File::open(&Path::new("message.txt"));
/// let mut stream = BufferedStream::new(file);
///
/// stream.write("hello, world".as_bytes());
/// stream.flush();
///
/// let mut buf = [0, ..100];
/// match stream.read(buf) {
/// Ok(nread) => println!("Read {} bytes", nread),
/// Err(e) => println!("error reading: {}", e)
/// }
/// ```
pub struct BufferedStream<S> {
priv inner: BufferedReader<InternalBufferedWriter<S>>
}
impl<S: Stream> BufferedStream<S> {
/// Creates a new buffered stream with explicitly listed capacities for the
/// reader/writer buffer.
pub fn with_capacities(reader_cap: uint, writer_cap: uint, inner: S)
-> BufferedStream<S> {
let writer = BufferedWriter::with_capacity(writer_cap, inner);
let internal_writer = InternalBufferedWriter(writer);
let reader = BufferedReader::with_capacity(reader_cap,
internal_writer);
BufferedStream { inner: reader }
}
/// Creates a new buffered stream with the default reader/writer buffer
/// capacities.
pub fn new(inner: S) -> BufferedStream<S> {
BufferedStream::with_capacities(DEFAULT_BUF_SIZE, DEFAULT_BUF_SIZE,
inner)
}
/// Gets a reference to the underlying stream.
///
/// This type does not expose the ability to get a mutable reference to the
/// underlying reader because that could possibly corrupt the buffer.
pub fn get_ref<'a>(&'a self) -> &'a S {
let InternalBufferedWriter(ref w) = self.inner.inner;
w.get_ref()
}
/// Unwraps this buffer, returning the underlying stream.
///
/// The internal buffer is flushed before returning the stream. Any leftover
/// data in the read buffer is lost.
pub fn unwrap(self) -> S {
let InternalBufferedWriter(w) = self.inner.inner;
w.unwrap()
}
}
impl<S: Stream> Buffer for BufferedStream<S> {
fn fill<'a>(&'a mut self) -> IoResult<&'a [u8]> { self.inner.fill() }
fn consume(&mut self, amt: uint) { self.inner.consume(amt) }
}
impl<S: Stream> Reader for BufferedStream<S> {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
self.inner.read(buf)
}
}
impl<S: Stream> Writer for BufferedStream<S> {
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
self.inner.inner.get_mut_ref().write(buf)
}
fn flush(&mut self) -> IoResult<()> {
self.inner.inner.get_mut_ref().flush()
}
}
#[cfg(test)]
mod test {
extern crate test;
use io;
use prelude::*;
use super::*;
use super::super::mem::{MemReader, MemWriter, BufReader};
use Harness = self::test::BenchHarness;
/// A type, free to create, primarily intended for benchmarking creation of
/// wrappers that, just for construction, don't need a Reader/Writer that
/// does anything useful. Is equivalent to `/dev/null` in semantics.
#[deriving(Clone,Eq,Ord)]
pub struct NullStream;
impl Reader for NullStream {
fn read(&mut self, _: &mut [u8]) -> io::IoResult<uint> {
Err(io::standard_error(io::EndOfFile))
}
}
impl Writer for NullStream {
fn write(&mut self, _: &[u8]) -> io::IoResult<()> { Ok(()) }
}
    /// A dummy reader intended for testing short-read propagation.
pub struct ShortReader {
priv lengths: ~[uint],
}
impl Reader for ShortReader {
fn read(&mut self, _: &mut [u8]) -> io::IoResult<uint> {
match self.lengths.shift() {
Some(i) => Ok(i),
None => Err(io::standard_error(io::EndOfFile))
}
}
}
#[test]
fn test_buffered_reader() {
let inner = MemReader::new(~[0, 1, 2, 3, 4]);
let mut reader = BufferedReader::with_capacity(2, inner);
let mut buf = [0, 0, 0];
let nread = reader.read(buf);
assert_eq!(Ok(2), nread);
assert_eq!(buf.as_slice(), &[0, 1, 0]);
let mut buf = [0];
let nread = reader.read(buf);
assert_eq!(Ok(1), nread);
assert_eq!(buf.as_slice(), &[2]);
let mut buf = [0, 0, 0];
let nread = reader.read(buf);
assert_eq!(Ok(1), nread);
assert_eq!(buf.as_slice(), &[3, 0, 0]);
let nread = reader.read(buf);
assert_eq!(Ok(1), nread);
assert_eq!(buf.as_slice(), &[4, 0, 0]);
assert!(reader.read(buf).is_err());
}
#[test]
fn test_buffered_writer() {
let inner = MemWriter::new();
let mut writer = BufferedWriter::with_capacity(2, inner);
writer.write([0, 1]).unwrap();
assert_eq!(writer.get_ref().get_ref(), &[]);
writer.write([2]).unwrap();
assert_eq!(writer.get_ref().get_ref(), &[0, 1]);
writer.write([3]).unwrap();
assert_eq!(writer.get_ref().get_ref(), &[0, 1]);
writer.flush().unwrap();
assert_eq!(&[0, 1, 2, 3], writer.get_ref().get_ref());
writer.write([4]).unwrap();
writer.write([5]).unwrap();
assert_eq!(&[0, 1, 2, 3], writer.get_ref().get_ref());
writer.write([6]).unwrap();
assert_eq!(&[0, 1, 2, 3, 4, 5],
writer.get_ref().get_ref());
writer.write([7, 8]).unwrap();
assert_eq!(&[0, 1, 2, 3, 4, 5, 6],
writer.get_ref().get_ref());
writer.write([9, 10, 11]).unwrap();
assert_eq!(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
writer.get_ref().get_ref());
writer.flush().unwrap();
assert_eq!(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
writer.get_ref().get_ref());
}
#[test]
fn test_buffered_writer_inner_flushes() {
let mut w = BufferedWriter::with_capacity(3, MemWriter::new());
w.write([0, 1]).unwrap();
assert_eq!(&[], w.get_ref().get_ref());
let w = w.unwrap();
assert_eq!(&[0, 1], w.get_ref());
}
// This is just here to make sure that we don't infinite loop in the
// newtype struct autoderef weirdness
#[test]
fn test_buffered_stream() {
struct S;
impl io::Writer for S {
fn write(&mut self, _: &[u8]) -> io::IoResult<()> { Ok(()) }
}
impl io::Reader for S {
fn read(&mut self, _: &mut [u8]) -> io::IoResult<uint> {
Err(io::standard_error(io::EndOfFile))
}
}
let mut stream = BufferedStream::new(S);
let mut buf = [];
assert!(stream.read(buf).is_err());
stream.write(buf).unwrap();
stream.flush().unwrap();
}
#[test]
fn test_read_until() {
let inner = MemReader::new(~[0, 1, 2, 1, 0]);
let mut reader = BufferedReader::with_capacity(2, inner);
assert_eq!(reader.read_until(0), Ok(~[0]));
assert_eq!(reader.read_until(2), Ok(~[1, 2]));
assert_eq!(reader.read_until(1), Ok(~[1]));
assert_eq!(reader.read_until(8), Ok(~[0]));
assert!(reader.read_until(9).is_err());
}
#[test]
fn test_line_buffer() {
let mut writer = LineBufferedWriter::new(MemWriter::new());
writer.write([0]).unwrap();
assert_eq!(writer.get_ref().get_ref(), &[]);
writer.write([1]).unwrap();
assert_eq!(writer.get_ref().get_ref(), &[]);
writer.flush().unwrap();
assert_eq!(writer.get_ref().get_ref(), &[0, 1]);
writer.write([0, '\n' as u8, 1, '\n' as u8, 2]).unwrap();
assert_eq!(writer.get_ref().get_ref(),
&[0, 1, 0, '\n' as u8, 1, '\n' as u8]);
writer.flush().unwrap();
assert_eq!(writer.get_ref().get_ref(),
&[0, 1, 0, '\n' as u8, 1, '\n' as u8, 2]);
writer.write([3, '\n' as u8]).unwrap();
assert_eq!(writer.get_ref().get_ref(),
&[0, 1, 0, '\n' as u8, 1, '\n' as u8, 2, 3, '\n' as u8]);
}
#[test]
fn test_read_line() {
let in_buf = MemReader::new(bytes!("a\nb\nc").to_owned());
let mut reader = BufferedReader::with_capacity(2, in_buf);
assert_eq!(reader.read_line(), Ok(~"a\n"));
assert_eq!(reader.read_line(), Ok(~"b\n"));
assert_eq!(reader.read_line(), Ok(~"c"));
assert!(reader.read_line().is_err());
}
#[test]
fn test_lines() {
let in_buf = MemReader::new(bytes!("a\nb\nc").to_owned());
let mut reader = BufferedReader::with_capacity(2, in_buf);
let mut it = reader.lines();
assert_eq!(it.next(), Some(Ok(~"a\n")));
assert_eq!(it.next(), Some(Ok(~"b\n")));
assert_eq!(it.next(), Some(Ok(~"c")));
assert_eq!(it.next(), None);
}
#[test]
fn test_short_reads() {
let inner = ShortReader{lengths: ~[0, 1, 2, 0, 1, 0]};
let mut reader = BufferedReader::new(inner);
let mut buf = [0, 0];
assert_eq!(reader.read(buf), Ok(0));
assert_eq!(reader.read(buf), Ok(1));
assert_eq!(reader.read(buf), Ok(2));
assert_eq!(reader.read(buf), Ok(0));
assert_eq!(reader.read(buf), Ok(1));
assert_eq!(reader.read(buf), Ok(0));
assert!(reader.read(buf).is_err());
}
#[test]
fn read_char_buffered() {
let buf = [195u8, 159u8];
let mut reader = BufferedReader::with_capacity(1, BufReader::new(buf));
assert_eq!(reader.read_char(), Ok('ß'));
}
#[test]
fn test_chars() {
let buf = [195u8, 159u8, 'a' as u8];
let mut reader = BufferedReader::with_capacity(1, BufReader::new(buf));
let mut it = reader.chars();
assert_eq!(it.next(), Some(Ok('ß')));
assert_eq!(it.next(), Some(Ok('a')));
assert_eq!(it.next(), None);
}
#[bench]
fn bench_buffered_reader(bh: &mut Harness) {
bh.iter(|| {
BufferedReader::new(NullStream)
});
}
#[bench]
fn bench_buffered_writer(bh: &mut Harness) {
bh.iter(|| {
BufferedWriter::new(NullStream)
});
}
#[bench]
fn bench_buffered_stream(bh: &mut Harness) {
bh.iter(|| {
BufferedStream::new(NullStream);
});
}
}
| 31.989967 | 82 | 0.56827 |
1a62f51badb835aa8bcb38a1f2ff8813ff7e4e4c | 3,240 | // pathfinder/demo/magicleap/src/mocked_c_api.rs
//
// Copyright © 2019 The Pathfinder Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A mocked Rust implementation of the Magic Leap C API, to allow it to build without the ML SDK
#![allow(unused_variables)]
#![allow(dead_code)]
#![allow(non_snake_case)]
use crate::c_api::MLCoordinateFrameUID;
use crate::c_api::MLGraphicsClipExtentsInfoArray;
use crate::c_api::MLGraphicsFrameParams;
use crate::c_api::MLGraphicsOptions;
use crate::c_api::MLGraphicsRenderTargetsInfo;
use crate::c_api::MLGraphicsVirtualCameraInfoArray;
use crate::c_api::MLHandle;
use crate::c_api::MLHeadTrackingStaticData;
use crate::c_api::MLLogLevel;
use crate::c_api::MLResult;
use crate::c_api::MLSnapshotPtr;
use crate::c_api::MLTransform;
use std::os::raw::c_char;
pub unsafe fn MLGraphicsCreateClientGL(options: *const MLGraphicsOptions, gl_context: MLHandle, graphics_client : &mut MLHandle) -> MLResult {
unimplemented!()
}
pub unsafe fn MLGraphicsDestroyClient(graphics_client: *mut MLHandle) -> MLResult {
unimplemented!()
}
pub unsafe fn MLHeadTrackingCreate(tracker: *mut MLHandle) -> MLResult {
unimplemented!()
}
pub unsafe fn MLHeadTrackingGetStaticData(head_tracker: MLHandle, data: *mut MLHeadTrackingStaticData) -> MLResult {
unimplemented!()
}
pub unsafe fn MLPerceptionGetSnapshot(snapshot: *mut MLSnapshotPtr) -> MLResult {
unimplemented!()
}
pub unsafe fn MLSnapshotGetTransform(snapshot: MLSnapshotPtr, id: *const MLCoordinateFrameUID, transform: *mut MLTransform) -> MLResult {
unimplemented!()
}
pub unsafe fn MLPerceptionReleaseSnapshot(snapshot: MLSnapshotPtr) -> MLResult {
unimplemented!()
}
pub unsafe fn MLLifecycleSetReadyIndication() -> MLResult {
unimplemented!()
}
pub unsafe fn MLGraphicsGetClipExtents(graphics_client: MLHandle, array: *mut MLGraphicsClipExtentsInfoArray) -> MLResult {
unimplemented!()
}
pub unsafe fn MLGraphicsGetRenderTargets(graphics_client: MLHandle, targets: *mut MLGraphicsRenderTargetsInfo) -> MLResult {
unimplemented!()
}
pub unsafe fn MLGraphicsInitFrameParams(params: *mut MLGraphicsFrameParams) -> MLResult {
unimplemented!()
}
pub unsafe fn MLGraphicsBeginFrame(graphics_client: MLHandle, params: *const MLGraphicsFrameParams, frame_handle: *mut MLHandle, virtual_camera_array: *mut MLGraphicsVirtualCameraInfoArray) -> MLResult {
unimplemented!()
}
pub unsafe fn MLGraphicsEndFrame(graphics_client: MLHandle, frame_handle: MLHandle) -> MLResult {
unimplemented!()
}
pub unsafe fn MLGraphicsSignalSyncObjectGL(graphics_client: MLHandle, sync_object: MLHandle) -> MLResult {
unimplemented!()
}
pub unsafe fn MLGetResultString(result_code: MLResult) -> *const c_char {
unimplemented!()
}
pub unsafe fn MLLoggingLogLevelIsEnabled(lvl: MLLogLevel) -> bool {
unimplemented!()
}
pub unsafe fn MLLoggingLog(lvl: MLLogLevel, tag: *const c_char, message: *const c_char) {
unimplemented!()
}
| 32.727273 | 203 | 0.766667 |
182807513e8ad4cb2cde17909945a4eaf52db95c | 4,184 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::CPUIRQSEL23 {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = "Possible values of the field `EV`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EVR {
#[doc = "CRYPTO result available interupt event, the corresponding flag is found here CRYPTO:IRQSTAT.RESULT_AVAIL. Controlled by CRYPTO:IRQSTAT.RESULT_AVAIL"]
CRYPTO_RESULT_AVAIL_IRQ,
#[doc = r" Reserved"]
_Reserved(u8),
}
impl EVR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
match *self {
EVR::CRYPTO_RESULT_AVAIL_IRQ => 93,
EVR::_Reserved(bits) => bits,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: u8) -> EVR {
match value {
93 => EVR::CRYPTO_RESULT_AVAIL_IRQ,
i => EVR::_Reserved(i),
}
}
#[doc = "Checks if the value of the field is `CRYPTO_RESULT_AVAIL_IRQ`"]
#[inline]
pub fn is_crypto_result_avail_irq(&self) -> bool {
*self == EVR::CRYPTO_RESULT_AVAIL_IRQ
}
}
#[doc = "Values that can be written to the field `EV`"]
pub enum EVW {
#[doc = "CRYPTO result available interupt event, the corresponding flag is found here CRYPTO:IRQSTAT.RESULT_AVAIL. Controlled by CRYPTO:IRQSTAT.RESULT_AVAIL"]
CRYPTO_RESULT_AVAIL_IRQ,
}
impl EVW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> u8 {
match *self {
EVW::CRYPTO_RESULT_AVAIL_IRQ => 93,
}
}
}
#[doc = r" Proxy"]
pub struct _EVW<'a> {
w: &'a mut W,
}
impl<'a> _EVW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: EVW) -> &'a mut W {
unsafe { self.bits(variant._bits()) }
}
#[doc = "CRYPTO result available interupt event, the corresponding flag is found here CRYPTO:IRQSTAT.RESULT_AVAIL. Controlled by CRYPTO:IRQSTAT.RESULT_AVAIL"]
#[inline]
pub fn crypto_result_avail_irq(self) -> &'a mut W {
self.variant(EVW::CRYPTO_RESULT_AVAIL_IRQ)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 127;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:6 - 6:0\\] Read only selection value"]
#[inline]
pub fn ev(&self) -> EVR {
EVR::_from({
const MASK: u8 = 127;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u8
})
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 93 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:6 - 6:0\\] Read only selection value"]
#[inline]
pub fn ev(&mut self) -> _EVW {
_EVW { w: self }
}
}
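// Typical svd2rust read-modify-write usage, sketched in comments (assumes a
// peripheral handle that owns a `CPUIRQSEL23` register instance; the exact
// access path depends on the generated PAC):
//
//     periph.cpuirqsel23.modify(|_r, w| w.ev().crypto_result_avail_irq());
//     let ev = periph.cpuirqsel23.read().ev().bits();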
| 27.708609 | 162 | 0.548279 |
3939c36dd1373eb44a6cf627a9af68f04cf090a4 | 5,679 | use std::fs;
use std::io::Read;
use serde_json;
use search::backends::rocksdb::RocksDBStore;
use uuid::Uuid;
use index::Index;
use index::metadata::IndexMetadata;
use index::metadata::parse::parse as parse_index_metadata;
use api::persistent;
use api::iron::prelude::*;
use api::iron::status;
use api::router::Router;
use api::utils::json_response;
pub fn view_get_index(req: &mut Request) -> IronResult<Response> {
let ref system = get_system!(req);
let ref index_name = read_path_parameter!(req, "index").unwrap_or("");
// Get index
let cluster_metadata = system.metadata.read().unwrap();
let index = get_index_or_404!(cluster_metadata, *index_name);
// Serialise index metadata
let json = {
match serde_json::to_value(&index.metadata) {
Ok(json) => json,
Err(_) => {
return Ok(json_response(status::InternalServerError, json!({
"message": "unable to serialise index metadata"
})));
}
}
};
return Ok(json_response(status::Ok, json));
}
pub fn view_put_index(req: &mut Request) -> IronResult<Response> {
let ref system = get_system!(req);
let ref index_name = read_path_parameter!(req, "index").unwrap_or("");
// Lock cluster metadata
let mut cluster_metadata = system.metadata.write().unwrap();
// Find index
let index_ref = cluster_metadata.names.find_canonical(&index_name);
match index_ref {
Some(_) => {
// Update existing index
// TODO
info!(system.log, "updated index"; "index" => *index_name);
}
None => {
// Load metadata
let mut metadata = IndexMetadata::default();
match json_from_request_body!(req).map(|data| parse_index_metadata(&mut metadata, data)) {
Some(Ok(())) | None => {}
Some(Err(_)) => {
// TODO: better error
return Ok(json_response(status::BadRequest, json!({"message": "Couldn't parse index settings"})));
}
}
// Create index
let mut indices_dir = system.get_indices_dir();
indices_dir.push(index_name);
let index = Index::new(Uuid::new_v4(), index_name.clone().to_owned(), metadata, RocksDBStore::create(indices_dir).unwrap());
index.metadata.read().unwrap().save(index.metadata_path()).unwrap();
let index_ref = cluster_metadata.insert_index(index);
            // If there's an alias with the new index's name, delete it.
let alias_deleted = cluster_metadata.names.delete_alias_whole(index_name).unwrap();
if alias_deleted {
info!(system.log, "deleted alias"; "alias" => format!("{}", index_name), "reason" => "replaced by index");
}
// Register canonical name
cluster_metadata.names.insert_canonical(index_name.clone().to_owned(), index_ref).unwrap();
info!(system.log, "created index"; "index" => *index_name);
}
}
return Ok(json_response(status::Ok, json!({"acknowledged": true})));
}
pub fn view_delete_index(req: &mut Request) -> IronResult<Response> {
let ref system = get_system!(req);
let ref index_selector = read_path_parameter!(req, "index").unwrap_or("");
// Lock cluster metadata
let mut cluster_metadata = system.metadata.write().unwrap();
// Make sure the index exists
get_index_or_404!(cluster_metadata, *index_selector);
// Remove indices
for index_ref in cluster_metadata.names.find(*index_selector) {
// Get the index name
let index_name = {
if let Some(index) = cluster_metadata.indices.get(&index_ref) {
index.canonical_name().to_string()
} else {
// Index doesn't exist
continue;
}
};
// Remove index from array
cluster_metadata.indices.remove(&index_ref);
// Delete canonical name
cluster_metadata.names.delete_canonical(&index_name, index_ref).unwrap();
// Delete file
let mut indices_dir = system.get_indices_dir();
indices_dir.push(&index_name);
match fs::remove_dir_all(&indices_dir) {
Ok(()) => {},
Err(e) => {
warn!(system.log, "failed to delete index data"; "index" => format!("{}", index_name), "error" => format!("{}", e));
}
}
info!(system.log, "deleted index"; "index" => index_name);
// Delete aliases
let alias_names = cluster_metadata.names.iter_index_aliases(index_ref).map(|n| n.to_string()).collect::<Vec<String>>();
for alias_name in alias_names {
let alias_deleted = cluster_metadata.names.delete_alias(&alias_name, index_ref).unwrap();
// If this was the only index being referenced by the alias, the alias would be deleted
if alias_deleted {
info!(system.log, "deleted alias"; "alias" => format!("{}", alias_name), "reason" => "no indices left");
}
}
}
return Ok(json_response(status::Ok, json!({"acknowledged": true})));
}
pub fn view_post_refresh_index(_req: &mut Request) -> IronResult<Response> {
// let ref system = get_system!(req);
// let ref index_name = read_path_parameter!(req, "index").unwrap_or("");
// Lock index array
// TODO
// let mut indices = system.indices.write().unwrap();
// TODO: {"_shards":{"total":10,"successful":5,"failed":0}}
return Ok(json_response(status::Ok, json!({"acknowledged": true})));
}
| 35.055556 | 136 | 0.599753 |
bff7e234385854a60e875b429e1daee12e24f2e6 | 267 | //! Module defining the data structures for the PoE API responses.
pub mod currency;
mod item;
mod label;
mod price;
mod stash;
pub use self::currency::Currency;
pub use self::item::*;
pub use self::label::Label;
pub use self::price::Price;
pub use self::stash::*;
| 19.071429 | 66 | 0.719101 |
9bf5b51bd72a9d3bda76dfaa3c9aa4bb3a593eb6 | 21,034 | // Copyright 2020 Ant Group. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
//! File node for RAFS format
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::ffi::{OsStr, OsString};
use std::fmt;
use std::fs::{self, File};
use std::io::prelude::*;
use std::os::linux::fs::MetadataExt;
use std::os::unix::ffi::OsStrExt;
use std::path::{Component, Path, PathBuf};
use std::str;
use std::str::FromStr;
use nix::sys::stat;
use rafs::RafsIoWriter;
use anyhow::{Context, Error, Result};
use sha2::digest::Digest;
use sha2::Sha256;
use nydus_utils::{
digest::{self, DigestHasher, RafsDigest},
div_round_up, try_round_up_4k, ByteSize,
};
use super::blob::BlobBufferWriter;
use rafs::metadata::layout::*;
use rafs::metadata::*;
use storage::compress;
const ROOT_PATH_NAME: &[u8] = &[b'/'];
pub const OCISPEC_WHITEOUT_PREFIX: &str = ".wh.";
pub const OCISPEC_WHITEOUT_OPAQUE: &str = ".wh..wh..opq";
pub const OVERLAYFS_WHITEOUT_OPAQUE: &str = "trusted.overlay.opaque";
#[derive(Clone, Debug, PartialEq)]
pub enum WhiteoutType {
OciOpaque,
OciRemoval,
OverlayFsOpaque,
OverlayFsRemoval,
}
impl WhiteoutType {
pub fn is_removal(&self) -> bool {
*self == WhiteoutType::OciRemoval || *self == WhiteoutType::OverlayFsRemoval
}
}
#[derive(PartialEq)]
pub enum WhiteoutSpec {
/// https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts
Oci,
/// "whiteouts and opaque directories" in https://www.kernel.org/doc/Documentation/filesystems/overlayfs.txt
Overlayfs,
}
impl FromStr for WhiteoutSpec {
type Err = Error;
fn from_str(s: &str) -> Result<Self> {
match s {
"oci" => Ok(Self::Oci),
"overlayfs" => Ok(Self::Overlayfs),
_ => Err(anyhow!("invalid whiteout spec")),
}
}
}
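// Since `WhiteoutSpec` implements `FromStr`, CLI-style strings parse directly:
// `"oci".parse::<WhiteoutSpec>()` yields `Ok(WhiteoutSpec::Oci)`, while any
// other value surfaces the "invalid whiteout spec" error.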
#[allow(dead_code)]
#[derive(Clone, Debug, PartialEq)]
pub enum Overlay {
Lower,
UpperAddition,
UpperOpaque,
UpperRemoval,
UpperModification,
}
impl Overlay {
pub fn lower_layer(&self) -> bool {
self == &Overlay::Lower
}
}
impl fmt::Display for Overlay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Overlay::Lower => write!(f, "LOWER"),
Overlay::UpperAddition => write!(f, "ADDED"),
Overlay::UpperOpaque => write!(f, "OPAQUED"),
Overlay::UpperRemoval => write!(f, "REMOVED"),
Overlay::UpperModification => write!(f, "MODIFIED"),
}
}
}
#[derive(Default)]
pub struct ChunkCountMap {
/// Store the number of chunks in blob, it's HashMap<blob_index, chunk_count>.
chunks: HashMap<u32, u32>,
}
impl ChunkCountMap {
/// Allocate a count index sequentially by the index of blob table.
pub fn alloc_index(&mut self, blob_index: u32) -> Result<u32> {
match self.chunks.entry(blob_index) {
Entry::Occupied(entry) => {
let chunk_count = entry.into_mut();
let index = *chunk_count;
*chunk_count = index.checked_add(1).ok_or_else(|| {
Error::msg("the number of chunks in blob exceeds the u32 limit")
})?;
Ok(index)
}
Entry::Vacant(entry) => {
entry.insert(1);
Ok(0)
}
}
}
/// Get the number of counts in a blob by the index of blob table.
pub fn count(&self, blob_index: u32) -> Option<&u32> {
self.chunks.get(&blob_index)
}
}
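// A usage sketch in comments: each chunk destined for blob N receives a
// sequential per-blob index, so for a fresh map:
//
//     let mut map = ChunkCountMap::default();
//     assert_eq!(map.alloc_index(0).unwrap(), 0); // first chunk of blob 0
//     assert_eq!(map.alloc_index(0).unwrap(), 1); // second chunk of blob 0
//     assert_eq!(map.alloc_index(1).unwrap(), 0); // first chunk of blob 1
//     assert_eq!(map.count(0), Some(&2));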
impl fmt::Display for Node {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{} {:?}: index {} ino {} real_ino {} i_parent {} child_index {} child_count {} i_nlink {} i_size {} i_name_size {} i_symlink_size {} has_xattr {} link {:?}",
self.file_type(),
self.rootfs(),
self.index,
self.inode.i_ino,
self.real_ino,
self.inode.i_parent,
self.inode.i_child_index,
self.inode.i_child_count,
self.inode.i_nlink,
self.inode.i_size,
self.inode.i_name_size,
self.inode.i_symlink_size,
self.inode.has_xattr(),
self.symlink,
)
}
}
#[derive(Clone)]
pub struct Node {
pub index: u64,
/// Inode number in local filesystem
pub real_ino: Inode,
    /// dev number is required because a source root directory can have multiple
    /// partitions mounted. Files from different partitions can share the same inode number.
pub dev: u64,
/// device ID (if special file), describes the device that this file (inode) represents.
pub rdev: u64,
/// Overlay type for layered build
pub overlay: Overlay,
/// Absolute path to root directory where start to build image.
/// For example: /home/source
pub source: PathBuf,
/// Absolute path to each file within build context directory.
/// Together with `source`, we can easily get relative path to `source`.
/// For example: /home/source/foo/bar
pub path: PathBuf,
/// Define a disk inode structure to persist to disk.
pub inode: OndiskInode,
/// Chunks info list of regular file
pub chunks: Vec<OndiskChunkInfo>,
/// Symlink info of symlink file
pub symlink: Option<OsString>,
/// Xattr list of file
pub xattrs: XAttrs,
pub explicit_uidgid: bool,
}
impl Node {
pub fn new(
source: PathBuf,
path: PathBuf,
overlay: Overlay,
explicit_uidgid: bool,
) -> Result<Node> {
let mut node = Node {
index: 0,
real_ino: 0,
dev: u64::MAX,
rdev: u64::MAX,
source,
path,
overlay,
inode: OndiskInode::new(),
chunks: Vec::new(),
symlink: None,
xattrs: XAttrs::default(),
explicit_uidgid,
};
node.build_inode().context("failed to build inode")?;
Ok(node)
}
fn build_inode_xattr(&mut self) -> Result<()> {
let file_xattrs = match xattr::list(&self.path) {
Ok(x) => x,
Err(e) => {
if e.raw_os_error() == Some(libc::EOPNOTSUPP) {
return Ok(());
} else {
return Err(anyhow!("failed to list xattr of {:?}", self.path));
}
}
};
for key in file_xattrs {
let value = xattr::get(&self.path, &key)
.context(format!("failed to get xattr {:?} of {:?}", key, self.path))?;
self.xattrs.add(key, value.unwrap_or_default());
}
if !self.xattrs.is_empty() {
self.inode.i_flags |= RafsInodeFlags::XATTR;
}
Ok(())
}
pub fn remove_xattr(&mut self, key: &OsStr) {
self.xattrs.remove(key);
if self.xattrs.is_empty() {
self.inode.i_flags.remove(RafsInodeFlags::XATTR);
}
}
#[allow(clippy::too_many_arguments)]
pub fn dump_blob(
&mut self,
blob_writer: &mut BlobBufferWriter,
blob_hash: &mut Sha256,
compress_offset: &mut u64,
decompress_offset: &mut u64,
blob_cache_size: &mut u64,
compressed_blob_size: &mut u64,
chunk_cache: &mut HashMap<RafsDigest, OndiskChunkInfo>,
chunk_count_map: &mut ChunkCountMap,
compressor: compress::Algorithm,
digester: digest::Algorithm,
blob_index: u32,
aligned_chunk: bool,
) -> Result<usize> {
if self.is_dir() {
return Ok(0);
}
if self.is_symlink() {
self.inode.i_digest =
RafsDigest::from_buf(self.symlink.as_ref().unwrap().as_bytes(), digester);
return Ok(0);
} else if self.is_special() {
self.inode.i_digest = RafsDigest::hasher(digester).digest_finalize();
return Ok(0);
}
let file_size = self.inode.i_size;
let mut blob_size = 0usize;
let mut inode_hasher = RafsDigest::hasher(digester);
let mut file = File::open(&self.path)
.with_context(|| format!("failed to open node file {:?}", self.path))?;
for i in 0..self.inode.i_child_count {
// Init chunk info
let mut chunk = OndiskChunkInfo::new();
// FIXME: Should not assume that block size must be the default one.
// Use the configured value instead!
let file_offset = i as u64 * RAFS_DEFAULT_BLOCK_SIZE;
let chunk_size = if i == self.inode.i_child_count - 1 {
file_size - (RAFS_DEFAULT_BLOCK_SIZE * i as u64)
} else {
RAFS_DEFAULT_BLOCK_SIZE
};
// Read chunk data
            // TODO: Hopefully, we won't have to allocate memory from the heap each time,
            // and the `usize` type restriction won't bother us anymore.
let mut chunk_data = vec![0; chunk_size as usize];
file.read_exact(&mut chunk_data)
.with_context(|| format!("failed to read node file {:?}", self.path))?;
// Calculate chunk digest
// TODO: check for hole chunks. One possible way is to always save
// a global hole chunk and check for digest duplication
chunk.block_id = RafsDigest::from_buf(chunk_data.as_slice(), digester);
// Calculate inode digest
inode_hasher.digest_update(chunk.block_id.as_ref());
// Deduplicate chunk if we found a same one from chunk cache
if let Some(cached_chunk) = chunk_cache.get(&chunk.block_id) {
// hole cached_chunk can have zero decompress size
if cached_chunk.decompress_size == 0
|| cached_chunk.decompress_size == chunk_size as u32
{
chunk.clone_from(&cached_chunk);
chunk.file_offset = file_offset;
self.chunks.push(chunk);
trace!(
"\t\tbuilding duplicated chunk: {} compressor {}",
chunk,
compressor
);
                    // The chunks of a hardlink should always be deduplicated, so don't
// trace this situation here.
if !self.is_hardlink() {
event_tracer!("dedup_decompressed_size", +chunk_size);
event_tracer!("dedup_chunks", +1);
}
continue;
}
}
// Compress chunk data
let (compressed, is_compressed) = compress::compress(&chunk_data, compressor)
.with_context(|| format!("failed to compress node file {:?}", self.path))?;
let compressed_size = compressed.len();
if is_compressed {
chunk.flags |= RafsChunkFlags::COMPRESSED;
}
chunk.blob_index = blob_index;
chunk.file_offset = file_offset;
chunk.compress_offset = *compress_offset;
chunk.decompress_offset = *decompress_offset;
chunk.compress_size = compressed_size as u32;
chunk.decompress_size = chunk_size as u32;
chunk.index = chunk_count_map.alloc_index(blob_index)?;
blob_size += compressed_size;
// Move cursor to offset of next chunk
*compress_offset += compressed_size as u64;
let aligned_chunk_size = if aligned_chunk {
// Safe to unwrap since we can't have such a large chunk
// and conversion between u64 values is safe.
try_round_up_4k(chunk_size).unwrap()
} else {
chunk_size
};
*blob_cache_size = *decompress_offset + chunk_size;
*compressed_blob_size += compressed_size as u64;
*decompress_offset += aligned_chunk_size;
// Calculate blob hash
blob_hash.update(&compressed);
// Dump compressed chunk data to blob
event_tracer!("blob_decompressed_size", +chunk_size);
event_tracer!("blob_compressed_size", +compressed_size);
blob_writer
.write_all(&compressed)
.context("failed to write blob")?;
// Cache chunk digest info
chunk_cache.insert(chunk.block_id, chunk);
self.chunks.push(chunk);
trace!("\t\tbuilding chunk: {} compressor {}", chunk, compressor,);
}
// Finish inode digest calculation
self.inode.i_digest = inode_hasher.digest_finalize();
Ok(blob_size)
}
pub fn dump_bootstrap(&mut self, f_bootstrap: &mut RafsIoWriter) -> Result<usize> {
let mut node_size = 0;
// Dump inode info
let name = self.name();
let inode = OndiskInodeWrapper {
name,
symlink: self.symlink.as_deref(),
inode: &self.inode,
};
let inode_size = inode
.store(f_bootstrap)
.context("failed to dump inode to bootstrap")?;
node_size += inode_size;
// Dump inode xattr
if !self.xattrs.is_empty() {
let xattr_size = self
.xattrs
.store(f_bootstrap)
.context("failed to dump xattr to bootstrap")?;
node_size += xattr_size;
}
// Dump chunk info
if self.is_reg() && self.inode.i_child_count as usize != self.chunks.len() {
bail!("invalid chunks count {}: {}", self.chunks.len(), self);
}
for chunk in &mut self.chunks {
let chunk_size = chunk
.store(f_bootstrap)
.context("failed to dump chunk info to bootstrap")?;
node_size += chunk_size;
}
Ok(node_size)
}
fn build_inode_stat(&mut self) -> Result<()> {
let meta = self.meta()?;
self.inode.i_mode = meta.st_mode();
if self.explicit_uidgid {
self.inode.i_uid = meta.st_uid();
self.inode.i_gid = meta.st_gid();
}
self.inode.i_mtime = meta.st_mtime() as u64;
self.inode.i_mtime_nsec = meta.st_mtime_nsec() as u32;
self.inode.i_projid = 0;
self.inode.i_size = meta.st_size();
// Ignore actual nlink value and calculate from rootfs directory instead
self.inode.i_nlink = 1;
        // Xattr pairs are located in the bootstrap rather than the blob; however, we
        // should also reflect the size they consume like other file systems do. We don't
        // directly use the local file's metadata for `i_blocks` since we are pursuing a
        // "reproducible build", which means a nydus image built from anywhere yields
        // the same unique image.
        // TODO: The real size occupied within the blob is the compressed size. Therefore,
        // the sum of all chunks' compressed sizes would be more accurate. But we don't know
        // those sizes yet since compression has not run at this point. Try to make this accurate later.
self.inode.i_blocks =
div_round_up(self.inode.i_size + self.xattrs.aligned_size() as u64, 512);
self.inode.i_rdev = meta.st_rdev() as u32;
self.real_ino = meta.st_ino();
self.dev = meta.st_dev();
self.rdev = meta.st_rdev();
Ok(())
}
fn build_inode(&mut self) -> Result<()> {
self.inode.set_name_size(self.name().byte_size());
// NOTE: Always retrieve xattr before attr so that we can know
// the size of xattr pairs.
self.build_inode_xattr()?;
self.build_inode_stat()
.with_context(|| format!("failed to build inode {:?}", self.path))?;
if self.is_reg() {
self.inode.i_child_count = self.chunk_count() as u32;
} else if self.is_symlink() {
self.inode.i_flags |= RafsInodeFlags::SYMLINK;
let target_path = fs::read_link(&self.path)?;
self.symlink = Some(target_path.into());
self.inode
.set_symlink_size(self.symlink.as_ref().unwrap().byte_size());
}
Ok(())
}
pub fn meta(&self) -> Result<impl MetadataExt> {
self.path
.symlink_metadata()
.with_context(|| format!("failed to get metadata from {:?}", self.path))
}
/// Generate the path relative to original rootfs.
/// For example:
/// `/absolute/path/to/rootfs/file` after converting `/file`
pub fn rootfs(&self) -> PathBuf {
if let Ok(rootfs) = self.path.strip_prefix(&self.source) {
Path::new("/").join(rootfs)
} else {
// Compatible with path `/`
self.path.clone()
}
}
pub fn is_dir(&self) -> bool {
self.inode.i_mode & libc::S_IFMT == libc::S_IFDIR
}
pub fn is_symlink(&self) -> bool {
self.inode.i_mode & libc::S_IFMT == libc::S_IFLNK
}
pub fn is_reg(&self) -> bool {
self.inode.i_mode & libc::S_IFMT == libc::S_IFREG
}
pub fn is_special(&self) -> bool {
self.inode.i_mode & (libc::S_IFBLK | libc::S_IFCHR | libc::S_IFIFO) != 0
}
pub fn is_hardlink(&self) -> bool {
self.inode.i_nlink > 1
}
pub fn chunk_count(&self) -> usize {
if !self.is_reg() {
return 0;
}
div_round_up(self.inode.i_size, RAFS_DEFAULT_BLOCK_SIZE) as usize
}
pub fn file_type(&self) -> &str {
let mut file_type = "";
if self.is_symlink() {
file_type = "symlink";
} else if self.is_dir() {
file_type = "dir"
} else if self.is_reg() {
if self.is_hardlink() {
file_type = "hardlink";
} else {
file_type = "file";
}
}
file_type
}
pub fn name(&self) -> &OsStr {
if self.path == self.source {
OsStr::from_bytes(ROOT_PATH_NAME)
} else {
// Safe to unwrap because `path` should be returned from `path()` which is canonicalized
self.path.file_name().unwrap()
}
}
pub fn origin_name(&self, t: &WhiteoutType) -> Option<&OsStr> {
if let Some(name) = self.name().to_str() {
if *t == WhiteoutType::OciRemoval {
// the whiteout filename prefixes the basename of the path to be deleted with ".wh.".
return Some(OsStr::from_bytes(
name[OCISPEC_WHITEOUT_PREFIX.len()..].as_bytes(),
));
} else if *t == WhiteoutType::OverlayFsRemoval {
// the whiteout file has the same name as the file to be deleted.
return Some(name.as_ref());
}
}
None
}
pub fn path_vec(&self) -> Vec<OsString> {
self.rootfs()
.components()
.map(|comp| match comp {
Component::RootDir => OsString::from("/"),
Component::Normal(name) => name.to_os_string(),
_ => OsString::new(),
})
.collect::<Vec<_>>()
}
pub fn is_overlayfs_whiteout(&self, spec: &WhiteoutSpec) -> bool {
if *spec != WhiteoutSpec::Overlayfs {
return false;
}
(self.inode.i_mode & libc::S_IFMT == libc::S_IFCHR)
&& stat::major(self.rdev) == 0
&& stat::minor(self.rdev) == 0
}
pub fn is_overlayfs_opaque(&self, spec: &WhiteoutSpec) -> bool {
if *spec != WhiteoutSpec::Overlayfs {
return false;
}
// A directory is made opaque by setting the xattr
// "trusted.overlay.opaque" to "y".
if let Some(v) = self.xattrs.get(&OsString::from(OVERLAYFS_WHITEOUT_OPAQUE)) {
if let Ok(v) = std::str::from_utf8(v.as_slice()) {
return v == "y";
}
}
false
}
pub fn whiteout_type(&self, spec: &WhiteoutSpec) -> Option<WhiteoutType> {
if self.overlay == Overlay::Lower {
return None;
}
match spec {
WhiteoutSpec::Oci => {
if let Some(name) = self.name().to_str() {
if name == OCISPEC_WHITEOUT_OPAQUE {
return Some(WhiteoutType::OciOpaque);
} else if name.starts_with(OCISPEC_WHITEOUT_PREFIX) {
return Some(WhiteoutType::OciRemoval);
}
}
}
WhiteoutSpec::Overlayfs => {
if self.is_overlayfs_whiteout(spec) {
return Some(WhiteoutType::OverlayFsRemoval);
} else if self.is_overlayfs_opaque(spec) {
return Some(WhiteoutType::OverlayFsOpaque);
}
}
}
None
}
}
| 33.229068 | 170 | 0.55415 |
8915334d950a71614db334a6e0a1322dfd5ee095 | 847 | use pyo3::prelude::*;
use wasmer_engines::OpaqueCompiler;
/// The LLVM compiler, designed for the `wasmer` Python package (a
/// WebAssembly runtime).
///
/// Please check the documentation of `wasmer.engine` to learn more.
#[pymodule]
fn wasmer_compiler_llvm(_py: Python, module: &PyModule) -> PyResult<()> {
module.add_class::<Compiler>()?;
Ok(())
}
/// The LLVM compiler.
///
/// ## Example
///
/// ```py
/// from wasmer import engine, Store
/// from wasmer_compiler_cranelift import Compiler
///
/// store = Store(engine.JIT(Compiler))
/// ```
#[pyclass]
struct Compiler {}
#[pymethods]
impl Compiler {
/// Please don't use it. Internal use only.
#[staticmethod]
fn into_opaque_compiler() -> OpaqueCompiler {
OpaqueCompiler::raw_with_compiler(wasmer_compiler_llvm::LLVM::default(), "llvm".to_string())
}
}
| 22.891892 | 100 | 0.668241 |
118ec7c6038591917992865b20ec904048823baf | 1,780 | // Copyright 2019 Jeremy Wall
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::btree_map;
use std::collections::BTreeMap;
use std::path::PathBuf;
use std::rc::Rc;
use super::translate::OpsMap;
use super::Error;
use super::OpPointer;
/// A Cache of Op codes.
pub struct Ops {
ops: BTreeMap<PathBuf, Rc<OpsMap>>,
}
impl Ops {
pub fn new() -> Self {
Self {
ops: BTreeMap::new(),
}
}
pub fn entry<'a, P: Into<PathBuf>>(&'a mut self, path: P) -> Entry<'a> {
Entry(self.ops.entry(path.into()))
}
}
pub struct Entry<'a>(btree_map::Entry<'a, PathBuf, Rc<OpsMap>>);
impl<'a> Entry<'a> {
pub fn get_pointer_or_else<F: FnOnce() -> Result<OpsMap, Error>, P: Into<PathBuf>>(
self,
f: F,
path: P,
) -> Result<OpPointer, Error> {
let cached = match self.0 {
btree_map::Entry::Occupied(e) => e.get().clone(),
btree_map::Entry::Vacant(e) => {
// TODO(jwall) Check a file cache for the opcodes before
let v = Rc::new(f()?);
e.insert(v.clone());
v
}
};
let mut ptr = OpPointer::new(cached);
ptr.set_path(path.into());
Ok(ptr)
}
}
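// A usage sketch (the `compile_file` helper is hypothetical and stands in for
// whatever produces an `OpsMap` from source): the cache compiles at most once
// per path and hands back shared pointers afterwards.
//
//     let mut cache = Ops::new();
//     let ptr = cache
//         .entry("lib/base.ucg")
//         .get_pointer_or_else(|| compile_file("lib/base.ucg"), "lib/base.ucg")?;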
| 28.709677 | 87 | 0.597753 |
e938b736be039a46010672b05e79a45767eaca79 | 3,170 | // Copyright lowRISC contributors.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
//! Schema for configuration files, exact encoding json/xml to be worked out.
use crate::io::gpio::{PinMode, PullMode};
use serde::Deserialize;
/// Configuration of a particular GPIO pin.
#[derive(Deserialize, Clone, Debug)]
pub struct PinConfiguration {
/// The user-visible name of the GPIO pin.
pub name: String,
/// The input/output mode of the GPIO pin.
pub mode: Option<PinMode>,
/// The default/initial level of the pin (true means high).
pub level: Option<bool>,
/// Whether the pin has pullup/down resistor enabled.
pub pull_mode: Option<PullMode>,
/// Name of a pin defined by the transport (or a lower level
/// PinConfiguration).
pub alias_of: Option<String>,
}
/// Configuration of a particular GPIO pin.
#[derive(Deserialize, Clone, Debug)]
pub struct StrappingConfiguration {
/// The user-visible name of the strapping combination.
pub name: String,
    /// List of GPIO pin configurations (the `alias_of` field should not be used in these).
#[serde(default)]
pub pins: Vec<PinConfiguration>,
}
/// Parity configuration for UART communication.
#[derive(Deserialize, Clone, Debug)]
pub enum UartParity {
None,
Even,
Odd,
Mark,
Space,
}
/// Stop bits configuration for UART communication.
#[derive(Deserialize, Clone, Debug)]
pub enum UartStopBits {
Stop1,
Stop1_5,
Stop2,
}
/// Configuration of a particular UART port.
#[derive(Deserialize, Clone, Debug)]
pub struct UartConfiguration {
/// The user-visible name of the UART.
pub name: String,
/// Data communication rate in bits/second.
pub baudrate: Option<u32>,
/// Parity configuration for UART communication.
pub parity: Option<UartParity>,
/// Stop bits configuration for UART communication.
pub stopbits: Option<UartStopBits>,
/// Name of the UART as defined by the transport.
pub alias_of: Option<String>,
}
/// Configuration of a particular SPI controller port.
#[derive(Deserialize, Clone, Debug)]
pub struct SpiConfiguration {
/// The user-visible name of the SPI controller port.
pub name: String,
/// Name of the SPI controller as defined by the transport.
pub alias_of: Option<String>,
}
/// Representation of the complete and unresolved content of a single
/// confguration file.
#[derive(Deserialize, Clone, Debug)]
pub struct ConfigurationFile {
    /// Optional specification of the transport backend to which this
    /// configuration applies (to be implemented).
pub interface: Option<String>,
/// List of names of other configuration files to include recursively.
#[serde(default)]
pub includes: Vec<String>,
/// List of GPIO pin configurations.
#[serde(default)]
pub pins: Vec<PinConfiguration>,
/// List of named sets of additional GPIO pin configurations (pullup/pulldown).
#[serde(default)]
pub strappings: Vec<StrappingConfiguration>,
/// List of UART configurations.
#[serde(default)]
pub uarts: Vec<UartConfiguration>,
}
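// A deserialization sketch (assumptions: a JSON encoding, which the header
// notes is not finalized, and a `PinMode::PushPull` variant name — both are
// illustrative only):
//
//     let cfg: ConfigurationFile = serde_json::from_str(
//         r#"{ "interface": "ultradebug",
//              "pins": [ { "name": "RESET", "mode": "PushPull", "level": true } ] }"#,
//     )?;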
| 32.020202 | 89 | 0.700946 |
9153126fd61781cb1841d6b43ce50da959926d6c | 873 | use regex::Regex;
use crate::tipos::{Ambiente, Uf};
/// Validates an NF-e access key using the rule `^[0-9]{44}$` extracted from SEFAZ's "tiposBasico_v4.00.xsd" file.
pub fn validar_chave(chave: &str) -> bool {
Regex::new("^[0-9]{44}$")
.map(|re| re.is_match(chave))
.unwrap_or_default()
}
/// Validates a receipt number using the rule `^[0-9]{15}$` extracted from SEFAZ's "tiposBasico_v4.00.xsd" file.
pub fn validar_recibo(chave: &str) -> bool {
Regex::new("^[0-9]{15}$")
.map(|re| re.is_match(chave))
.unwrap_or_default()
}
/// Validates that the UF is present in the list of valid UFs.
#[inline]
pub fn validar_uf(uf: &str) -> bool {
Uf::from_str(uf).is_some()
}
/// Validates that the Ambiente is present in the list of valid Ambientes.
#[inline]
pub fn validar_ambiente(ambiente: &str) -> bool {
Ambiente::from_str(ambiente).is_some()
}
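// A minimal check of the validators above (not part of the original file):
// an access key is exactly 44 digits and a receipt number exactly 15.
#[cfg(test)]
mod validation_examples {
    use super::*;

    #[test]
    fn key_and_receipt_lengths() {
        assert!(validar_chave(&"4".repeat(44)));
        assert!(!validar_chave("123"));
        assert!(validar_recibo(&"1".repeat(15)));
        assert!(!validar_recibo(&"1".repeat(44)));
    }
}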
| 29.1 | 108 | 0.648339 |
1670eed0d2bd956c691c59ee09f64d42ba590a8d | 3,206 | use std::cell::RefCell;
use std::rc::Rc;
use types::*;
use event::EventType;
use instruction::Instruction;
pub type Block = Vec<Instruction>;
pub type Code = Vec<Block>;
pub type OrgRef = Rc<RefCell<Organism>>;
#[derive(Debug)]
pub struct Organism {
pub id: CellID,
pub step: Step,
pub x: Coord,
pub y: Coord,
pub nrg: Energy,
code: Code,
cb: CB,
ip: IP,
is_running: bool,
call: Vec<(CB, IP)>,
data: Vec<Number>,
}
impl Organism {
pub fn new(id: CellID, x: Coord, y: Coord, step: Step, nrg: Energy, code: Code) -> Organism {
Organism {
id: id,
step: step,
x: x,
y: y,
nrg: nrg,
code: code,
ip: 0,
cb: 0,
is_running: true,
call: Vec::new(),
data: Vec::new(),
}
}
pub fn step(&mut self) -> Option<EventType> {
let res;
if self.is_running() {
let instr = self.get_current_instr();
res = instr.exec(self);
if res.is_none() {
self.step += 1;
}
} else {
res = Some(EventType::Zombify);
}
// add code to check if this cell should die/zombify
// e.g. used too much energy
res
}
fn get_current_instr(&self) -> Instruction {
if self.ip < self.code[self.cb].len() {
self.code[self.cb][self.ip]
} else {
Instruction::Ret
}
}
fn is_running(&self) -> bool {
self.is_running
}
pub fn next_instr(&mut self) {
self.ip += 1;
}
pub fn call(&mut self, cb: Number) {
assert!(self.is_running());
if cb >= 0 && cb < self.code.len() as i64 {
let cb = cb as usize;
self.call.push((self.cb,self.ip));
self.ip = 0;
self.cb = cb;
}
}
// loop is a keyword
pub fn looop(&mut self) {
assert!(self.is_running());
self.ip = 0;
}
pub fn ret(&mut self) {
assert!(self.is_running());
if self.call.is_empty() {
self.is_running = false;
} else {
let (cb, ip) = self.call.pop().unwrap();
self.ip = ip;
self.cb = cb;
}
}
pub fn push(&mut self, n: Number) {
self.data.push(n);
}
pub fn push_all(&mut self, ns: Vec<Number>) {
        // `push_all` never stabilized; `extend` is the stable equivalent.
        self.data.extend(ns);
}
pub fn pop(&mut self, n: Number) -> Option<Vec<Number>> {
assert!(n >= 0);
let n = n as usize;
if self.data.len() >= n {
let temp = self.data.clone();
let (base, split) = temp.split_at(self.data.len() - n);
self.data = base.to_vec();
Some(split.to_vec())
} else {
None
}
}
pub fn peek(&mut self, n: Number) -> Option<Vec<Number>> {
assert!(n >= 0);
let n = n as usize;
if self.data.len() >= n {
let temp = self.data.clone();
let (_, split) = temp.split_at(self.data.len() - n);
Some(split.to_vec())
} else {
None
}
}
}
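// Stack semantics sketch: `push`, `pop`, and `peek` operate on the tail of
// `data`. After pushing 1, 2, 3, `pop(2)` returns `Some(vec![2, 3])` and
// leaves `[1]`, while `peek(2)` returns the same values without consuming
// them; both return `None` when fewer than `n` values are available.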
| 23.573529 | 97 | 0.467873 |
5b796c1a3272cc632a34340fdb8c25eeb3dfe777 | 9,286 | use std::collections::HashMap;
use async_trait::async_trait;
use dapr::proto::{common::v1 as common_v1, runtime::v1 as dapr_v1};
use prost_types::Any;
use tonic::{transport::Channel as TonicChannel, Request};
use crate::dapr::*;
use crate::error::Error;
pub struct Client<T>(T);
impl<T: DaprInterface> Client<T> {
/// Connect to a Dapr enabled app.
///
/// # Arguments
///
/// * `addr` - Address of gRPC server to connect to.
pub async fn connect(addr: String) -> Result<Self, Error> {
Ok(Client(T::connect(addr).await?))
}
/// Invoke a method in a Dapr enabled app.
///
/// # Arguments
///
/// * `app_id` - Id of the application running.
/// * `method_name` - Name of the method to invoke.
/// * `data` - Required. Bytes value or data required to invoke service.
pub async fn invoke_service<I, M>(
&mut self,
app_id: I,
method_name: M,
data: Option<Any>,
) -> Result<InvokeServiceResponse, Error>
where
I: Into<String>,
M: Into<String>,
{
self.0
.invoke_service(InvokeServiceRequest {
id: app_id.into(),
message: common_v1::InvokeRequest {
method: method_name.into(),
data,
..Default::default()
}
.into(),
})
.await
}
    /// Invoke a Dapr output binding.
///
/// # Arguments
///
/// * `name` - The name of the output binding to invoke.
/// * `data` - The data which will be sent to the output binding.
pub async fn invoke_binding<S>(
&mut self,
name: S,
data: Vec<u8>,
) -> Result<InvokeBindingResponse, Error>
where
S: Into<String>,
{
self.0
.invoke_binding(InvokeBindingRequest {
name: name.into(),
data,
..Default::default()
})
.await
}
/// Publish a payload to multiple consumers who are listening on a topic.
///
/// Dapr guarantees at least once semantics for this endpoint.
///
/// # Arguments
///
/// * `pubsub_name` - Name of the pubsub component
/// * `topic` - Pubsub topic.
/// * `data` - The data which will be published to topic.
pub async fn publish_event<S>(
&mut self,
pubsub_name: S,
topic: S,
data: Vec<u8>,
) -> Result<(), Error>
where
S: Into<String>,
{
self.0
.publish_event(PublishEventRequest {
pubsub_name: pubsub_name.into(),
topic: topic.into(),
data,
})
.await
}
/// Get the secret for a specific key.
///
/// # Arguments
///
/// * `store_name` - The name of secret store.
/// * `key` - The name of secret key.
pub async fn get_secret<S>(&mut self, store_name: S, key: S) -> Result<GetSecretResponse, Error>
where
S: Into<String>,
{
self.0
.get_secret(GetSecretRequest {
store_name: store_name.into(),
key: key.into(),
..Default::default()
})
.await
}
/// Get the state for a specific key.
///
/// # Arguments
///
    /// * `store_name` - The name of the state store.
/// * `key` - The key of the desired state.
pub async fn get_state<S>(
&mut self,
store_name: S,
key: S,
metadata: Option<HashMap<String, String>>,
) -> Result<GetStateResponse, Error>
where
S: Into<String>,
{
let mut mdata = HashMap::<String, String>::new();
if let Some(m) = metadata {
mdata = m;
}
self.0
.get_state(GetStateRequest {
store_name: store_name.into(),
key: key.into(),
metadata: mdata,
..Default::default()
})
.await
}
/// Save an array of state objects.
///
/// # Arguments
///
    /// * `store_name` - The name of the state store.
    /// * `states` - The array of state key-value pairs.
pub async fn save_state<I, K>(&mut self, store_name: K, states: I) -> Result<(), Error>
where
I: IntoIterator<Item = (K, Vec<u8>)>,
K: Into<String>,
{
self.0
.save_state(SaveStateRequest {
store_name: store_name.into(),
states: states.into_iter().map(|pair| pair.into()).collect(),
})
.await
}
/// Delete the state for a specific key.
///
/// # Arguments
///
    /// * `store_name` - The name of the state store.
/// * `key` - The key of the desired state.
pub async fn delete_state<S>(
&mut self,
store_name: S,
key: S,
metadata: Option<HashMap<String, String>>,
) -> Result<(), Error>
where
S: Into<String>,
{
let mut mdata = HashMap::<String, String>::new();
if let Some(m) = metadata {
mdata = m;
}
self.0
.delete_state(DeleteStateRequest {
store_name: store_name.into(),
key: key.into(),
metadata: mdata,
..Default::default()
})
.await
}
}
#[async_trait]
pub trait DaprInterface: Sized {
async fn connect(addr: String) -> Result<Self, Error>;
async fn publish_event(&mut self, request: PublishEventRequest) -> Result<(), Error>;
async fn invoke_service(
&mut self,
request: InvokeServiceRequest,
) -> Result<InvokeServiceResponse, Error>;
async fn invoke_binding(
&mut self,
request: InvokeBindingRequest,
) -> Result<InvokeBindingResponse, Error>;
async fn get_secret(&mut self, request: GetSecretRequest) -> Result<GetSecretResponse, Error>;
async fn get_state(&mut self, request: GetStateRequest) -> Result<GetStateResponse, Error>;
async fn save_state(&mut self, request: SaveStateRequest) -> Result<(), Error>;
async fn delete_state(&mut self, request: DeleteStateRequest) -> Result<(), Error>;
}
#[async_trait]
impl DaprInterface for dapr_v1::dapr_client::DaprClient<TonicChannel> {
async fn connect(addr: String) -> Result<Self, Error> {
Ok(dapr_v1::dapr_client::DaprClient::connect(addr).await?)
}
async fn invoke_service(
&mut self,
request: InvokeServiceRequest,
) -> Result<InvokeServiceResponse, Error> {
Ok(self
.invoke_service(Request::new(request))
.await?
.into_inner())
}
async fn invoke_binding(
&mut self,
request: InvokeBindingRequest,
) -> Result<InvokeBindingResponse, Error> {
Ok(self
.invoke_binding(Request::new(request))
.await?
.into_inner())
}
async fn publish_event(&mut self, request: PublishEventRequest) -> Result<(), Error> {
Ok(self
.publish_event(Request::new(request))
.await?
.into_inner())
}
async fn get_secret(&mut self, request: GetSecretRequest) -> Result<GetSecretResponse, Error> {
Ok(self.get_secret(Request::new(request)).await?.into_inner())
}
async fn get_state(&mut self, request: GetStateRequest) -> Result<GetStateResponse, Error> {
Ok(self.get_state(Request::new(request)).await?.into_inner())
}
async fn save_state(&mut self, request: SaveStateRequest) -> Result<(), Error> {
Ok(self.save_state(Request::new(request)).await?.into_inner())
}
async fn delete_state(&mut self, request: DeleteStateRequest) -> Result<(), Error> {
Ok(self.delete_state(Request::new(request)).await?.into_inner())
}
}
/// A request for invoking a service
pub type InvokeServiceRequest = dapr_v1::InvokeServiceRequest;
/// A response from invoking a service
pub type InvokeServiceResponse = common_v1::InvokeResponse;
/// A request for invoking a binding
pub type InvokeBindingRequest = dapr_v1::InvokeBindingRequest;
/// A response from invoking a binding
pub type InvokeBindingResponse = dapr_v1::InvokeBindingResponse;
/// A request for publishing an event
pub type PublishEventRequest = dapr_v1::PublishEventRequest;
/// A request for getting state
pub type GetStateRequest = dapr_v1::GetStateRequest;
/// A response from getting state
pub type GetStateResponse = dapr_v1::GetStateResponse;
/// A request for saving state
pub type SaveStateRequest = dapr_v1::SaveStateRequest;
/// A request for deleting state
pub type DeleteStateRequest = dapr_v1::DeleteStateRequest;
/// A request for getting a secret
pub type GetSecretRequest = dapr_v1::GetSecretRequest;
/// A response from getting a secret
pub type GetSecretResponse = dapr_v1::GetSecretResponse;
/// A tonic based gRPC client
pub type TonicClient = dapr_v1::dapr_client::DaprClient<TonicChannel>;
impl<K> From<(K, Vec<u8>)> for common_v1::StateItem
where
K: Into<String>,
{
fn from((key, value): (K, Vec<u8>)) -> Self {
common_v1::StateItem {
key: key.into(),
value,
..Default::default()
}
}
}
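/// Editor's sketch (illustrative addition, not part of the generated client):
/// a state round trip written directly against the `DaprInterface` trait
/// above, so it is independent of any concrete wrapper type. The store name,
/// key, and sidecar address are assumptions, and the `data` field on
/// `GetStateResponse` is assumed from the Dapr proto definitions.
pub async fn state_round_trip_example<T: DaprInterface>() -> Result<(), Error> {
    // Connect to a locally running Dapr sidecar (address is illustrative).
    let mut dapr = T::connect("https://127.0.0.1:50001".to_string()).await?;
    // Save one key; the `From<(K, Vec<u8>)>` impl above builds the StateItem.
    dapr.save_state(SaveStateRequest {
        store_name: "statestore".to_string(),
        states: vec![("hello", b"world".to_vec()).into()],
    })
    .await?;
    // Read it back and check the payload survived the round trip.
    let resp = dapr
        .get_state(GetStateRequest {
            store_name: "statestore".to_string(),
            key: "hello".to_string(),
            ..Default::default()
        })
        .await?;
    assert_eq!(resp.data, b"world".to_vec());
    // Clean up the key we created.
    dapr.delete_state(DeleteStateRequest {
        store_name: "statestore".to_string(),
        key: "hello".to_string(),
        ..Default::default()
    })
    .await?;
    Ok(())
}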
| 29.201258 | 100 | 0.573229 |
119485c916377d261cdc028e3d1f599e966ab40b | 1,236 | #![no_std]
#![no_main]
use board::prelude::*;
use board_kontrolir as board;
use avr_hal_generic::hal::digital::v2::ToggleableOutputPin;
use board_kontrolir::keypad::{Key, Keypad};
use panic_halt as _;
#[board::entry]
fn main() -> ! {
let dp = board::Peripherals::take().unwrap();
let mut pins = board::Pins::new(dp.PORTB, dp.PORTC, dp.PORTD, dp.PORTE);
let mut serial = board::Serial::new(
dp.USART0,
pins.rxd,
pins.txd.into_output(&mut pins.ddr),
57600,
);
let mut led = pins.led.into_output(&mut pins.ddr);
let mut kbd = Keypad::new(
pins.row1,
pins.row2,
pins.row3,
pins.row4,
pins.row5,
pins.row6,
pins.row7,
pins.col1,
pins.col2,
pins.col3,
pins.col4,
pins.col5,
pins.col6,
pins.col7,
&mut pins.ddr,
);
ufmt::uwriteln!(&mut serial, "Hello from Arduino!\r").void_unwrap();
loop {
board::delay_ms(100);
if let Some(code) = kbd.scancode() {
ufmt::uwriteln!(&mut serial, "Scancode {:?} ({}) \r", Key::from(code), code)
.void_unwrap();
led.toggle().void_unwrap();
}
}
}
| 22.071429 | 88 | 0.540453 |
f85401da55960bf3e6827ddcd039258d94ad1f1c | 544 | #[macro_use]
extern crate criterion;
extern crate tract_linalg;
use criterion::Criterion;
fn ssigmoid(c: &mut Criterion, n: usize) {
c.bench_function(&format!("ssigmoid_{}", n), move |be| {
let mut s = (0..n).map(|i| i as f32 / 10.0).collect::<Vec<f32>>();
let ref op = (tract_linalg::ops().ssigmoid)();
be.iter(|| op.run(&mut s));
});
}
fn bs(c: &mut Criterion) {
ssigmoid(c, 4);
ssigmoid(c, 8);
ssigmoid(c, 128);
ssigmoid(c, 1024);
}
criterion_group!(benches, bs);
criterion_main!(benches);
| 23.652174 | 74 | 0.601103 |
5607d03bf90fe6d907d51cb5d2e31bfe8b6fec4a | 54,336 | // Copyright (C) 2015-2018 Swift Navigation Inc.
// Contact: https://support.swiftnav.com
//
// This source is subject to the license found in the file 'LICENSE' which must
// be distributed together with this source. All other rights reserved.
//
// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
// EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
//****************************************************************************
// Automatically generated from yaml/swiftnav/sbp/piksi.yaml
// with generate.py. Please do not hand edit!
//****************************************************************************/
//! System health, configuration, and diagnostic messages specific to
//! the Piksi L1 receiver, including a variety of legacy messages that
//! may no longer be used.
//!
extern crate byteorder;
#[allow(unused_imports)]
use self::byteorder::{LittleEndian, ReadBytesExt};
#[cfg(feature = "sbp_serde")]
use serde::{Deserialize, Serialize};
use super::gnss::*;
#[allow(unused_imports)]
use crate::SbpString;
/// Receiver-to-base station latency
///
/// Statistics on the latency of observations received from the base
/// station. As observation packets are received their GPS time is
/// compared to the current GPS time calculated locally by the
/// receiver to give a precise measurement of the end-to-end
/// communication latency in the system.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct Latency {
/// Average latency
pub avg: i32,
/// Minimum latency
pub lmin: i32,
/// Maximum latency
pub lmax: i32,
/// Smoothed estimate of the current latency
pub current: i32,
}
impl Latency {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<Latency, crate::Error> {
Ok( Latency{
avg: _buf.read_i32::<LittleEndian>()?,
lmin: _buf.read_i32::<LittleEndian>()?,
lmax: _buf.read_i32::<LittleEndian>()?,
current: _buf.read_i32::<LittleEndian>()?,
} )
}
pub fn parse_array(buf: &mut &[u8]) -> Result<Vec<Latency>, crate::Error> {
let mut v = Vec::new();
while buf.len() > 0 {
v.push(Latency::parse(buf)?);
}
Ok(v)
}
pub fn parse_array_limit(buf: &mut &[u8], n: usize) -> Result<Vec<Latency>, crate::Error> {
let mut v = Vec::new();
for _ in 0..n {
v.push(Latency::parse(buf)?);
}
Ok(v)
}
}
impl crate::serialize::SbpSerialize for Latency {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.avg.append_to_sbp_buffer(buf);
self.lmin.append_to_sbp_buffer(buf);
self.lmax.append_to_sbp_buffer(buf);
self.current.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.avg.sbp_size();
size += self.lmin.sbp_size();
size += self.lmax.sbp_size();
size += self.current.sbp_size();
size
}
}
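// Editor's sketch: a parse/serialize round trip for `Latency`, showing that
// the generated `parse` and `SbpSerialize` halves mirror each other over the
// wire format of four little-endian i32 fields. Values are illustrative.
#[cfg(test)]
mod latency_round_trip_example {
    use super::*;
    use crate::serialize::SbpSerialize;
    #[test]
    fn latency_round_trips() {
        let original = Latency { avg: 120, lmin: 80, lmax: 200, current: 110 };
        let mut wire = Vec::new();
        original.append_to_sbp_buffer(&mut wire);
        // Four i32 fields, four bytes each.
        assert_eq!(wire.len(), 16);
        assert_eq!(wire.len(), original.sbp_size());
        let parsed = Latency::parse(&mut wire.as_slice()).expect("valid buffer");
        assert_eq!(parsed.avg, original.avg);
        assert_eq!(parsed.current, original.current);
    }
}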
/// Legacy message to load satellite almanac (host => Piksi)
///
/// This is a legacy message for sending and loading a satellite
/// almanac onto the Piksi's flash memory from the host.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgAlmanac {
pub sender_id: Option<u16>,
}
impl MsgAlmanac {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgAlmanac, crate::Error> {
Ok( MsgAlmanac{
sender_id: None,
} )
}
}
impl super::SBPMessage for MsgAlmanac {
fn get_message_type(&self) -> u16 {
105
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgAlmanac {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {}
fn sbp_size(&self) -> usize {
0
}
}
/// Cell modem information update message
///
/// If a cell modem is present on a Piksi device, this message
/// will be sent periodically to update the host on the status
/// of the modem and its various parameters.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgCellModemStatus {
pub sender_id: Option<u16>,
/// Received cell signal strength in dBm, zero translates to unknown
pub signal_strength: i8,
/// BER as reported by the modem, zero translates to unknown
pub signal_error_rate: f32,
/// Unspecified data TBD for this schema
pub reserved: Vec<u8>,
}
impl MsgCellModemStatus {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgCellModemStatus, crate::Error> {
Ok( MsgCellModemStatus{
sender_id: None,
signal_strength: _buf.read_i8()?,
signal_error_rate: _buf.read_f32::<LittleEndian>()?,
reserved: crate::parser::read_u8_array(_buf)?,
} )
}
}
impl super::SBPMessage for MsgCellModemStatus {
fn get_message_type(&self) -> u16 {
190
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgCellModemStatus {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.signal_strength.append_to_sbp_buffer(buf);
self.signal_error_rate.append_to_sbp_buffer(buf);
self.reserved.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.signal_strength.sbp_size();
size += self.signal_error_rate.sbp_size();
size += self.reserved.sbp_size();
size
}
}
/// Command output
///
/// Returns the standard output and standard error of the
/// command requested by MSG_COMMAND_REQ.
/// The sequence number can be used to filter the output
/// for the correct command.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgCommandOutput {
pub sender_id: Option<u16>,
/// Sequence number
pub sequence: u32,
/// Line of standard output or standard error
pub line: SbpString,
}
impl MsgCommandOutput {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgCommandOutput, crate::Error> {
Ok( MsgCommandOutput{
sender_id: None,
sequence: _buf.read_u32::<LittleEndian>()?,
line: crate::parser::read_string(_buf)?,
} )
}
}
impl super::SBPMessage for MsgCommandOutput {
fn get_message_type(&self) -> u16 {
188
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgCommandOutput {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.sequence.append_to_sbp_buffer(buf);
self.line.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.sequence.sbp_size();
size += self.line.sbp_size();
size
}
}
/// Execute a command (host => device)
///
/// Request the recipient to execute a command.
/// Output will be sent in MSG_LOG messages, and the exit
/// code will be returned with MSG_COMMAND_RESP.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgCommandReq {
pub sender_id: Option<u16>,
/// Sequence number
pub sequence: u32,
/// Command line to execute
pub command: SbpString,
}
impl MsgCommandReq {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgCommandReq, crate::Error> {
Ok( MsgCommandReq{
sender_id: None,
sequence: _buf.read_u32::<LittleEndian>()?,
command: crate::parser::read_string(_buf)?,
} )
}
}
impl super::SBPMessage for MsgCommandReq {
fn get_message_type(&self) -> u16 {
184
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgCommandReq {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.sequence.append_to_sbp_buffer(buf);
self.command.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.sequence.sbp_size();
size += self.command.sbp_size();
size
}
}
/// Exit code from executed command (device => host)
///
/// The response to MSG_COMMAND_REQ with the return code of
/// the command. A return code of zero indicates success.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgCommandResp {
pub sender_id: Option<u16>,
/// Sequence number
pub sequence: u32,
/// Exit code
pub code: i32,
}
impl MsgCommandResp {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgCommandResp, crate::Error> {
Ok( MsgCommandResp{
sender_id: None,
sequence: _buf.read_u32::<LittleEndian>()?,
code: _buf.read_i32::<LittleEndian>()?,
} )
}
}
impl super::SBPMessage for MsgCommandResp {
fn get_message_type(&self) -> u16 {
185
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgCommandResp {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.sequence.append_to_sbp_buffer(buf);
self.code.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.sequence.sbp_size();
size += self.code.sbp_size();
size
}
}
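/// Editor's sketch: correlating command output with the request that produced
/// it via the shared sequence number, as described in the MSG_COMMAND_* docs
/// above. Purely illustrative; not part of the generated message set.
pub fn output_for_request<'a>(
    request: &MsgCommandReq,
    outputs: &'a [MsgCommandOutput],
) -> Vec<&'a MsgCommandOutput> {
    outputs
        .iter()
        .filter(|output| output.sequence == request.sequence)
        .collect()
}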
/// Legacy message for CW interference channel (Piksi => host)
///
/// This is an unused legacy message for result reporting from the
/// CW interference channel on the SwiftNAP. This message will be
/// removed in a future release.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgCwResults {
pub sender_id: Option<u16>,
}
impl MsgCwResults {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgCwResults, crate::Error> {
Ok( MsgCwResults{
sender_id: None,
} )
}
}
impl super::SBPMessage for MsgCwResults {
fn get_message_type(&self) -> u16 {
192
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgCwResults {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {}
fn sbp_size(&self) -> usize {
0
}
}
/// Legacy message for CW interference channel (host => Piksi)
///
/// This is an unused legacy message from the host for starting
/// the CW interference channel on the SwiftNAP. This message will
/// be removed in a future release.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgCwStart {
pub sender_id: Option<u16>,
}
impl MsgCwStart {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgCwStart, crate::Error> {
Ok( MsgCwStart{
sender_id: None,
} )
}
}
impl super::SBPMessage for MsgCwStart {
fn get_message_type(&self) -> u16 {
193
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgCwStart {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {}
fn sbp_size(&self) -> usize {
0
}
}
/// Device temperature and voltage levels
///
/// This message contains temperature and voltage level measurements from the
/// processor's monitoring system and the RF frontend die temperature if
/// available.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgDeviceMonitor {
pub sender_id: Option<u16>,
/// Device V_in
pub dev_vin: i16,
/// Processor V_int
pub cpu_vint: i16,
/// Processor V_aux
pub cpu_vaux: i16,
/// Processor temperature
pub cpu_temperature: i16,
/// Frontend temperature (if available)
pub fe_temperature: i16,
}
impl MsgDeviceMonitor {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgDeviceMonitor, crate::Error> {
Ok( MsgDeviceMonitor{
sender_id: None,
dev_vin: _buf.read_i16::<LittleEndian>()?,
cpu_vint: _buf.read_i16::<LittleEndian>()?,
cpu_vaux: _buf.read_i16::<LittleEndian>()?,
cpu_temperature: _buf.read_i16::<LittleEndian>()?,
fe_temperature: _buf.read_i16::<LittleEndian>()?,
} )
}
}
impl super::SBPMessage for MsgDeviceMonitor {
fn get_message_type(&self) -> u16 {
181
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgDeviceMonitor {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.dev_vin.append_to_sbp_buffer(buf);
self.cpu_vint.append_to_sbp_buffer(buf);
self.cpu_vaux.append_to_sbp_buffer(buf);
self.cpu_temperature.append_to_sbp_buffer(buf);
self.fe_temperature.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.dev_vin.sbp_size();
size += self.cpu_vint.sbp_size();
size += self.cpu_vaux.sbp_size();
size += self.cpu_temperature.sbp_size();
size += self.fe_temperature.sbp_size();
size
}
}
/// RF AGC status
///
/// This message describes the gain of each channel in the receiver frontend. Each
/// gain is encoded as a non-dimensional percentage relative to the maximum range
/// possible for the gain stage of the frontend. By convention, each gain array
/// has 8 entries, and the index into the array corresponds to the index of the RF channel
/// in the frontend. A gain of 127 percent encodes that the RF channel is not present in the hardware.
/// A negative value implies an error for the particular gain stage as reported by the frontend.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgFrontEndGain {
pub sender_id: Option<u16>,
/// RF gain for each frontend channel
pub rf_gain: Vec<i8>,
/// Intermediate frequency gain for each frontend channel
pub if_gain: Vec<i8>,
}
impl MsgFrontEndGain {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgFrontEndGain, crate::Error> {
Ok( MsgFrontEndGain{
sender_id: None,
rf_gain: crate::parser::read_s8_array_limit(_buf, 8)?,
if_gain: crate::parser::read_s8_array_limit(_buf, 8)?,
} )
}
}
impl super::SBPMessage for MsgFrontEndGain {
fn get_message_type(&self) -> u16 {
191
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgFrontEndGain {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.rf_gain.append_to_sbp_buffer(buf);
self.if_gain.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.rf_gain.sbp_size();
size += self.if_gain.sbp_size();
size
}
}
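/// Editor's sketch: decoding one gain entry of `MsgFrontEndGain` per the
/// conventions documented above (127 percent encodes an absent RF channel,
/// a negative value encodes a frontend-reported error). The enum is an
/// editorial helper, not part of the SBP protocol.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GainReading {
    /// Gain as a percentage of the stage's maximum range.
    Percent(i8),
    /// The RF channel is not present in the hardware.
    NotPresent,
    /// The frontend reported an error for this gain stage.
    Error(i8),
}
pub fn decode_gain(raw: i8) -> GainReading {
    match raw {
        127 => GainReading::NotPresent,
        g if g < 0 => GainReading::Error(g),
        g => GainReading::Percent(g),
    }
}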
/// State of the Integer Ambiguity Resolution (IAR) process
///
/// This message reports the state of the Integer Ambiguity
/// Resolution (IAR) process, which resolves unknown integer
/// ambiguities from double-differenced carrier-phase measurements
/// from satellite observations.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgIarState {
pub sender_id: Option<u16>,
/// Number of integer ambiguity hypotheses remaining
pub num_hyps: u32,
}
impl MsgIarState {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgIarState, crate::Error> {
Ok( MsgIarState{
sender_id: None,
num_hyps: _buf.read_u32::<LittleEndian>()?,
} )
}
}
impl super::SBPMessage for MsgIarState {
fn get_message_type(&self) -> u16 {
25
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgIarState {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.num_hyps.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.num_hyps.sbp_size();
size
}
}
/// Deprecated
///
/// Deprecated
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgInitBaseDep {
pub sender_id: Option<u16>,
}
impl MsgInitBaseDep {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgInitBaseDep, crate::Error> {
Ok( MsgInitBaseDep{
sender_id: None,
} )
}
}
impl super::SBPMessage for MsgInitBaseDep {
fn get_message_type(&self) -> u16 {
35
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgInitBaseDep {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {}
fn sbp_size(&self) -> usize {
0
}
}
/// Mask a satellite from use in Piksi subsystems
///
/// This message allows setting a mask to prevent a particular satellite
/// from being used in various Piksi subsystems.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgMaskSatellite {
pub sender_id: Option<u16>,
/// Mask of systems that should ignore this satellite.
pub mask: u8,
/// GNSS signal for which the mask is applied
pub sid: GnssSignal,
}
impl MsgMaskSatellite {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgMaskSatellite, crate::Error> {
Ok( MsgMaskSatellite{
sender_id: None,
mask: _buf.read_u8()?,
sid: GnssSignal::parse(_buf)?,
} )
}
}
impl super::SBPMessage for MsgMaskSatellite {
fn get_message_type(&self) -> u16 {
43
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgMaskSatellite {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.mask.append_to_sbp_buffer(buf);
self.sid.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.mask.sbp_size();
size += self.sid.sbp_size();
size
}
}
/// Deprecated
///
/// Deprecated.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgMaskSatelliteDep {
pub sender_id: Option<u16>,
/// Mask of systems that should ignore this satellite.
pub mask: u8,
/// GNSS signal for which the mask is applied
pub sid: GnssSignalDep,
}
impl MsgMaskSatelliteDep {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgMaskSatelliteDep, crate::Error> {
Ok( MsgMaskSatelliteDep{
sender_id: None,
mask: _buf.read_u8()?,
sid: GnssSignalDep::parse(_buf)?,
} )
}
}
impl super::SBPMessage for MsgMaskSatelliteDep {
fn get_message_type(&self) -> u16 {
27
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgMaskSatelliteDep {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.mask.append_to_sbp_buffer(buf);
self.sid.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.mask.sbp_size();
size += self.sid.sbp_size();
size
}
}
/// Bandwidth usage reporting message
///
/// The bandwidth usage, reported as a list of usage entries, one per interface.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgNetworkBandwidthUsage {
pub sender_id: Option<u16>,
/// Usage measurement array
pub interfaces: Vec<NetworkUsage>,
}
impl MsgNetworkBandwidthUsage {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgNetworkBandwidthUsage, crate::Error> {
Ok( MsgNetworkBandwidthUsage{
sender_id: None,
interfaces: NetworkUsage::parse_array(_buf)?,
} )
}
}
impl super::SBPMessage for MsgNetworkBandwidthUsage {
fn get_message_type(&self) -> u16 {
189
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgNetworkBandwidthUsage {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.interfaces.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.interfaces.sbp_size();
size
}
}
/// Request state of Piksi network interfaces
///
/// Request state of Piksi network interfaces.
/// Output will be sent in MSG_NETWORK_STATE_RESP messages
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgNetworkStateReq {
pub sender_id: Option<u16>,
}
impl MsgNetworkStateReq {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgNetworkStateReq, crate::Error> {
Ok( MsgNetworkStateReq{
sender_id: None,
} )
}
}
impl super::SBPMessage for MsgNetworkStateReq {
fn get_message_type(&self) -> u16 {
186
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgNetworkStateReq {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {}
fn sbp_size(&self) -> usize {
0
}
}
/// State of network interface
///
/// The state of a network interface on the Piksi.
/// Data is made to reflect the output of the ifaddrs struct returned by
/// getifaddrs in C.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgNetworkStateResp {
pub sender_id: Option<u16>,
/// IPv4 address (all zero when unavailable)
pub ipv4_address: Vec<u8>,
/// IPv4 netmask CIDR notation
pub ipv4_mask_size: u8,
/// IPv6 address (all zero when unavailable)
pub ipv6_address: Vec<u8>,
/// IPv6 netmask CIDR notation
pub ipv6_mask_size: u8,
/// Number of Rx bytes
pub rx_bytes: u32,
/// Number of Tx bytes
pub tx_bytes: u32,
/// Interface Name
pub interface_name: SbpString,
/// Interface flags from SIOCGIFFLAGS
pub flags: u32,
}
impl MsgNetworkStateResp {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgNetworkStateResp, crate::Error> {
Ok( MsgNetworkStateResp{
sender_id: None,
ipv4_address: crate::parser::read_u8_array_limit(_buf, 4)?,
ipv4_mask_size: _buf.read_u8()?,
ipv6_address: crate::parser::read_u8_array_limit(_buf, 16)?,
ipv6_mask_size: _buf.read_u8()?,
rx_bytes: _buf.read_u32::<LittleEndian>()?,
tx_bytes: _buf.read_u32::<LittleEndian>()?,
interface_name: crate::parser::read_string_limit(_buf, 16)?,
flags: _buf.read_u32::<LittleEndian>()?,
} )
}
}
impl super::SBPMessage for MsgNetworkStateResp {
fn get_message_type(&self) -> u16 {
187
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgNetworkStateResp {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.ipv4_address.append_to_sbp_buffer(buf);
self.ipv4_mask_size.append_to_sbp_buffer(buf);
self.ipv6_address.append_to_sbp_buffer(buf);
self.ipv6_mask_size.append_to_sbp_buffer(buf);
self.rx_bytes.append_to_sbp_buffer(buf);
self.tx_bytes.append_to_sbp_buffer(buf);
self.interface_name.append_to_sbp_buffer(buf);
self.flags.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.ipv4_address.sbp_size();
size += self.ipv4_mask_size.sbp_size();
size += self.ipv6_address.sbp_size();
size += self.ipv6_mask_size.sbp_size();
size += self.rx_bytes.sbp_size();
size += self.tx_bytes.sbp_size();
size += self.interface_name.sbp_size();
size += self.flags.sbp_size();
size
}
}
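/// Editor's sketch: rendering the IPv4 interface address of
/// `MsgNetworkStateResp` in CIDR notation. It assumes `ipv4_address` holds
/// exactly four bytes in most-significant-first order; the byte order is an
/// assumption, as the generated message does not document it.
pub fn ipv4_cidr(msg: &MsgNetworkStateResp) -> Option<String> {
    // An all-zero address means "unavailable" per the field documentation.
    if msg.ipv4_address.len() != 4 || msg.ipv4_address.iter().all(|b| *b == 0) {
        return None;
    }
    let a = &msg.ipv4_address;
    Some(format!(
        "{}.{}.{}.{}/{}",
        a[0], a[1], a[2], a[3], msg.ipv4_mask_size
    ))
}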
/// Reset the device (host => Piksi)
///
/// This message from the host resets the Piksi back into the
/// bootloader.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgReset {
pub sender_id: Option<u16>,
/// Reset flags
pub flags: u32,
}
impl MsgReset {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgReset, crate::Error> {
Ok( MsgReset{
sender_id: None,
flags: _buf.read_u32::<LittleEndian>()?,
} )
}
}
impl super::SBPMessage for MsgReset {
fn get_message_type(&self) -> u16 {
182
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgReset {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.flags.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.flags.sbp_size();
size
}
}
/// Reset the device (host => Piksi)
///
/// This message from the host resets the Piksi back into the
/// bootloader.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgResetDep {
pub sender_id: Option<u16>,
}
impl MsgResetDep {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgResetDep, crate::Error> {
Ok( MsgResetDep{
sender_id: None,
} )
}
}
impl super::SBPMessage for MsgResetDep {
fn get_message_type(&self) -> u16 {
178
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgResetDep {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {}
fn sbp_size(&self) -> usize {
0
}
}
/// Reset IAR filters (host => Piksi)
///
/// This message resets either the DGNSS Kalman filters or Integer
/// Ambiguity Resolution (IAR) process.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgResetFilters {
pub sender_id: Option<u16>,
/// Filter flags
pub filter: u8,
}
impl MsgResetFilters {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgResetFilters, crate::Error> {
Ok( MsgResetFilters{
sender_id: None,
filter: _buf.read_u8()?,
} )
}
}
impl super::SBPMessage for MsgResetFilters {
fn get_message_type(&self) -> u16 {
34
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgResetFilters {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.filter.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.filter.sbp_size();
size
}
}
/// Send GPS time from host (host => Piksi)
///
/// This message sets up timing functionality using a coarse GPS
/// time estimate sent by the host.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgSetTime {
pub sender_id: Option<u16>,
}
impl MsgSetTime {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgSetTime, crate::Error> {
Ok( MsgSetTime{
sender_id: None,
} )
}
}
impl super::SBPMessage for MsgSetTime {
fn get_message_type(&self) -> u16 {
104
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgSetTime {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {}
fn sbp_size(&self) -> usize {
0
}
}
/// Spectrum analyzer
///
/// Spectrum analyzer packet.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgSpecan {
pub sender_id: Option<u16>,
/// Channel ID
pub channel_tag: u16,
/// Receiver time of this observation
pub t: GPSTime,
/// Reference frequency of this packet
pub freq_ref: f32,
/// Frequency step of points in this packet
pub freq_step: f32,
/// Reference amplitude of this packet
pub amplitude_ref: f32,
/// Amplitude unit value of points in this packet
pub amplitude_unit: f32,
/// Amplitude values (in the above units) of points in this packet
pub amplitude_value: Vec<u8>,
}
impl MsgSpecan {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgSpecan, crate::Error> {
Ok( MsgSpecan{
sender_id: None,
channel_tag: _buf.read_u16::<LittleEndian>()?,
t: GPSTime::parse(_buf)?,
freq_ref: _buf.read_f32::<LittleEndian>()?,
freq_step: _buf.read_f32::<LittleEndian>()?,
amplitude_ref: _buf.read_f32::<LittleEndian>()?,
amplitude_unit: _buf.read_f32::<LittleEndian>()?,
amplitude_value: crate::parser::read_u8_array(_buf)?,
} )
}
}
impl super::SBPMessage for MsgSpecan {
fn get_message_type(&self) -> u16 {
81
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgSpecan {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.channel_tag.append_to_sbp_buffer(buf);
self.t.append_to_sbp_buffer(buf);
self.freq_ref.append_to_sbp_buffer(buf);
self.freq_step.append_to_sbp_buffer(buf);
self.amplitude_ref.append_to_sbp_buffer(buf);
self.amplitude_unit.append_to_sbp_buffer(buf);
self.amplitude_value.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.channel_tag.sbp_size();
size += self.t.sbp_size();
size += self.freq_ref.sbp_size();
size += self.freq_step.sbp_size();
size += self.amplitude_ref.sbp_size();
size += self.amplitude_unit.sbp_size();
size += self.amplitude_value.sbp_size();
size
}
}
/// Deprecated
///
/// Deprecated.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgSpecanDep {
pub sender_id: Option<u16>,
/// Channel ID
pub channel_tag: u16,
/// Receiver time of this observation
pub t: GPSTimeDep,
/// Reference frequency of this packet
pub freq_ref: f32,
/// Frequency step of points in this packet
pub freq_step: f32,
/// Reference amplitude of this packet
pub amplitude_ref: f32,
/// Amplitude unit value of points in this packet
pub amplitude_unit: f32,
/// Amplitude values (in the above units) of points in this packet
pub amplitude_value: Vec<u8>,
}
impl MsgSpecanDep {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgSpecanDep, crate::Error> {
Ok( MsgSpecanDep{
sender_id: None,
channel_tag: _buf.read_u16::<LittleEndian>()?,
t: GPSTimeDep::parse(_buf)?,
freq_ref: _buf.read_f32::<LittleEndian>()?,
freq_step: _buf.read_f32::<LittleEndian>()?,
amplitude_ref: _buf.read_f32::<LittleEndian>()?,
amplitude_unit: _buf.read_f32::<LittleEndian>()?,
amplitude_value: crate::parser::read_u8_array(_buf)?,
} )
}
}
impl super::SBPMessage for MsgSpecanDep {
fn get_message_type(&self) -> u16 {
80
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgSpecanDep {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.channel_tag.append_to_sbp_buffer(buf);
self.t.append_to_sbp_buffer(buf);
self.freq_ref.append_to_sbp_buffer(buf);
self.freq_step.append_to_sbp_buffer(buf);
self.amplitude_ref.append_to_sbp_buffer(buf);
self.amplitude_unit.append_to_sbp_buffer(buf);
self.amplitude_value.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.channel_tag.sbp_size();
size += self.t.sbp_size();
size += self.freq_ref.sbp_size();
size += self.freq_step.sbp_size();
size += self.amplitude_ref.sbp_size();
size += self.amplitude_unit.sbp_size();
size += self.amplitude_value.sbp_size();
size
}
}
/// State of an RTOS thread
///
/// The thread usage message from the device reports real-time
/// operating system (RTOS) thread usage statistics for the named
/// thread. The reported percentage values must be normalized.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgThreadState {
pub sender_id: Option<u16>,
/// Thread name (NULL terminated)
pub name: SbpString,
/// Percentage CPU use for this thread. Values range from 0 to 1000 and need
/// to be renormalized to 100.
pub cpu: u16,
/// Free stack space for this thread
pub stack_free: u32,
}
impl MsgThreadState {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgThreadState, crate::Error> {
Ok( MsgThreadState{
sender_id: None,
name: crate::parser::read_string_limit(_buf, 20)?,
cpu: _buf.read_u16::<LittleEndian>()?,
stack_free: _buf.read_u32::<LittleEndian>()?,
} )
}
}
impl super::SBPMessage for MsgThreadState {
fn get_message_type(&self) -> u16 {
23
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgThreadState {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.name.append_to_sbp_buffer(buf);
self.cpu.append_to_sbp_buffer(buf);
self.stack_free.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.name.sbp_size();
size += self.cpu.sbp_size();
size += self.stack_free.sbp_size();
size
}
}
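/// Editor's sketch: `cpu` above spans 0..=1000, so renormalizing to a
/// conventional percentage is a division by ten.
pub fn thread_cpu_percent(msg: &MsgThreadState) -> f32 {
    f32::from(msg.cpu) / 10.0
}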
/// State of the UART channels
///
/// The UART message reports data latency and throughput of the UART
/// channels providing SBP I/O. On the default Piksi configuration,
/// UARTs A and B are used for telemetry radios, but can also be
/// host access ports for embedded hosts, or other interfaces in
/// future. The reported percentage values must be normalized.
/// Observations latency and period can be used to assess the
/// health of the differential corrections link. Latency provides
/// the timeliness of received base observations while the
/// period indicates their likelihood of transmission.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgUartState {
pub sender_id: Option<u16>,
/// State of UART A
pub uart_a: UARTChannel,
/// State of UART B
pub uart_b: UARTChannel,
/// State of UART FTDI (USB logger)
pub uart_ftdi: UARTChannel,
/// UART communication latency
pub latency: Latency,
/// Observation receipt period
pub obs_period: Period,
}
impl MsgUartState {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgUartState, crate::Error> {
Ok( MsgUartState{
sender_id: None,
uart_a: UARTChannel::parse(_buf)?,
uart_b: UARTChannel::parse(_buf)?,
uart_ftdi: UARTChannel::parse(_buf)?,
latency: Latency::parse(_buf)?,
obs_period: Period::parse(_buf)?,
} )
}
}
impl super::SBPMessage for MsgUartState {
fn get_message_type(&self) -> u16 {
29
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgUartState {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.uart_a.append_to_sbp_buffer(buf);
self.uart_b.append_to_sbp_buffer(buf);
self.uart_ftdi.append_to_sbp_buffer(buf);
self.latency.append_to_sbp_buffer(buf);
self.obs_period.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.uart_a.sbp_size();
size += self.uart_b.sbp_size();
size += self.uart_ftdi.sbp_size();
size += self.latency.sbp_size();
size += self.obs_period.sbp_size();
size
}
}
/// Deprecated
///
/// Deprecated
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgUartStateDepa {
pub sender_id: Option<u16>,
/// State of UART A
pub uart_a: UARTChannel,
/// State of UART B
pub uart_b: UARTChannel,
/// State of UART FTDI (USB logger)
pub uart_ftdi: UARTChannel,
/// UART communication latency
pub latency: Latency,
}
impl MsgUartStateDepa {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<MsgUartStateDepa, crate::Error> {
Ok( MsgUartStateDepa{
sender_id: None,
uart_a: UARTChannel::parse(_buf)?,
uart_b: UARTChannel::parse(_buf)?,
uart_ftdi: UARTChannel::parse(_buf)?,
latency: Latency::parse(_buf)?,
} )
}
}
impl super::SBPMessage for MsgUartStateDepa {
fn get_message_type(&self) -> u16 {
24
}
fn get_sender_id(&self) -> Option<u16> {
self.sender_id
}
fn set_sender_id(&mut self, new_id: u16) {
self.sender_id = Some(new_id);
}
fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::framer::FramerError> {
let trait_object = self as &dyn super::SBPMessage;
crate::framer::to_frame(trait_object)
}
}
impl crate::serialize::SbpSerialize for MsgUartStateDepa {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.uart_a.append_to_sbp_buffer(buf);
self.uart_b.append_to_sbp_buffer(buf);
self.uart_ftdi.append_to_sbp_buffer(buf);
self.latency.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.uart_a.sbp_size();
size += self.uart_b.sbp_size();
size += self.uart_ftdi.sbp_size();
size += self.latency.sbp_size();
size
}
}
/// Bandwidth usage measurement for a single interface.
///
/// The bandwidth usage for each interface can be reported
/// within this struct and utilize multiple fields to fully
/// specify the type of traffic that is being tracked. As
/// either the interval of collection or the collection time
/// may vary, both a timestamp and a period field are provided,
/// though they may not necessarily be populated with a value.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct NetworkUsage {
/// Duration over which the measurement was collected
pub duration: u64,
/// Number of bytes handled in total within period
pub total_bytes: u64,
    /// Number of bytes received within period
    pub rx_bytes: u32,
    /// Number of bytes transmitted within period
    pub tx_bytes: u32,
/// Interface Name
pub interface_name: SbpString,
}
impl NetworkUsage {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<NetworkUsage, crate::Error> {
Ok( NetworkUsage{
duration: _buf.read_u64::<LittleEndian>()?,
total_bytes: _buf.read_u64::<LittleEndian>()?,
rx_bytes: _buf.read_u32::<LittleEndian>()?,
tx_bytes: _buf.read_u32::<LittleEndian>()?,
interface_name: crate::parser::read_string_limit(_buf, 16)?,
} )
}
pub fn parse_array(buf: &mut &[u8]) -> Result<Vec<NetworkUsage>, crate::Error> {
let mut v = Vec::new();
while buf.len() > 0 {
v.push(NetworkUsage::parse(buf)?);
}
Ok(v)
}
pub fn parse_array_limit(buf: &mut &[u8], n: usize) -> Result<Vec<NetworkUsage>, crate::Error> {
let mut v = Vec::new();
for _ in 0..n {
v.push(NetworkUsage::parse(buf)?);
}
Ok(v)
}
}
impl crate::serialize::SbpSerialize for NetworkUsage {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.duration.append_to_sbp_buffer(buf);
self.total_bytes.append_to_sbp_buffer(buf);
self.rx_bytes.append_to_sbp_buffer(buf);
self.tx_bytes.append_to_sbp_buffer(buf);
self.interface_name.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.duration.sbp_size();
size += self.total_bytes.sbp_size();
size += self.rx_bytes.sbp_size();
size += self.tx_bytes.sbp_size();
size += self.interface_name.sbp_size();
size
}
}
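/// Editor's sketch: average total throughput for one `NetworkUsage` entry.
/// The unit of `duration` is not documented in this message; milliseconds is
/// assumed here and should be verified against the firmware in use.
pub fn avg_bytes_per_second(usage: &NetworkUsage) -> Option<f64> {
    if usage.duration == 0 {
        return None;
    }
    Some(usage.total_bytes as f64 * 1000.0 / usage.duration as f64)
}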
/// Base station observation message receipt period
///
/// Statistics on the period of observations received from the base
/// station. As complete observation sets are received, their time
/// of reception is compared with the prior set's time of reception.
/// This measurement provides a proxy for link quality as incomplete
/// or missing sets will increase the period. Long periods
/// can cause momentary RTK solution outages.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct Period {
/// Average period
pub avg: i32,
/// Minimum period
pub pmin: i32,
/// Maximum period
pub pmax: i32,
/// Smoothed estimate of the current period
pub current: i32,
}
impl Period {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<Period, crate::Error> {
Ok( Period{
avg: _buf.read_i32::<LittleEndian>()?,
pmin: _buf.read_i32::<LittleEndian>()?,
pmax: _buf.read_i32::<LittleEndian>()?,
current: _buf.read_i32::<LittleEndian>()?,
} )
}
pub fn parse_array(buf: &mut &[u8]) -> Result<Vec<Period>, crate::Error> {
let mut v = Vec::new();
while buf.len() > 0 {
v.push(Period::parse(buf)?);
}
Ok(v)
}
pub fn parse_array_limit(buf: &mut &[u8], n: usize) -> Result<Vec<Period>, crate::Error> {
let mut v = Vec::new();
for _ in 0..n {
v.push(Period::parse(buf)?);
}
Ok(v)
}
}
impl crate::serialize::SbpSerialize for Period {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.avg.append_to_sbp_buffer(buf);
self.pmin.append_to_sbp_buffer(buf);
self.pmax.append_to_sbp_buffer(buf);
self.current.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.avg.sbp_size();
size += self.pmin.sbp_size();
size += self.pmax.sbp_size();
size += self.current.sbp_size();
size
}
}
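/// Editor's sketch: using `Period` as a corrections-link health proxy, per
/// the notes above. Both the millisecond unit and the threshold (twice a
/// nominal 1 Hz observation period) are assumptions, not part of the spec.
pub fn link_looks_degraded(period: &Period) -> bool {
    // A smoothed period well above the nominal observation interval means
    // observation sets are being delayed or dropped.
    period.current > 2000
}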
/// State of the UART channel
///
/// Throughput, utilization, and error counts on the RX/TX buffers
/// of this UART channel. The reported percentage values must
/// be normalized.
///
#[cfg_attr(feature = "sbp_serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct UARTChannel {
/// UART transmit throughput
pub tx_throughput: f32,
/// UART receive throughput
pub rx_throughput: f32,
/// UART CRC error count
pub crc_error_count: u16,
/// UART IO error count
pub io_error_count: u16,
/// UART transmit buffer percentage utilization (ranges from 0 to 255)
pub tx_buffer_level: u8,
/// UART receive buffer percentage utilization (ranges from 0 to 255)
pub rx_buffer_level: u8,
}
impl UARTChannel {
#[rustfmt::skip]
pub fn parse(_buf: &mut &[u8]) -> Result<UARTChannel, crate::Error> {
Ok( UARTChannel{
tx_throughput: _buf.read_f32::<LittleEndian>()?,
rx_throughput: _buf.read_f32::<LittleEndian>()?,
crc_error_count: _buf.read_u16::<LittleEndian>()?,
io_error_count: _buf.read_u16::<LittleEndian>()?,
tx_buffer_level: _buf.read_u8()?,
rx_buffer_level: _buf.read_u8()?,
} )
}
pub fn parse_array(buf: &mut &[u8]) -> Result<Vec<UARTChannel>, crate::Error> {
let mut v = Vec::new();
while buf.len() > 0 {
v.push(UARTChannel::parse(buf)?);
}
Ok(v)
}
pub fn parse_array_limit(buf: &mut &[u8], n: usize) -> Result<Vec<UARTChannel>, crate::Error> {
let mut v = Vec::new();
for _ in 0..n {
v.push(UARTChannel::parse(buf)?);
}
Ok(v)
}
}
impl crate::serialize::SbpSerialize for UARTChannel {
#[allow(unused_variables)]
fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
self.tx_throughput.append_to_sbp_buffer(buf);
self.rx_throughput.append_to_sbp_buffer(buf);
self.crc_error_count.append_to_sbp_buffer(buf);
self.io_error_count.append_to_sbp_buffer(buf);
self.tx_buffer_level.append_to_sbp_buffer(buf);
self.rx_buffer_level.append_to_sbp_buffer(buf);
}
fn sbp_size(&self) -> usize {
let mut size = 0;
size += self.tx_throughput.sbp_size();
size += self.rx_throughput.sbp_size();
size += self.crc_error_count.sbp_size();
size += self.io_error_count.sbp_size();
size += self.tx_buffer_level.sbp_size();
size += self.rx_buffer_level.sbp_size();
size
}
}
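/// Editor's sketch: the buffer levels above span 0..=255, so scaling by
/// 100/255 recovers a conventional percentage of utilization.
pub fn buffer_percent(level: u8) -> f32 {
    f32::from(level) * 100.0 / 255.0
}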
| 29.228618 | 100 | 0.628718 |
71b1c421f93395055879baa7615fe264b7e3be0c | 11,543 | use kube::client::APIClient;
use kubelet::{pod::Pod, Phase, Provider, Status};
use log::{debug, info};
use wascc_host::{host, Actor, NativeCapability};
use std::collections::HashMap;
const ACTOR_PUBLIC_KEY: &str = "deislabs.io/wascc-action-key";
const TARGET_WASM32_WASCC: &str = "wasm32-wascc";
/// The name of the HTTP capability.
const HTTP_CAPABILITY: &str = "wascc:http_server";
#[cfg(target_os = "linux")]
const HTTP_LIB: &str = "./lib/libwascc_httpsrv.so";
#[cfg(target_os = "macos")]
const HTTP_LIB: &str = "./lib/libwascc_httpsrv.dylib";
/// Kubernetes' view of environment variables is an unordered map of string to string.
type EnvVars = std::collections::HashMap<String, String>;
/// WasccProvider provides a Kubelet runtime implementation that executes WASM binaries.
///
/// Currently, this runtime uses WASCC as a host, loading the primary container as an actor.
/// TODO: In the future, we will look at loading capabilities using the "sidecar" metaphor
/// from Kubernetes.
#[derive(Clone)]
pub struct WasccProvider {}
#[async_trait::async_trait]
impl Provider for WasccProvider {
async fn init(&self) -> anyhow::Result<()> {
tokio::task::spawn_blocking(|| {
let data = NativeCapability::from_file(HTTP_LIB).map_err(|e| {
anyhow::anyhow!("Failed to read HTTP capability {}: {}", HTTP_LIB, e)
})?;
host::add_native_capability(data)
.map_err(|e| anyhow::anyhow!("Failed to load HTTP capability: {}", e))
})
.await?
}
fn arch(&self) -> String {
TARGET_WASM32_WASCC.to_string()
}
fn can_schedule(&self, pod: &Pod) -> bool {
// If there is a node selector and it has arch set to wasm32-wascc, we can
// schedule it.
pod.node_selector()
.and_then(|i| {
i.get("beta.kubernetes.io/arch")
.map(|v| v.eq(&TARGET_WASM32_WASCC))
})
.unwrap_or(false)
}
async fn add(&self, pod: Pod, client: APIClient) -> anyhow::Result<()> {
// To run an Add event, we load the WASM, update the pod status to Running,
// and then execute the WASM, passing in the relevant data.
// When the pod finishes, we update the status to Succeeded unless it
// produces an error, in which case we mark it Failed.
debug!("Pod added {:?}", pod.name());
// This would lock us into one wascc actor per pod. I don't know if
// that is a good thing. Other containers would then be limited
// to acting as components... which largely follows the sidecar
// pattern.
//
// Another possibility is to embed the key in the image reference
// (image/foo.wasm@ed25519:PUBKEY). That might work best, but it is
// not terribly usable.
//
// A really icky one would be to just require the pubkey in the env
// vars and suck it out of there. But that violates the intention
// of env vars, which is to communicate _into_ the runtime, not to
// configure the runtime.
let pub_key = pod.get_annotation(ACTOR_PUBLIC_KEY).unwrap_or_default();
debug!("{:?}", pub_key);
// TODO: Implement this for real.
//
// What it should do:
// - for each volume
// - set up the volume map
// - for each init container:
// - set up the runtime
// - mount any volumes (popen)
// - run it to completion
// - bail with an error if it fails
// - for each container and ephemeral_container
// - set up the runtime
// - mount any volumes (popen)
// - run it to completion
// - bail if it errors
info!("Starting containers for pod {:?}", pod.name());
for container in pod.containers() {
let env = self.env_vars(client.clone(), &container, &pod).await;
debug!("Starting container {} on thread", container.name);
let pub_key = pub_key.to_owned();
// TODO: Replace with actual image store lookup when it is merged
let data = tokio::fs::read("./testdata/echo.wasm").await?;
let http_result =
tokio::task::spawn_blocking(move || wascc_run_http(data, env, &pub_key)).await?;
match http_result {
Ok(_) => {
pod.patch_status(client.clone(), &Phase::Running).await;
}
Err(e) => {
pod.patch_status(client, &Phase::Failed).await;
return Err(anyhow::anyhow!("Failed to run pod: {}", e));
}
}
}
info!(
"All containers started for pod {:?}. Updating status",
pod.name()
);
Ok(())
}
async fn modify(&self, pod: Pod, _client: APIClient) -> anyhow::Result<()> {
// Modify will be tricky. Not only do we need to handle legitimate modifications, but we
// need to sift out modifications that simply alter the status. For the time being, we
// just ignore them, which is the wrong thing to do... except that it demos better than
// other wrong things.
info!("Pod modified");
info!(
"Modified pod spec: {:#?}",
pod.as_kube_pod().status.as_ref().unwrap()
);
Ok(())
}
async fn delete(&self, pod: Pod, _client: APIClient) -> anyhow::Result<()> {
let pub_key = pod
.annotations()
.get(ACTOR_PUBLIC_KEY)
.map(String::as_str)
.unwrap_or_default();
wascc_stop(&pub_key).map_err(|e| anyhow::anyhow!("Failed to stop wascc actor: {}", e))
}
async fn status(&self, pod: Pod, _client: APIClient) -> anyhow::Result<Status> {
match pod.get_annotation(ACTOR_PUBLIC_KEY) {
None => Ok(Status {
phase: Phase::Unknown,
message: None,
container_statuses: Vec::new(),
}),
Some(pk) => {
let pk = pk.to_owned();
let result = tokio::task::spawn_blocking(move || host::actor_claims(&pk)).await?;
match result {
None => {
// FIXME: I don't know how to tell if an actor failed.
Ok(Status {
phase: Phase::Succeeded,
message: None,
container_statuses: Vec::new(),
})
}
Some(_) => Ok(Status {
phase: Phase::Running,
message: None,
container_statuses: Vec::new(),
}),
}
}
}
}
}
/// Run a WasCC module inside of the host, configuring it to handle HTTP requests.
///
/// This bootstraps an HTTP host, using the value of the env's `PORT` key to expose a port.
fn wascc_run_http(data: Vec<u8>, env: EnvVars, key: &str) -> anyhow::Result<()> {
let mut httpenv: HashMap<String, String> = HashMap::new();
httpenv.insert(
"PORT".into(),
env.get("PORT")
.map(|a| a.to_string())
.unwrap_or_else(|| "80".to_string()),
);
wascc_run(
data,
key,
vec![Capability {
name: HTTP_CAPABILITY,
env,
}],
)
}
/// Stop a running waSCC actor.
fn wascc_stop(key: &str) -> anyhow::Result<(), wascc_host::errors::Error> {
host::remove_actor(key)
}
/// Capability describes a waSCC capability.
///
/// Capabilities are made available to actors through a two-part processthread:
/// - They must be registered
/// - For each actor, the capability must be configured
struct Capability {
name: &'static str,
env: EnvVars,
}
/// Run the given WASM data as a waSCC actor with the given public key.
///
/// The provided capabilities will be configured for this actor, but the capabilities
/// must first be loaded into the host by some other process, such as register_native_capabilities().
fn wascc_run(data: Vec<u8>, key: &str, capabilities: Vec<Capability>) -> anyhow::Result<()> {
info!("wascc run");
let load = Actor::from_bytes(data).map_err(|e| anyhow::anyhow!("Error loading WASM: {}", e))?;
host::add_actor(load).map_err(|e| anyhow::anyhow!("Error adding actor: {}", e))?;
capabilities.iter().try_for_each(|cap| {
info!("configuring capability {}", cap.name);
host::configure(key, cap.name, cap.env.clone())
.map_err(|e| anyhow::anyhow!("Error configuring capabilities for module: {}", e))
})?;
info!("Instance executing");
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use k8s_openapi::api::core::v1::Pod as KubePod;
use k8s_openapi::api::core::v1::PodSpec;
#[cfg(target_os = "linux")]
const ECHO_LIB: &str = "./testdata/libecho_provider.so";
#[cfg(target_os = "macos")]
const ECHO_LIB: &str = "./testdata/libecho_provider.dylib";
#[tokio::test]
async fn test_init() {
let provider = WasccProvider {};
provider
.init()
.await
.expect("HTTP capability is registered");
}
#[test]
fn test_wascc_run() {
// Open file
let data = std::fs::read("./testdata/echo.wasm").expect("read the wasm file");
// Send into wascc_run
wascc_run_http(
data,
EnvVars::new(),
"MB4OLDIC3TCZ4Q4TGGOVAZC43VXFE2JQVRAXQMQFXUCREOOFEKOKZTY2",
)
.expect("successfully executed a WASM");
// Give the webserver a chance to start up.
std::thread::sleep(std::time::Duration::from_secs(3));
wascc_stop("MB4OLDIC3TCZ4Q4TGGOVAZC43VXFE2JQVRAXQMQFXUCREOOFEKOKZTY2")
.expect("Removed the actor");
}
#[test]
fn test_wascc_echo() {
let data = NativeCapability::from_file(ECHO_LIB).expect("loaded echo library");
host::add_native_capability(data).expect("added echo capability");
let key = "MDAYLDTOZEHQFPB3CL5PAFY5UTNCW32P54XGWYX3FOM2UBRYNCP3I3BF";
let wasm = std::fs::read("./testdata/echo_actor_s.wasm").expect("load echo WASM");
// TODO: use wascc_run to execute echo_actor
wascc_run(
wasm,
key,
vec![Capability {
name: "wok:echoProvider",
env: EnvVars::new(),
}],
)
.expect("completed echo run")
}
#[test]
fn test_can_schedule() {
let wr = WasccProvider {};
let mock = Default::default();
assert!(!wr.can_schedule(&mock));
let mut selector = std::collections::BTreeMap::new();
selector.insert(
"beta.kubernetes.io/arch".to_string(),
"wasm32-wascc".to_string(),
);
let mut mock: KubePod = mock.into();
mock.spec = Some(PodSpec {
node_selector: Some(selector.clone()),
..Default::default()
});
let mock = Pod::new(mock);
assert!(wr.can_schedule(&mock));
selector.insert("beta.kubernetes.io/arch".to_string(), "amd64".to_string());
let mut mock: KubePod = mock.into();
mock.spec = Some(PodSpec {
node_selector: Some(selector),
..Default::default()
});
let mock = Pod::new(mock);
assert!(!wr.can_schedule(&mock));
}
}
| 36.528481 | 101 | 0.567184 |
f816696113ee193257effa8ec60856c0ee438ed1 | 3,727 | /*
Copyright (c) 2021 VMware, Inc.
SPDX-License-Identifier: MIT
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
impl<'a> FromFlatBuffer<fb::__BigInt<'a>> for ddlog_bigint::Int {
fn from_flatbuf(fb: fb::__BigInt<'a>) -> std::result::Result<ddlog_bigint::Int, String> {
        let bytes = fb.bytes().ok_or_else(|| {
            "ddlog_bigint::Int::from_flatbuf: invalid buffer: failed to extract bytes".to_string()
        })?;
Ok(ddlog_bigint::Int::from_bytes_be(fb.sign(), bytes))
}
}
impl<'b> ToFlatBuffer<'b> for ddlog_bigint::Int {
type Target = fbrt::WIPOffset<fb::__BigInt<'b>>;
fn to_flatbuf(&self, fbb: &mut fbrt::FlatBufferBuilder<'b>) -> Self::Target {
let (sign, bytes) = self.to_bytes_be();
let vec = fbb.create_vector(&bytes);
fb::__BigInt::create(
fbb,
&fb::__BigIntArgs {
sign: sign != ::num::bigint::Sign::Minus,
bytes: Some(vec),
},
)
}
}
impl<'b> ToFlatBufferTable<'b> for ddlog_bigint::Int {
type Target = fb::__BigInt<'b>;
fn to_flatbuf_table(
&self,
fbb: &mut fbrt::FlatBufferBuilder<'b>,
) -> fbrt::WIPOffset<Self::Target> {
self.to_flatbuf(fbb)
}
}
impl<'b> ToFlatBufferVectorElement<'b> for ddlog_bigint::Int {
type Target = <ddlog_bigint::Int as ToFlatBuffer<'b>>::Target;
fn to_flatbuf_vector_element(&self, fbb: &mut fbrt::FlatBufferBuilder<'b>) -> Self::Target {
self.to_flatbuf(fbb)
}
}
impl<'a> FromFlatBuffer<fb::__BigUint<'a>> for ddlog_bigint::Uint {
fn from_flatbuf(fb: fb::__BigUint<'a>) -> std::result::Result<ddlog_bigint::Uint, String> {
        let bytes = fb.bytes().ok_or_else(|| {
            "ddlog_bigint::Uint::from_flatbuf: invalid buffer: failed to extract bytes".to_string()
        })?;
Ok(ddlog_bigint::Uint::from_bytes_be(bytes))
}
}
impl<'b> ToFlatBuffer<'b> for ddlog_bigint::Uint {
type Target = fbrt::WIPOffset<fb::__BigUint<'b>>;
fn to_flatbuf(&self, fbb: &mut fbrt::FlatBufferBuilder<'b>) -> Self::Target {
let vec = fbb.create_vector(&self.to_bytes_be());
fb::__BigUint::create(fbb, &fb::__BigUintArgs { bytes: Some(vec) })
}
}
impl<'b> ToFlatBufferTable<'b> for ddlog_bigint::Uint {
type Target = fb::__BigUint<'b>;
fn to_flatbuf_table(
&self,
fbb: &mut fbrt::FlatBufferBuilder<'b>,
) -> fbrt::WIPOffset<Self::Target> {
self.to_flatbuf(fbb)
}
}
impl<'b> ToFlatBufferVectorElement<'b> for ddlog_bigint::Uint {
type Target = <ddlog_bigint::Uint as ToFlatBuffer<'b>>::Target;
fn to_flatbuf_vector_element(&self, fbb: &mut fbrt::FlatBufferBuilder<'b>) -> Self::Target {
self.to_flatbuf(fbb)
}
}
| 36.539216 | 96 | 0.667561 |
bf14cddd6565f0e58c8bc36881894c0443960371 | 2,325 | use flexgen::config::Config;
use flexgen::var::TokenVars;
use flexgen::{import_vars, register_fragments, CodeFragment, CodeGenerator, Error};
use proc_macro2::TokenStream;
use quote::quote;
use quote_doctest::doc_test;
struct DocTest;
impl CodeFragment for DocTest {
fn generate(&self, vars: &TokenVars) -> Result<TokenStream, Error> {
import_vars! { vars => fib, one };
let test = quote! {
assert_eq!(#fib(10), 55);
assert_eq!(#fib(#one), #one);
};
Ok(doc_test!(test)?)
}
}
struct Function;
impl CodeFragment for Function {
fn generate(&self, vars: &TokenVars) -> Result<TokenStream, Error> {
import_vars! { vars => fib, one };
let doc_test = DocTest.generate(vars)?;
Ok(quote! {
/// This will run a compare between fib inputs and the outputs
#doc_test
#[inline]
fn #fib(n: u64) -> u64 {
match n {
0 => 0,
#one => #one,
n => #fib(n - 1) + #fib(n - 2),
}
}
})
}
}
struct Main;
impl CodeFragment for Main {
fn uses(&self, _vars: &TokenVars) -> Result<TokenStream, Error> {
Ok(quote! {
use std::error::{Error as StdError};
use std::io::stdin;
})
}
fn generate(&self, vars: &TokenVars) -> Result<TokenStream, Error> {
import_vars! { vars => fib };
Ok(quote! {
/// This is the main function
fn main() -> Result<(), Box<dyn StdError>> {
println!("Enter a number:");
let mut line = String::new();
stdin().read_line(&mut line)?;
let num: u64 = line.trim_end().parse()?;
_blank_!();
_comment_!("\nCalculate fibonacci for user input\n\n");
let answer = #fib(num);
println!("The number '{num}' in the fibonacci sequence is: {answer}");
_blank_!();
Ok(())
}
})
}
}
fn main() -> Result<(), Error> {
let fragments = register_fragments!(Function, Main);
let config = Config::from_default_toml_file()?;
let gen = CodeGenerator::new(fragments, config)?;
gen.generate_files()
}
| 27.352941 | 86 | 0.513548 |
291db9ba41370637b608f5981a73b3d12b2c50cc | 217 | // gate-test-await_macro
// edition:2018
#![feature(async_await)]
async fn bar() {}
async fn foo() {
await!(bar()); //~ ERROR `await!(<expr>)` macro syntax is unstable, and will soon be removed
}
fn main() {}
| 16.692308 | 96 | 0.631336 |
fe49a92cc8c18c7ac2d888c9c1f5a08816ac49cd | 20,649 | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use anyhow::{format_err, Result};
use libra_types::account_address::AccountAddress;
use move_core_types::identifier::Identifier;
use move_ir_types::ast::{ModuleName, NopLabel, QualifiedModuleIdent};
use serde::{Deserialize, Serialize};
use std::{collections::BTreeMap, ops::Bound};
use vm::{
access::*,
file_format::{
CodeOffset, CompiledModule, CompiledScript, FunctionDefinition, FunctionDefinitionIndex,
LocalIndex, MemberCount, ModuleHandleIndex, StructDefinition, StructDefinitionIndex,
TableIndex,
},
};
//***************************************************************************
// Source location mapping
//***************************************************************************
pub type SourceName<Location> = (String, Location);
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct StructSourceMap<Location: Clone + Eq> {
/// The source declaration location of the struct
pub decl_location: Location,
/// Important: type parameters need to be added in the order of their declaration
pub type_parameters: Vec<SourceName<Location>>,
/// Note that fields to a struct source map need to be added in the order of the fields in the
/// struct definition.
pub fields: Vec<Location>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct FunctionSourceMap<Location: Clone + Eq> {
/// The source location for the definition of this entire function. Note that in certain
/// instances this will have no valid source location e.g. the "main" function for modules that
/// are treated as programs are synthesized and therefore have no valid source location.
pub decl_location: Location,
/// Note that type parameters need to be added in the order of their declaration
pub type_parameters: Vec<SourceName<Location>>,
pub parameters: Vec<SourceName<Location>>,
/// The index into the vector is the locals index. The corresponding `(Identifier, Location)` tuple
/// is the name and location of the local.
pub locals: Vec<SourceName<Location>>,
/// A map to the code offset for a corresponding nop. Nop's are used as markers for some
/// high level language information
pub nops: BTreeMap<NopLabel, CodeOffset>,
/// The source location map for the function body.
pub code_map: BTreeMap<CodeOffset, Location>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct SourceMap<Location: Clone + Eq> {
/// The name <address.module_name> for module that this source map is for
/// None if it is a script
pub module_name_opt: Option<(AccountAddress, Identifier)>,
// A mapping of StructDefinitionIndex to source map for each struct/resource
struct_map: BTreeMap<TableIndex, StructSourceMap<Location>>,
    // A mapping of FunctionDefinitionIndex to the source map for that function.
function_map: BTreeMap<TableIndex, FunctionSourceMap<Location>>,
}
pub fn remap_locations_source_name<Location: Clone + Eq, Other: Clone + Eq>(
(id, loc): SourceName<Location>,
f: &mut impl FnMut(Location) -> Other,
) -> SourceName<Other> {
(id, f(loc))
}
pub fn remap_locations_source_map<Location: Clone + Eq, Other: Clone + Eq>(
map: Vec<SourceMap<Location>>,
f: &mut impl FnMut(Location) -> Other,
) -> Vec<SourceMap<Other>> {
map.into_iter().map(|m| m.remap_locations(f)).collect()
}
impl<Location: Clone + Eq> StructSourceMap<Location> {
pub fn new(decl_location: Location) -> Self {
Self {
decl_location,
type_parameters: Vec::new(),
fields: Vec::new(),
}
}
pub fn add_type_parameter(&mut self, type_name: SourceName<Location>) {
self.type_parameters.push(type_name)
}
pub fn get_type_parameter_name(
&self,
type_parameter_idx: usize,
) -> Option<SourceName<Location>> {
self.type_parameters.get(type_parameter_idx).cloned()
}
pub fn add_field_location(&mut self, field_loc: Location) {
self.fields.push(field_loc)
}
pub fn get_field_location(&self, field_index: MemberCount) -> Option<Location> {
self.fields.get(field_index as usize).cloned()
}
pub fn dummy_struct_map(
&mut self,
module: &CompiledModule,
struct_def: &StructDefinition,
default_loc: Location,
) -> Result<()> {
let struct_handle = module.struct_handle_at(struct_def.struct_handle);
// Add dummy locations for the fields
match struct_def.declared_field_count() {
Err(_) => (),
Ok(count) => (0..count).for_each(|_| self.fields.push(default_loc.clone())),
}
for i in 0..struct_handle.type_parameters.len() {
let name = format!("Ty{}", i);
self.add_type_parameter((name, default_loc.clone()))
}
Ok(())
}
pub fn remap_locations<Other: Clone + Eq>(
self,
f: &mut impl FnMut(Location) -> Other,
) -> StructSourceMap<Other> {
let StructSourceMap {
decl_location,
type_parameters,
fields,
} = self;
let decl_location = f(decl_location);
let type_parameters = type_parameters
.into_iter()
.map(|n| remap_locations_source_name(n, f))
.collect();
let fields = fields.into_iter().map(|loc| f(loc)).collect();
StructSourceMap {
decl_location,
type_parameters,
fields,
}
}
}
impl<Location: Clone + Eq> FunctionSourceMap<Location> {
pub fn new(decl_location: Location) -> Self {
Self {
decl_location,
type_parameters: Vec::new(),
parameters: Vec::new(),
locals: Vec::new(),
code_map: BTreeMap::new(),
nops: BTreeMap::new(),
}
}
pub fn add_type_parameter(&mut self, type_name: SourceName<Location>) {
self.type_parameters.push(type_name)
}
pub fn get_type_parameter_name(
&self,
type_parameter_idx: usize,
) -> Option<SourceName<Location>> {
self.type_parameters.get(type_parameter_idx).cloned()
}
    /// A single source-level instruction may map to a number of bytecode instructions. In
/// order to not store a location for each instruction, we instead use a BTreeMap to represent
/// a segment map (holding the left-hand-sides of each segment). Thus, an instruction
/// sequence is always marked from its starting point. To determine what part of the source
/// code corresponds to a given `CodeOffset` we query to find the element that is the largest
/// number less than or equal to the query. This will give us the location for that bytecode
/// range.
pub fn add_code_mapping(&mut self, start_offset: CodeOffset, location: Location) {
let possible_segment = self.get_code_location(start_offset);
match possible_segment.map(|other_location| other_location != location) {
Some(true) | None => {
self.code_map.insert(start_offset, location);
}
_ => (),
};
}
/// Record the code offset for an Nop label
pub fn add_nop_mapping(&mut self, label: NopLabel, offset: CodeOffset) {
assert!(self.nops.insert(label, offset).is_none())
}
// Note that it is important that locations be added in order.
pub fn add_local_mapping(&mut self, name: SourceName<Location>) {
self.locals.push(name);
}
pub fn add_parameter_mapping(&mut self, name: SourceName<Location>) {
self.parameters.push(name)
}
/// Recall that we are using a segment tree. We therefore lookup the location for the code
/// offset by performing a range query for the largest number less than or equal to the code
/// offset passed in.
pub fn get_code_location(&self, code_offset: CodeOffset) -> Option<Location> {
self.code_map
.range((Bound::Unbounded, Bound::Included(&code_offset)))
.next_back()
.map(|(_, vl)| vl.clone())
}
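    // Illustrative sketch (not part of the original source), with `usize`
    // standing in for a real `Location`: offsets 0..=4 resolve to the first
    // mapped location and offsets 5.. to the second, because lookups take the
    // largest key less than or equal to the queried offset.
    //
    //     let mut map: FunctionSourceMap<usize> = FunctionSourceMap::new(0);
    //     map.add_code_mapping(0, 100);
    //     map.add_code_mapping(5, 200);
    //     assert_eq!(map.get_code_location(3), Some(100));
    //     assert_eq!(map.get_code_location(7), Some(200));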
pub fn get_parameter_or_local_name(&self, idx: u64) -> Option<SourceName<Location>> {
let idx = idx as usize;
if idx < self.parameters.len() {
self.parameters.get(idx).cloned()
} else {
self.locals.get(idx - self.parameters.len()).cloned()
}
}
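    // Illustrative (not part of the original source): given a
    // `FunctionSourceMap` `map` with parameters ["a", "b"] and locals ["tmp"],
    // indices resolve as 0 -> "a", 1 -> "b", 2 -> "tmp", since locals are
    // numbered after all parameters, e.g.
    //
    //     assert_eq!(map.get_parameter_or_local_name(2).unwrap().0, "tmp");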
pub fn make_local_name_to_index_map(&self) -> BTreeMap<&String, LocalIndex> {
self.locals
.iter()
.chain(self.parameters.iter())
.enumerate()
.map(|(i, (n, _))| (n, i as LocalIndex))
.collect()
}
pub fn dummy_function_map(
&mut self,
module: &CompiledModule,
function_def: &FunctionDefinition,
default_loc: Location,
) -> Result<()> {
let function_handle = module.function_handle_at(function_def.function);
// Generate names for each type parameter
for i in 0..function_handle.type_parameters.len() {
let name = format!("Ty{}", i);
self.add_type_parameter((name, default_loc.clone()))
}
if let Some(code) = &function_def.code {
let locals = module.signature_at(code.locals);
for i in 0..locals.0.len() {
let name = format!("loc{}", i);
self.add_local_mapping((name, default_loc.clone()))
}
}
// We just need to insert the code map at the 0'th index since we represent this with a
// segment map
self.add_code_mapping(0, default_loc);
Ok(())
}
pub fn remap_locations<Other: Clone + Eq>(
self,
f: &mut impl FnMut(Location) -> Other,
) -> FunctionSourceMap<Other> {
let FunctionSourceMap {
decl_location,
type_parameters,
parameters,
locals,
code_map,
nops,
} = self;
let decl_location = f(decl_location);
let type_parameters = type_parameters
.into_iter()
.map(|n| remap_locations_source_name(n, f))
.collect();
let parameters = parameters
.into_iter()
.map(|n| remap_locations_source_name(n, f))
.collect();
let locals = locals
.into_iter()
.map(|n| remap_locations_source_name(n, f))
.collect();
let code_map = code_map.into_iter().map(|(i, loc)| (i, f(loc))).collect();
FunctionSourceMap {
decl_location,
type_parameters,
parameters,
locals,
code_map,
nops,
}
}
}
impl<Location: Clone + Eq> SourceMap<Location> {
pub fn new(module_name_opt: Option<QualifiedModuleIdent>) -> Self {
let module_name_opt = module_name_opt.map(|module_name| {
let ident = Identifier::new(module_name.name.into_inner()).unwrap();
(module_name.address, ident)
});
Self {
module_name_opt,
struct_map: BTreeMap::new(),
function_map: BTreeMap::new(),
}
}
pub fn add_top_level_function_mapping(
&mut self,
fdef_idx: FunctionDefinitionIndex,
location: Location,
) -> Result<()> {
        self.function_map
            .insert(fdef_idx.0, FunctionSourceMap::new(location))
            .map_or(Ok(()), |_| {
                Err(format_err!(
                    "Multiple functions at same function definition index encountered when constructing source map"
                ))
            })
}
pub fn add_function_type_parameter_mapping(
&mut self,
fdef_idx: FunctionDefinitionIndex,
name: SourceName<Location>,
) -> Result<()> {
let func_entry = self.function_map.get_mut(&fdef_idx.0).ok_or_else(|| {
format_err!("Tried to add function type parameter mapping to undefined function index")
})?;
func_entry.add_type_parameter(name);
Ok(())
}
pub fn get_function_type_parameter_name(
&self,
fdef_idx: FunctionDefinitionIndex,
type_parameter_idx: usize,
) -> Result<SourceName<Location>> {
self.function_map
.get(&fdef_idx.0)
.and_then(|function_source_map| {
function_source_map.get_type_parameter_name(type_parameter_idx)
})
.ok_or_else(|| format_err!("Unable to get function type parameter name"))
}
pub fn add_code_mapping(
&mut self,
fdef_idx: FunctionDefinitionIndex,
start_offset: CodeOffset,
location: Location,
) -> Result<()> {
let func_entry = self
.function_map
.get_mut(&fdef_idx.0)
.ok_or_else(|| format_err!("Tried to add code mapping to undefined function index"))?;
func_entry.add_code_mapping(start_offset, location);
Ok(())
}
pub fn add_nop_mapping(
&mut self,
fdef_idx: FunctionDefinitionIndex,
label: NopLabel,
start_offset: CodeOffset,
) -> Result<()> {
let func_entry = self
.function_map
.get_mut(&fdef_idx.0)
.ok_or_else(|| format_err!("Tried to add nop mapping to undefined function index"))?;
func_entry.add_nop_mapping(label, start_offset);
Ok(())
}
/// Given a function definition and a code offset within that function definition, this returns
/// the location in the source code associated with the instruction at that offset.
pub fn get_code_location(
&self,
fdef_idx: FunctionDefinitionIndex,
offset: CodeOffset,
) -> Result<Location> {
self.function_map
.get(&fdef_idx.0)
.and_then(|function_source_map| function_source_map.get_code_location(offset))
.ok_or_else(|| format_err!("Tried to get code location from undefined function index"))
}
pub fn add_local_mapping(
&mut self,
fdef_idx: FunctionDefinitionIndex,
name: SourceName<Location>,
) -> Result<()> {
let func_entry = self
.function_map
.get_mut(&fdef_idx.0)
.ok_or_else(|| format_err!("Tried to add local mapping to undefined function index"))?;
func_entry.add_local_mapping(name);
Ok(())
}
pub fn add_parameter_mapping(
&mut self,
fdef_idx: FunctionDefinitionIndex,
name: SourceName<Location>,
) -> Result<()> {
let func_entry = self.function_map.get_mut(&fdef_idx.0).ok_or_else(|| {
format_err!("Tried to add parameter mapping to undefined function index")
})?;
func_entry.add_parameter_mapping(name);
Ok(())
}
pub fn get_parameter_or_local_name(
&self,
fdef_idx: FunctionDefinitionIndex,
index: u64,
) -> Result<SourceName<Location>> {
self.function_map
.get(&fdef_idx.0)
.and_then(|function_source_map| function_source_map.get_parameter_or_local_name(index))
.ok_or_else(|| format_err!("Tried to get local name at undefined function index"))
}
pub fn add_top_level_struct_mapping(
&mut self,
struct_def_idx: StructDefinitionIndex,
location: Location,
) -> Result<()> {
        self.struct_map
            .insert(struct_def_idx.0, StructSourceMap::new(location))
            .map_or(Ok(()), |_| {
                Err(format_err!(
                    "Multiple structs at same struct definition index encountered when constructing source map"
                ))
            })
}
pub fn add_struct_field_mapping(
&mut self,
struct_def_idx: StructDefinitionIndex,
location: Location,
) -> Result<()> {
let struct_entry = self
.struct_map
.get_mut(&struct_def_idx.0)
.ok_or_else(|| format_err!("Tried to add file mapping to undefined struct index"))?;
struct_entry.add_field_location(location);
Ok(())
}
pub fn get_struct_field_name(
&self,
struct_def_idx: StructDefinitionIndex,
field_idx: MemberCount,
) -> Option<Location> {
self.struct_map
.get(&struct_def_idx.0)
.and_then(|struct_source_map| struct_source_map.get_field_location(field_idx))
}
pub fn add_struct_type_parameter_mapping(
&mut self,
struct_def_idx: StructDefinitionIndex,
name: SourceName<Location>,
) -> Result<()> {
let struct_entry = self.struct_map.get_mut(&struct_def_idx.0).ok_or_else(|| {
format_err!("Tried to add struct type parameter mapping to undefined struct index")
})?;
struct_entry.add_type_parameter(name);
Ok(())
}
pub fn get_struct_type_parameter_name(
&self,
struct_def_idx: StructDefinitionIndex,
type_parameter_idx: usize,
) -> Result<SourceName<Location>> {
self.struct_map
.get(&struct_def_idx.0)
.and_then(|struct_source_map| {
struct_source_map.get_type_parameter_name(type_parameter_idx)
})
.ok_or_else(|| format_err!("Unable to get struct type parameter name"))
}
pub fn get_function_source_map(
&self,
fdef_idx: FunctionDefinitionIndex,
) -> Result<&FunctionSourceMap<Location>> {
self.function_map
.get(&fdef_idx.0)
.ok_or_else(|| format_err!("Unable to get function source map"))
}
pub fn get_struct_source_map(
&self,
struct_def_idx: StructDefinitionIndex,
) -> Result<&StructSourceMap<Location>> {
self.struct_map
.get(&struct_def_idx.0)
.ok_or_else(|| format_err!("Unable to get struct source map"))
}
    /// Create a 'dummy' source map for a compiled module. This is useful e.g. for disassembling
    /// with generated or real names, depending on whether a real source map is available.
pub fn dummy_from_module(module: &CompiledModule, default_loc: Location) -> Result<Self> {
let module_handle = module.module_handle_at(ModuleHandleIndex::new(0));
let module_name = ModuleName::new(module.identifier_at(module_handle.name).to_string());
let address = *module.address_identifier_at(module_handle.address);
let module_ident = QualifiedModuleIdent::new(module_name, address);
let mut empty_source_map = Self::new(Some(module_ident));
for (function_idx, function_def) in module.function_defs().iter().enumerate() {
empty_source_map.add_top_level_function_mapping(
FunctionDefinitionIndex(function_idx as TableIndex),
default_loc.clone(),
)?;
empty_source_map
.function_map
.get_mut(&(function_idx as TableIndex))
.ok_or_else(|| format_err!("Unable to get function map while generating dummy"))?
.dummy_function_map(&module, &function_def, default_loc.clone())?;
}
for (struct_idx, struct_def) in module.struct_defs().iter().enumerate() {
empty_source_map.add_top_level_struct_mapping(
StructDefinitionIndex(struct_idx as TableIndex),
default_loc.clone(),
)?;
empty_source_map
.struct_map
.get_mut(&(struct_idx as TableIndex))
.ok_or_else(|| format_err!("Unable to get struct map while generating dummy"))?
.dummy_struct_map(&module, &struct_def, default_loc.clone())?;
}
Ok(empty_source_map)
}
pub fn dummy_from_script(script: &CompiledScript, default_loc: Location) -> Result<Self> {
Self::dummy_from_module(&script.clone().into_module().1, default_loc)
}
pub fn remap_locations<Other: Clone + Eq>(
self,
f: &mut impl FnMut(Location) -> Other,
) -> SourceMap<Other> {
let SourceMap {
module_name_opt,
struct_map,
function_map,
} = self;
let struct_map = struct_map
.into_iter()
.map(|(n, m)| (n, m.remap_locations(f)))
.collect();
let function_map = function_map
.into_iter()
.map(|(n, m)| (n, m.remap_locations(f)))
.collect();
SourceMap {
module_name_opt,
struct_map,
function_map,
}
}
}
| 35.848958 | 118 | 0.613008 |
e8edbb20868ee3177f64ff674a913a28f13289e2 | 1,493 | use polyhorn_core::{Platform, WeakReference};
use crate::events::EventListener;
use crate::geometry::Size;
use crate::handles::{Imperative, ViewHandle};
use crate::styles::ViewStyle;
/// The base component.
pub struct View<P, H>
where
P: Platform + ?Sized,
H: ViewHandle + 'static,
{
/// Controls the appearance and layout of a View.
pub style: ViewStyle,
/// Called when the user cancels pressing a View.
pub on_pointer_cancel: EventListener<()>,
/// Called when the user starts pressing a View.
pub on_pointer_down: EventListener<()>,
/// Called when the user stops pressing a View.
pub on_pointer_up: EventListener<()>,
/// Called when a View is layed out.
pub on_layout: EventListener<Size<f32>>,
/// This is a reference to an imperative view handle that can be used to
/// measure the dimensions of this view or schedule animations.
pub reference: Option<WeakReference<P, Option<H>>>,
}
impl<P, H> Default for View<P, H>
where
P: Platform + ?Sized,
H: ViewHandle,
{
fn default() -> Self {
View {
style: Default::default(),
on_pointer_cancel: Default::default(),
on_pointer_down: Default::default(),
on_pointer_up: Default::default(),
on_layout: Default::default(),
reference: Default::default(),
}
}
}
impl<P, H> Imperative for View<P, H>
where
P: Platform + ?Sized,
H: ViewHandle,
{
type Handle = H;
}
| 25.741379 | 76 | 0.637642 |
fcabece8431ccedd811679326aba502a0e6cbea9 | 8,066 | // Generated from definition io.k8s.api.core.v1.PersistentVolumeStatus
/// PersistentVolumeStatus is the current status of a persistent volume.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct PersistentVolumeStatus {
/// A human-readable message indicating details about why the volume is in this state.
pub message: Option<String>,
/// Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase
pub phase: Option<String>,
/// Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.
pub reason: Option<String>,
}
impl<'de> crate::serde::Deserialize<'de> for PersistentVolumeStatus {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_message,
Key_phase,
Key_reason,
Other,
}
impl<'de> crate::serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
Ok(match v {
"message" => Field::Key_message,
"phase" => Field::Key_phase,
"reason" => Field::Key_reason,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = PersistentVolumeStatus;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("PersistentVolumeStatus")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
let mut value_message: Option<String> = None;
let mut value_phase: Option<String> = None;
let mut value_reason: Option<String> = None;
while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_message => value_message = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_phase => value_phase = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_reason => value_reason = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(PersistentVolumeStatus {
message: value_message,
phase: value_phase,
reason: value_reason,
})
}
}
deserializer.deserialize_struct(
"PersistentVolumeStatus",
&[
"message",
"phase",
"reason",
],
Visitor,
)
}
}
impl crate::serde::Serialize for PersistentVolumeStatus {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
let mut state = serializer.serialize_struct(
"PersistentVolumeStatus",
self.message.as_ref().map_or(0, |_| 1) +
self.phase.as_ref().map_or(0, |_| 1) +
self.reason.as_ref().map_or(0, |_| 1),
)?;
if let Some(value) = &self.message {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "message", value)?;
}
if let Some(value) = &self.phase {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "phase", value)?;
}
if let Some(value) = &self.reason {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "reason", value)?;
}
crate::serde::ser::SerializeStruct::end(state)
}
}
#[cfg(feature = "schemars")]
impl crate::schemars::JsonSchema for PersistentVolumeStatus {
fn schema_name() -> String {
"io.k8s.api.core.v1.PersistentVolumeStatus".to_owned()
}
fn json_schema(__gen: &mut crate::schemars::gen::SchemaGenerator) -> crate::schemars::schema::Schema {
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("PersistentVolumeStatus is the current status of a persistent volume.".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::Object))),
object: Some(Box::new(crate::schemars::schema::ObjectValidation {
properties: [
(
"message".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("A human-readable message indicating details about why the volume is in this state.".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
(
"phase".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
(
"reason".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
].into(),
..Default::default()
})),
..Default::default()
})
}
}
| 47.447059 | 228 | 0.525167 |
1ca4b10bcd25560b76ea2e56cddb331805cb8fae | 4,601 | use gleam_core::{
build::Telemetry,
error::{Error, StandardIoAction},
Warning,
};
use hexpm::version::Version;
use std::{
io::Write,
time::{Duration, Instant},
};
use termcolor::{BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};
use crate::print_warning;
#[derive(Debug, Default, Clone)]
pub struct Reporter;
impl Reporter {
pub fn new() -> Self {
Self::default()
}
}
impl Telemetry for Reporter {
fn compiling_package(&self, name: &str) {
print_compiling(name);
}
fn checking_package(&self, name: &str) {
print_checking(name);
}
fn warning(&self, warning: &Warning) {
print_warning(warning)
}
}
pub fn ask(question: &str) -> Result<String, Error> {
print!("{}: ", question);
std::io::stdout().flush().expect("ask stdout flush");
let mut answer = String::new();
let _ = std::io::stdin()
.read_line(&mut answer)
.map_err(|e| Error::StandardIo {
action: StandardIoAction::Read,
err: Some(e.kind()),
})?;
Ok(answer.trim().to_string())
}
pub fn ask_password(question: &str) -> Result<String, Error> {
let prompt = format!("{} (will not be printed as you type): ", question);
rpassword::read_password_from_tty(Some(&prompt))
.map_err(|e| Error::StandardIo {
action: StandardIoAction::Read,
err: Some(e.kind()),
})
.map(|s| s.trim().to_string())
}
pub fn print_publishing(name: &str, version: &Version) {
print_colourful_prefix(" Publishing", &format!("{} v{}", name, version.to_string()))
}
pub fn print_published(duration: Duration) {
print_colourful_prefix(" Published", &format!("in {}", seconds(duration)))
}
pub fn print_retired(package: &str, version: &str) {
print_colourful_prefix(" Retired", &format!("{} {}", package, version))
}
pub fn print_unretired(package: &str, version: &str) {
print_colourful_prefix(" Unretired", &format!("{} {}", package, version))
}
pub fn print_publishing_documentation() {
print_colourful_prefix(" Publishing", "documentation");
}
pub fn print_downloading(text: &str) {
print_colourful_prefix("Downloading", text)
}
pub fn print_resolving_versions() {
print_colourful_prefix(" Resolving", "versions")
}
pub fn print_compiling(text: &str) {
print_colourful_prefix(" Compiling", text)
}
pub fn print_checking(text: &str) {
print_colourful_prefix(" Checking", text)
}
pub fn print_compiled(duration: Duration) {
print_colourful_prefix(" Compiled", &format!("in {}", seconds(duration)))
}
pub fn print_checked(duration: Duration) {
print_colourful_prefix(" Checked", &format!("in {}", seconds(duration)))
}
pub fn print_running(text: &str) {
print_colourful_prefix(" Running", text)
}
pub fn print_added(text: &str) {
print_colourful_prefix(" Added", text)
}
pub fn print_generating_documentation() {
print_colourful_prefix(" Generating", "documentation")
}
pub fn print_packages_downloaded(start: Instant, count: usize) {
let elapsed = seconds(start.elapsed());
let msg = match count {
1 => format!("1 package in {}", elapsed),
_ => format!("{} packages in {}", count, elapsed),
};
print_colourful_prefix(" Downloaded", &msg)
}
pub fn seconds(duration: Duration) -> String {
format!("{:.2}s", duration.as_millis() as f32 / 1000.)
}
pub fn print_colourful_prefix(prefix: &str, text: &str) {
let buffer_writer = stdout_buffer_writer();
let mut buffer = buffer_writer.buffer();
buffer
.set_color(
ColorSpec::new()
.set_intense(true)
.set_fg(Some(Color::Magenta)),
)
.expect("print_green_prefix");
write!(buffer, "{}", prefix).expect("print_green_prefix");
buffer
.set_color(&ColorSpec::new())
.expect("print_green_prefix");
writeln!(buffer, " {}", text).expect("print_green_prefix");
buffer_writer.print(&buffer).expect("print_green_prefix");
}
pub fn stderr_buffer_writer() -> BufferWriter {
// Don't add color codes to the output if standard error isn't connected to a terminal
termcolor::BufferWriter::stderr(color_choice())
}
pub fn stdout_buffer_writer() -> BufferWriter {
// Don't add color codes to the output if standard error isn't connected to a terminal
termcolor::BufferWriter::stdout(color_choice())
}
fn color_choice() -> ColorChoice {
if atty::is(atty::Stream::Stderr) {
termcolor::ColorChoice::Auto
} else {
termcolor::ColorChoice::Never
}
}
| 27.884848 | 90 | 0.643338 |
8f192868ae34b4cbfce882a7826f918da6dcf821 | 17,113 | //! Converters for nilable primitive Ruby types. Excludes collection types
//! Array and Hash.
use std::collections::HashMap;
use crate::convert::float::Float;
use crate::convert::{Convert, TryConvert};
use crate::sys;
use crate::types::{Int, Ruby};
use crate::value::{Value, ValueLike};
use crate::{Artichoke, ArtichokeError};
// bail out implementation for mixed-type collections
impl Convert<Option<Value>, Value> for Artichoke {
fn convert(&self, value: Option<Value>) -> Value {
if let Some(value) = value {
value
} else {
Value::new(self, unsafe { sys::mrb_sys_nil_value() })
}
}
}
impl Convert<Value, Option<Value>> for Artichoke {
fn convert(&self, value: Value) -> Option<Value> {
if let Ruby::Nil = value.ruby_type() {
None
} else {
Some(value)
}
}
}
macro_rules! option_to_ruby {
($elem:ty) => {
impl<'a> Convert<Option<$elem>, Value> for Artichoke {
fn convert(&self, value: Option<$elem>) -> Value {
if let Some(value) = value {
let result: Value = self.convert(value);
result
} else {
Value::new(self, unsafe { sys::mrb_sys_nil_value() })
}
}
}
};
}
macro_rules! ruby_to_option {
($elem:ty) => {
impl<'a> TryConvert<Value, Option<$elem>> for Artichoke {
fn try_convert(&self, value: Value) -> Result<Option<$elem>, ArtichokeError> {
if let Some(value) = self.convert(value) {
value.try_into::<$elem>().map(Some)
} else {
Ok(None)
}
}
}
};
}
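// A sketch (not part of the original source) of what `option_to_ruby!(bool)`
// expands to, for readers unfamiliar with the macros above:
//
//     impl<'a> Convert<Option<bool>, Value> for Artichoke {
//         fn convert(&self, value: Option<bool>) -> Value {
//             if let Some(value) = value {
//                 let result: Value = self.convert(value);
//                 result
//             } else {
//                 Value::new(self, unsafe { sys::mrb_sys_nil_value() })
//             }
//         }
//     }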
// Primitives
option_to_ruby!(bool);
option_to_ruby!(Vec<u8>);
option_to_ruby!(Int);
option_to_ruby!(Float);
option_to_ruby!(String);
option_to_ruby!(&'a str);
// Array of primitives
option_to_ruby!(Vec<Value>);
option_to_ruby!(Vec<bool>);
option_to_ruby!(Vec<Vec<u8>>);
option_to_ruby!(Vec<Int>);
option_to_ruby!(Vec<Float>);
option_to_ruby!(Vec<String>);
option_to_ruby!(Vec<&'a str>);
// Array of optional primitives
option_to_ruby!(Vec<Option<Value>>);
option_to_ruby!(Vec<Option<bool>>);
option_to_ruby!(Vec<Option<Vec<u8>>>);
option_to_ruby!(Vec<Option<Int>>);
option_to_ruby!(Vec<Option<Float>>);
option_to_ruby!(Vec<Option<String>>);
option_to_ruby!(Vec<Option<&'a str>>);
// Hash of primitive keys to values
option_to_ruby!(HashMap<bool, Value>);
option_to_ruby!(HashMap<bool, bool>);
option_to_ruby!(HashMap<bool, Vec<u8>>);
option_to_ruby!(HashMap<bool, Int>);
option_to_ruby!(HashMap<bool, Float>);
option_to_ruby!(HashMap<bool, String>);
option_to_ruby!(HashMap<bool, &'a str>);
option_to_ruby!(HashMap<Vec<u8>, Value>);
option_to_ruby!(HashMap<Vec<u8>, bool>);
option_to_ruby!(HashMap<Vec<u8>, Vec<u8>>);
option_to_ruby!(HashMap<Vec<u8>, Int>);
option_to_ruby!(HashMap<Vec<u8>, Float>);
option_to_ruby!(HashMap<Vec<u8>, String>);
option_to_ruby!(HashMap<Vec<u8>, &'a str>);
option_to_ruby!(HashMap<Int, Value>);
option_to_ruby!(HashMap<Int, bool>);
option_to_ruby!(HashMap<Int, Vec<u8>>);
option_to_ruby!(HashMap<Int, Int>);
option_to_ruby!(HashMap<Int, Float>);
option_to_ruby!(HashMap<Int, String>);
option_to_ruby!(HashMap<Int, &'a str>);
option_to_ruby!(HashMap<String, Value>);
option_to_ruby!(HashMap<String, bool>);
option_to_ruby!(HashMap<String, Vec<u8>>);
option_to_ruby!(HashMap<String, Int>);
option_to_ruby!(HashMap<String, Float>);
option_to_ruby!(HashMap<String, String>);
option_to_ruby!(HashMap<String, &'a str>);
option_to_ruby!(HashMap<&'a str, Value>);
option_to_ruby!(HashMap<&'a str, bool>);
option_to_ruby!(HashMap<&'a str, Vec<u8>>);
option_to_ruby!(HashMap<&'a str, Int>);
option_to_ruby!(HashMap<&'a str, Float>);
option_to_ruby!(HashMap<&'a str, String>);
option_to_ruby!(HashMap<&'a str, &'a str>);
// Hash of optional keys to values
option_to_ruby!(HashMap<Option<bool>, Value>);
option_to_ruby!(HashMap<Option<bool>, bool>);
option_to_ruby!(HashMap<Option<bool>, Vec<u8>>);
option_to_ruby!(HashMap<Option<bool>, Int>);
option_to_ruby!(HashMap<Option<bool>, Float>);
option_to_ruby!(HashMap<Option<bool>, String>);
option_to_ruby!(HashMap<Option<bool>, &'a str>);
option_to_ruby!(HashMap<Option<Vec<u8>>, Value>);
option_to_ruby!(HashMap<Option<Vec<u8>>, bool>);
option_to_ruby!(HashMap<Option<Vec<u8>>, Vec<u8>>);
option_to_ruby!(HashMap<Option<Vec<u8>>, Int>);
option_to_ruby!(HashMap<Option<Vec<u8>>, Float>);
option_to_ruby!(HashMap<Option<Vec<u8>>, String>);
option_to_ruby!(HashMap<Option<Vec<u8>>, &'a str>);
option_to_ruby!(HashMap<Option<Int>, Value>);
option_to_ruby!(HashMap<Option<Int>, bool>);
option_to_ruby!(HashMap<Option<Int>, Vec<u8>>);
option_to_ruby!(HashMap<Option<Int>, Int>);
option_to_ruby!(HashMap<Option<Int>, Float>);
option_to_ruby!(HashMap<Option<Int>, String>);
option_to_ruby!(HashMap<Option<Int>, &'a str>);
option_to_ruby!(HashMap<Option<String>, Value>);
option_to_ruby!(HashMap<Option<String>, bool>);
option_to_ruby!(HashMap<Option<String>, Vec<u8>>);
option_to_ruby!(HashMap<Option<String>, Int>);
option_to_ruby!(HashMap<Option<String>, Float>);
option_to_ruby!(HashMap<Option<String>, String>);
option_to_ruby!(HashMap<Option<String>, &'a str>);
option_to_ruby!(HashMap<Option<&'a str>, Value>);
option_to_ruby!(HashMap<Option<&'a str>, bool>);
option_to_ruby!(HashMap<Option<&'a str>, Vec<u8>>);
option_to_ruby!(HashMap<Option<&'a str>, Int>);
option_to_ruby!(HashMap<Option<&'a str>, Float>);
option_to_ruby!(HashMap<Option<&'a str>, String>);
option_to_ruby!(HashMap<Option<&'a str>, &'a str>);
// Hash of primitive keys to optional values
option_to_ruby!(HashMap<bool, Option<Value>>);
option_to_ruby!(HashMap<bool, Option<bool>>);
option_to_ruby!(HashMap<bool, Option<Vec<u8>>>);
option_to_ruby!(HashMap<bool, Option<Int>>);
option_to_ruby!(HashMap<bool, Option<Float>>);
option_to_ruby!(HashMap<bool, Option<String>>);
option_to_ruby!(HashMap<bool, Option<&'a str>>);
option_to_ruby!(HashMap<Vec<u8>, Option<Value>>);
option_to_ruby!(HashMap<Vec<u8>, Option<bool>>);
option_to_ruby!(HashMap<Vec<u8>, Option<Vec<u8>>>);
option_to_ruby!(HashMap<Vec<u8>, Option<Int>>);
option_to_ruby!(HashMap<Vec<u8>, Option<Float>>);
option_to_ruby!(HashMap<Vec<u8>, Option<String>>);
option_to_ruby!(HashMap<Vec<u8>, Option<&'a str>>);
option_to_ruby!(HashMap<Int, Option<Value>>);
option_to_ruby!(HashMap<Int, Option<bool>>);
option_to_ruby!(HashMap<Int, Option<Vec<u8>>>);
option_to_ruby!(HashMap<Int, Option<Int>>);
option_to_ruby!(HashMap<Int, Option<Float>>);
option_to_ruby!(HashMap<Int, Option<String>>);
option_to_ruby!(HashMap<Int, Option<&'a str>>);
option_to_ruby!(HashMap<String, Option<Value>>);
option_to_ruby!(HashMap<String, Option<bool>>);
option_to_ruby!(HashMap<String, Option<Vec<u8>>>);
option_to_ruby!(HashMap<String, Option<Int>>);
option_to_ruby!(HashMap<String, Option<Float>>);
option_to_ruby!(HashMap<String, Option<String>>);
option_to_ruby!(HashMap<String, Option<&'a str>>);
option_to_ruby!(HashMap<&'a str, Option<Value>>);
option_to_ruby!(HashMap<&'a str, Option<bool>>);
option_to_ruby!(HashMap<&'a str, Option<Vec<u8>>>);
option_to_ruby!(HashMap<&'a str, Option<Int>>);
option_to_ruby!(HashMap<&'a str, Option<Float>>);
option_to_ruby!(HashMap<&'a str, Option<String>>);
option_to_ruby!(HashMap<&'a str, Option<&'a str>>);
// Hash of primitive optional keys to optional values
option_to_ruby!(HashMap<Option<bool>, Option<Value>>);
option_to_ruby!(HashMap<Option<bool>, Option<bool>>);
option_to_ruby!(HashMap<Option<bool>, Option<Vec<u8>>>);
option_to_ruby!(HashMap<Option<bool>, Option<Int>>);
option_to_ruby!(HashMap<Option<bool>, Option<Float>>);
option_to_ruby!(HashMap<Option<bool>, Option<String>>);
option_to_ruby!(HashMap<Option<bool>, Option<&'a str>>);
option_to_ruby!(HashMap<Option<Vec<u8>>, Option<Value>>);
option_to_ruby!(HashMap<Option<Vec<u8>>, Option<bool>>);
option_to_ruby!(HashMap<Option<Vec<u8>>, Option<Vec<u8>>>);
option_to_ruby!(HashMap<Option<Vec<u8>>, Option<Int>>);
option_to_ruby!(HashMap<Option<Vec<u8>>, Option<Float>>);
option_to_ruby!(HashMap<Option<Vec<u8>>, Option<String>>);
option_to_ruby!(HashMap<Option<Vec<u8>>, Option<&'a str>>);
option_to_ruby!(HashMap<Option<Int>, Option<Value>>);
option_to_ruby!(HashMap<Option<Int>, Option<bool>>);
option_to_ruby!(HashMap<Option<Int>, Option<Vec<u8>>>);
option_to_ruby!(HashMap<Option<Int>, Option<Int>>);
option_to_ruby!(HashMap<Option<Int>, Option<Float>>);
option_to_ruby!(HashMap<Option<Int>, Option<String>>);
option_to_ruby!(HashMap<Option<Int>, Option<&'a str>>);
option_to_ruby!(HashMap<Option<String>, Option<Value>>);
option_to_ruby!(HashMap<Option<String>, Option<bool>>);
option_to_ruby!(HashMap<Option<String>, Option<Vec<u8>>>);
option_to_ruby!(HashMap<Option<String>, Option<Int>>);
option_to_ruby!(HashMap<Option<String>, Option<Float>>);
option_to_ruby!(HashMap<Option<String>, Option<String>>);
option_to_ruby!(HashMap<Option<String>, Option<&'a str>>);
option_to_ruby!(HashMap<Option<&'a str>, Option<Value>>);
option_to_ruby!(HashMap<Option<&'a str>, Option<bool>>);
option_to_ruby!(HashMap<Option<&'a str>, Option<Vec<u8>>>);
option_to_ruby!(HashMap<Option<&'a str>, Option<Int>>);
option_to_ruby!(HashMap<Option<&'a str>, Option<Float>>);
option_to_ruby!(HashMap<Option<&'a str>, Option<String>>);
option_to_ruby!(HashMap<Option<&'a str>, Option<&'a str>>);
// Primitives
ruby_to_option!(bool);
ruby_to_option!(Vec<u8>);
ruby_to_option!(Int);
ruby_to_option!(Float);
ruby_to_option!(String);
ruby_to_option!(&'a str);
// Array of primitives
ruby_to_option!(Vec<Value>);
ruby_to_option!(Vec<bool>);
ruby_to_option!(Vec<Vec<u8>>);
ruby_to_option!(Vec<Int>);
ruby_to_option!(Vec<Float>);
ruby_to_option!(Vec<String>);
ruby_to_option!(Vec<&'a str>);
// Array of optional primitives
ruby_to_option!(Vec<Option<Value>>);
ruby_to_option!(Vec<Option<bool>>);
ruby_to_option!(Vec<Option<Vec<u8>>>);
ruby_to_option!(Vec<Option<Int>>);
ruby_to_option!(Vec<Option<Float>>);
ruby_to_option!(Vec<Option<String>>);
ruby_to_option!(Vec<Option<&'a str>>);
// Hash of primitive keys to values
ruby_to_option!(HashMap<bool, Value>);
ruby_to_option!(HashMap<bool, bool>);
ruby_to_option!(HashMap<bool, Vec<u8>>);
ruby_to_option!(HashMap<bool, Int>);
ruby_to_option!(HashMap<bool, Float>);
ruby_to_option!(HashMap<bool, String>);
ruby_to_option!(HashMap<bool, &'a str>);
ruby_to_option!(HashMap<Vec<u8>, Value>);
ruby_to_option!(HashMap<Vec<u8>, bool>);
ruby_to_option!(HashMap<Vec<u8>, Vec<u8>>);
ruby_to_option!(HashMap<Vec<u8>, Int>);
ruby_to_option!(HashMap<Vec<u8>, Float>);
ruby_to_option!(HashMap<Vec<u8>, String>);
ruby_to_option!(HashMap<Vec<u8>, &'a str>);
ruby_to_option!(HashMap<Int, Value>);
ruby_to_option!(HashMap<Int, bool>);
ruby_to_option!(HashMap<Int, Vec<u8>>);
ruby_to_option!(HashMap<Int, Int>);
ruby_to_option!(HashMap<Int, Float>);
ruby_to_option!(HashMap<Int, String>);
ruby_to_option!(HashMap<Int, &'a str>);
ruby_to_option!(HashMap<String, Value>);
ruby_to_option!(HashMap<String, bool>);
ruby_to_option!(HashMap<String, Vec<u8>>);
ruby_to_option!(HashMap<String, Int>);
ruby_to_option!(HashMap<String, Float>);
ruby_to_option!(HashMap<String, String>);
ruby_to_option!(HashMap<String, &'a str>);
ruby_to_option!(HashMap<&'a str, Value>);
ruby_to_option!(HashMap<&'a str, bool>);
ruby_to_option!(HashMap<&'a str, Vec<u8>>);
ruby_to_option!(HashMap<&'a str, Int>);
ruby_to_option!(HashMap<&'a str, Float>);
ruby_to_option!(HashMap<&'a str, String>);
ruby_to_option!(HashMap<&'a str, &'a str>);
// Hash of optional keys to values
ruby_to_option!(HashMap<Option<bool>, Value>);
ruby_to_option!(HashMap<Option<bool>, bool>);
ruby_to_option!(HashMap<Option<bool>, Vec<u8>>);
ruby_to_option!(HashMap<Option<bool>, Int>);
ruby_to_option!(HashMap<Option<bool>, Float>);
ruby_to_option!(HashMap<Option<bool>, String>);
ruby_to_option!(HashMap<Option<bool>, &'a str>);
ruby_to_option!(HashMap<Option<Vec<u8>>, Value>);
ruby_to_option!(HashMap<Option<Vec<u8>>, bool>);
ruby_to_option!(HashMap<Option<Vec<u8>>, Vec<u8>>);
ruby_to_option!(HashMap<Option<Vec<u8>>, Int>);
ruby_to_option!(HashMap<Option<Vec<u8>>, Float>);
ruby_to_option!(HashMap<Option<Vec<u8>>, String>);
ruby_to_option!(HashMap<Option<Vec<u8>>, &'a str>);
ruby_to_option!(HashMap<Option<Int>, Value>);
ruby_to_option!(HashMap<Option<Int>, bool>);
ruby_to_option!(HashMap<Option<Int>, Vec<u8>>);
ruby_to_option!(HashMap<Option<Int>, Int>);
ruby_to_option!(HashMap<Option<Int>, Float>);
ruby_to_option!(HashMap<Option<Int>, String>);
ruby_to_option!(HashMap<Option<Int>, &'a str>);
ruby_to_option!(HashMap<Option<String>, Value>);
ruby_to_option!(HashMap<Option<String>, bool>);
ruby_to_option!(HashMap<Option<String>, Vec<u8>>);
ruby_to_option!(HashMap<Option<String>, Int>);
ruby_to_option!(HashMap<Option<String>, Float>);
ruby_to_option!(HashMap<Option<String>, String>);
ruby_to_option!(HashMap<Option<String>, &'a str>);
ruby_to_option!(HashMap<Option<&'a str>, Value>);
ruby_to_option!(HashMap<Option<&'a str>, bool>);
ruby_to_option!(HashMap<Option<&'a str>, Vec<u8>>);
ruby_to_option!(HashMap<Option<&'a str>, Int>);
ruby_to_option!(HashMap<Option<&'a str>, Float>);
ruby_to_option!(HashMap<Option<&'a str>, String>);
ruby_to_option!(HashMap<Option<&'a str>, &'a str>);
// Hash of primitive keys to optional values
ruby_to_option!(HashMap<bool, Option<Value>>);
ruby_to_option!(HashMap<bool, Option<bool>>);
ruby_to_option!(HashMap<bool, Option<Vec<u8>>>);
ruby_to_option!(HashMap<bool, Option<Int>>);
ruby_to_option!(HashMap<bool, Option<Float>>);
ruby_to_option!(HashMap<bool, Option<String>>);
ruby_to_option!(HashMap<bool, Option<&'a str>>);
ruby_to_option!(HashMap<Vec<u8>, Option<Value>>);
ruby_to_option!(HashMap<Vec<u8>, Option<bool>>);
ruby_to_option!(HashMap<Vec<u8>, Option<Vec<u8>>>);
ruby_to_option!(HashMap<Vec<u8>, Option<Int>>);
ruby_to_option!(HashMap<Vec<u8>, Option<Float>>);
ruby_to_option!(HashMap<Vec<u8>, Option<String>>);
ruby_to_option!(HashMap<Vec<u8>, Option<&'a str>>);
ruby_to_option!(HashMap<Int, Option<Value>>);
ruby_to_option!(HashMap<Int, Option<bool>>);
ruby_to_option!(HashMap<Int, Option<Vec<u8>>>);
ruby_to_option!(HashMap<Int, Option<Int>>);
ruby_to_option!(HashMap<Int, Option<Float>>);
ruby_to_option!(HashMap<Int, Option<String>>);
ruby_to_option!(HashMap<Int, Option<&'a str>>);
ruby_to_option!(HashMap<String, Option<Value>>);
ruby_to_option!(HashMap<String, Option<bool>>);
ruby_to_option!(HashMap<String, Option<Vec<u8>>>);
ruby_to_option!(HashMap<String, Option<Int>>);
ruby_to_option!(HashMap<String, Option<Float>>);
ruby_to_option!(HashMap<String, Option<String>>);
ruby_to_option!(HashMap<String, Option<&'a str>>);
ruby_to_option!(HashMap<&'a str, Option<Value>>);
ruby_to_option!(HashMap<&'a str, Option<bool>>);
ruby_to_option!(HashMap<&'a str, Option<Vec<u8>>>);
ruby_to_option!(HashMap<&'a str, Option<Int>>);
ruby_to_option!(HashMap<&'a str, Option<Float>>);
ruby_to_option!(HashMap<&'a str, Option<String>>);
ruby_to_option!(HashMap<&'a str, Option<&'a str>>);
// Hash of primitive optional keys to optional values
ruby_to_option!(HashMap<Option<bool>, Option<Value>>);
ruby_to_option!(HashMap<Option<bool>, Option<bool>>);
ruby_to_option!(HashMap<Option<bool>, Option<Vec<u8>>>);
ruby_to_option!(HashMap<Option<bool>, Option<Int>>);
ruby_to_option!(HashMap<Option<bool>, Option<Float>>);
ruby_to_option!(HashMap<Option<bool>, Option<String>>);
ruby_to_option!(HashMap<Option<bool>, Option<&'a str>>);
ruby_to_option!(HashMap<Option<Vec<u8>>, Option<Value>>);
ruby_to_option!(HashMap<Option<Vec<u8>>, Option<bool>>);
ruby_to_option!(HashMap<Option<Vec<u8>>, Option<Vec<u8>>>);
ruby_to_option!(HashMap<Option<Vec<u8>>, Option<Int>>);
ruby_to_option!(HashMap<Option<Vec<u8>>, Option<Float>>);
ruby_to_option!(HashMap<Option<Vec<u8>>, Option<String>>);
ruby_to_option!(HashMap<Option<Vec<u8>>, Option<&'a str>>);
ruby_to_option!(HashMap<Option<Int>, Option<Value>>);
ruby_to_option!(HashMap<Option<Int>, Option<bool>>);
ruby_to_option!(HashMap<Option<Int>, Option<Vec<u8>>>);
ruby_to_option!(HashMap<Option<Int>, Option<Int>>);
ruby_to_option!(HashMap<Option<Int>, Option<Float>>);
ruby_to_option!(HashMap<Option<Int>, Option<String>>);
ruby_to_option!(HashMap<Option<Int>, Option<&'a str>>);
ruby_to_option!(HashMap<Option<String>, Option<Value>>);
ruby_to_option!(HashMap<Option<String>, Option<bool>>);
ruby_to_option!(HashMap<Option<String>, Option<Vec<u8>>>);
ruby_to_option!(HashMap<Option<String>, Option<Int>>);
ruby_to_option!(HashMap<Option<String>, Option<Float>>);
ruby_to_option!(HashMap<Option<String>, Option<String>>);
ruby_to_option!(HashMap<Option<String>, Option<&'a str>>);
ruby_to_option!(HashMap<Option<&'a str>, Option<Value>>);
ruby_to_option!(HashMap<Option<&'a str>, Option<bool>>);
ruby_to_option!(HashMap<Option<&'a str>, Option<Vec<u8>>>);
ruby_to_option!(HashMap<Option<&'a str>, Option<Int>>);
ruby_to_option!(HashMap<Option<&'a str>, Option<Float>>);
ruby_to_option!(HashMap<Option<&'a str>, Option<String>>);
ruby_to_option!(HashMap<Option<&'a str>, Option<&'a str>>);
| 41.739024 | 90 | 0.717817 |
5ddc84c0bae9dfa5e2a5ebf22769e38062048935 | 1,182 | // Copyright 2016 bluss
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cmp::min;
pub struct RangeChunk { i: usize, n: usize, chunk: usize }
/// Create an iterator that splits `n` into chunks of size `chunk`;
/// the last item can be an uneven chunk.
pub fn range_chunk(n: usize, chunk: usize) -> RangeChunk {
    RangeChunk {
        i: 0,
        n,
        chunk,
    }
}
impl Iterator for RangeChunk {
type Item = (usize, usize);
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.n == 0 {
None
} else {
let i = self.i;
let rem = min(self.n, self.chunk);
self.i += 1;
self.n -= rem;
Some((i, rem))
}
}
}
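// A short illustration (not part of the original source): splitting 10 into
// chunks of 4 yields `(index, chunk_len)` pairs with an uneven final chunk.
//
//     let chunks: Vec<_> = range_chunk(10, 4).collect();
//     assert_eq!(chunks, vec![(0, 4), (1, 4), (2, 2)]);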
#[inline]
pub fn round_up_to(x: usize, multiple_of: usize) -> usize {
let (mut d, r) = (x / multiple_of, x % multiple_of);
if r > 0 { d += 1; }
d * multiple_of
}
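// Worked examples (not part of the original source):
//
//     assert_eq!(round_up_to(13, 8), 16);
//     assert_eq!(round_up_to(16, 8), 16);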
| 25.695652 | 68 | 0.575296 |
1c03b56478cd2352a8c71676fec40dfc487f7dde | 861 | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#![allow(dead_code)]
use fidl::endpoints::RequestStream;
use fidl_fidl_test_protocoleventadd as fidl_lib;
use futures::prelude::*;
// [START contents]
fn send_events(stream: fidl_lib::ExampleRequestStream) -> Result<(), fidl::Error> {
let control_handle = stream.control_handle();
control_handle.send_on_existing_event()?;
Ok(())
}
async fn receive_events(client: fidl_lib::ExampleProxy) -> Result<(), fidl::Error> {
let mut event_stream = client.take_event_stream();
while let Some(event) = event_stream.try_next().await? {
match event {
fidl_lib::ExampleEvent::OnExistingEvent { .. } => {}
}
}
Ok(())
}
// [END contents]
fn main() {}
| 28.7 | 84 | 0.681765 |
3ac31fde31fd62ca7ec722a91c89db53e684794f | 754 | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
trait Foo {
extern fn borrow(&self);
extern fn take(self: Box<Self>);
}
struct Bar;
impl Foo for Bar {
extern fn borrow(&self) {}
extern fn take(self: Box<Self>) {}
}
fn main() {
let foo: Box<dyn Foo> = Box::new(Bar);
foo.borrow();
foo.take()
}
| 26.928571 | 68 | 0.680371 |
ac82549167b27322770e5938fd5f3c97179fdd04 | 102 | #[doc = "Reader of register CH8_AL2_CTRL"]
pub type R = crate::R<u32, super::CH8_AL2_CTRL>;
impl R {}
| 25.5 | 48 | 0.686275 |
e5c4ffcd0b3330dc2083a021b07e4af3bddbc9aa | 1,374 | use proc_macro::TokenStream;
use quote::quote;
use syn::DeriveInput;
mod application_command;
mod event_handler;
mod application_subgroup;
mod application_subcommand;
#[proc_macro_attribute]
/// Generates additional code needed to register an EventHandler
pub fn event_handler(_args: TokenStream, input: TokenStream) -> TokenStream {
event_handler::gen_event_handler(_args, input)
}
#[proc_macro_attribute]
/// Generates additional code needed to register a Command
pub fn command(_args: TokenStream, input: TokenStream) -> TokenStream {
application_command::gen_event_handler(_args, input)
}
#[proc_macro_attribute]
/// Generates additional code needed to register a Subcommand Group
pub fn subcommand_group(_args: TokenStream, input: TokenStream) -> TokenStream {
application_subgroup::gen_subgroup_handler(_args, input)
}
#[proc_macro_attribute]
/// Generates additional code needed to register a Subcommand
pub fn subcommand(_args: TokenStream, input: TokenStream) -> TokenStream {
application_subcommand::gen_sub_handler(_args, input)
}
#[proc_macro_derive(CommandArg)]
/// Implements CommandArg for a struct
pub fn command_arg_derive(input: TokenStream) -> TokenStream {
let ast = syn::parse::<DeriveInput>(input).unwrap();
let name = ast.ident;
let gen = quote! {
impl CommandArg for #name {
}
};
gen.into()
}
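// For illustration (not in the original): at a use site with a `CommandArg`
// trait in scope, the derive above expands to an empty impl, e.g.
//
//     #[derive(CommandArg)]
//     struct Ping;
//
// becomes `impl CommandArg for Ping {}`.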
| 31.227273 | 80 | 0.759098 |
e5ef3e85f1b5218d06c9aec3cb0b50254c943d6d | 30,604 | extern crate byteorder;
use std::io;
use std::io::{Read, Write};
use std::net::TcpStream;
use super::bridge::{Bridge, BridgeError};
use super::riscv::{RiscvCpu, RiscvCpuError};
use log::{debug, error, info};
use crate::gdb::byteorder::ByteOrder;
use byteorder::{BigEndian, NativeEndian};
const SUPPORTED_QUERIES: &[u8] = b"PacketSize=3fff;qXfer:features:read+;qXfer:threads:read+;qXfer:memory-map:read-;QStartNoAckMode+;vContSupported+";
pub struct GdbController {
connection: TcpStream,
}
impl Write for GdbController {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.connection.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.connection.flush()
}
}
impl GdbController {
pub fn gdb_send(&mut self, inp: &[u8]) -> io::Result<()> {
let mut buffer = [0; 16388];
let mut checksum: u8 = 0;
buffer[0] = '$' as u8;
for i in 0..inp.len() {
buffer[i + 1] = inp[i];
checksum = checksum.wrapping_add(inp[i]);
}
let checksum_str = &format!("{:02x}", checksum);
let checksum_bytes = checksum_str.as_bytes();
buffer[inp.len() + 1] = '#' as u8;
buffer[inp.len() + 2] = checksum_bytes[0];
buffer[inp.len() + 3] = checksum_bytes[1];
let (to_write, _rest) = buffer.split_at(inp.len() + 4);
debug!(
" > Writing {} bytes: {}",
to_write.len(),
String::from_utf8_lossy(&to_write)
);
self.connection.write(&to_write)?;
Ok(())
}
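    /// Send a GDB `O` (console output) packet: every byte of `msg` is
    /// hex-encoded, so e.g. "Hi" goes out with the payload `O4869`.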
pub fn print_string(&mut self, msg: &str) -> io::Result<()> {
debug!("Printing string {} to GDB", msg);
let mut strs: Vec<String> = msg
.as_bytes()
.iter()
.map(|b| format!("{:02X}", b))
.collect();
strs.insert(0, "O".to_string());
let joined = strs.join("");
self.gdb_send(joined.as_bytes())
}
}
pub struct GdbServer {
connection: TcpStream,
no_ack_mode: bool,
is_alive: bool,
last_signal: u8,
}
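/// Reverse the byte order of a 32-bit word. Register and memory words cross
/// the GDB wire as hex in target byte order, so values are swapped between
/// the host's natural `u32` formatting and the little-endian target layout.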
fn swab(src: u32) -> u32 {
(src << 24) & 0xff000000
| (src << 8) & 0x00ff0000
| (src >> 8) & 0x0000ff00
| (src >> 24) & 0x000000ff
}
pub fn parse_u32(value: &str) -> Result<u32, GdbServerError> {
match u32::from_str_radix(value, 16) {
Ok(o) => Ok(o),
Err(e) => Err(GdbServerError::NumberParseError(value.to_owned(), e)),
}
}
pub fn parse_i32(value: &str) -> Result<i32, GdbServerError> {
match i32::from_str_radix(value, 16) {
Ok(o) => Ok(o),
Err(e) => Err(GdbServerError::NumberParseError(value.to_owned(), e)),
}
}
fn gdb_unescape(input: &[u8]) -> Vec<u8> {
    // RSP escapes a byte B by sending '}' (0x7d) followed by B ^ 0x20.
    // Decode by dropping each '}' marker and un-XORing the byte after it.
    let mut out = Vec::with_capacity(input.len());
    let mut escaped = false;
    for &byte in input {
        if escaped {
            out.push(byte ^ 0x20);
            escaped = false;
        } else if byte == b'}' {
            escaped = true;
        } else {
            out.push(byte);
        }
    }
    out
}
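// Illustrative test (not in the original file) for the escape rule above:
// '}' (0x7d) followed by B ^ 0x20 decodes back to the byte B.
#[cfg(test)]
mod gdb_unescape_tests {
    use super::gdb_unescape;

    #[test]
    fn decodes_escaped_bytes() {
        // "}]" is the escaped form of '}' itself (0x5d ^ 0x20 == 0x7d).
        assert_eq!(gdb_unescape(b"a}]b"), b"a}b".to_vec());
        // Input without escape markers passes through unchanged.
        assert_eq!(gdb_unescape(b"plain"), b"plain".to_vec());
    }
}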
pub fn parse_u64(value: &str) -> Result<u64, GdbServerError> {
match u64::from_str_radix(value, 16) {
Ok(o) => Ok(o),
Err(e) => Err(GdbServerError::NumberParseError(value.to_owned(), e)),
}
}
#[derive(Debug)]
pub enum GdbServerError {
/// Rust standard IO error
IoError(io::Error),
/// The network connection has closed
ConnectionClosed,
/// We were unable to parse an integer
NumberParseError(String, std::num::ParseIntError),
/// Something happened with the CPU
CpuError(RiscvCpuError),
/// The bridge failed somehow
BridgeError(BridgeError),
/// Something strange was received
ProtocolError,
/// Client tried to give us a breakpoint we didn't recognize
UnknownBreakpointType(String),
}
impl std::convert::From<BridgeError> for GdbServerError {
fn from(e: BridgeError) -> Self {
GdbServerError::BridgeError(e)
}
}
impl std::convert::From<RiscvCpuError> for GdbServerError {
fn from(e: RiscvCpuError) -> Self {
GdbServerError::CpuError(e)
}
}
impl std::convert::From<io::Error> for GdbServerError {
fn from(e: io::Error) -> Self {
GdbServerError::IoError(e)
}
}
#[derive(Debug, PartialEq)]
pub enum BreakPointType {
BreakSoft,
BreakHard,
WatchWrite,
WatchRead,
WatchAccess,
}
impl BreakPointType {
fn from_str(r: &str) -> Result<BreakPointType, GdbServerError> {
match r {
"0" => Ok(BreakPointType::BreakSoft),
"1" => Ok(BreakPointType::BreakHard),
"2" => Ok(BreakPointType::WatchWrite),
"3" => Ok(BreakPointType::WatchRead),
"4" => Ok(BreakPointType::WatchAccess),
c => Err(GdbServerError::UnknownBreakpointType(c.to_string())),
}
}
}
#[derive(Debug, PartialEq)]
pub enum GdbCommand {
/// Server gave an unrecognized command
Unknown(String),
    /// vMustReplyEmpty: sent by GDB to probe how the stub reacts to unknown
    /// packets; it must be answered exactly like Unknown(String), i.e. with
    /// an empty response.
MustReplyEmpty,
/// qSupported
SupportedQueries(String),
/// QStartNoAckMode
StartNoAckMode,
/// D
Disconnect,
/// Hg#
SetCurrentThread(u64),
/// Hc# (# may be -1)
ContinueThread(i32),
/// ?
LastSignalPacket,
/// qfThreadInfo
GetThreadInfo,
/// qC
GetCurrentThreadId,
/// qAttached
CheckIsAttached,
/// g
GetRegisters,
/// p#
GetRegister(u32),
/// P#=#
SetRegister(u32, u32),
/// qSymbol::
SymbolsReady,
/// m#,#
ReadMemory(u32 /* addr */, u32 /* length */),
/// M#,#:#
WriteMemory(
u32, /* addr */
u32, /* length */
Vec<u32>, /* value */
),
/// vCont?
VContQuery,
/// vCont;c
VContContinue,
/// vCont;C04:0;c
VContContinueFromSignal(String),
/// vCont;s:0;c
VContStepFromSignal(String),
/// c
Continue,
/// s
Step,
/// Ctrl-C
Interrupt,
/// qRcmd,
MonitorCommand(String),
/// Z0,###,2
AddBreakpoint(
BreakPointType,
u32, /* address */
u32, /* length */
),
/// z0,###,2
RemoveBreakpoint(
BreakPointType,
u32, /* address */
u32, /* length */
),
/// qOffsets
GetOffsets,
/// qXfer:memory-map:read::
ReadMemoryMap(u32 /* offset */, u32 /* len */),
/// qXfer:features:read:target.xml:0,1000
ReadFeature(
String, /* filename */
u32, /* offset */
u32, /* len */
),
/// qTStatus
TraceStatusQuery,
/// qXfer:threads:read::0,1000
ReadThreads(u32 /* offset */, u32 /* len */),
}
impl GdbServer {
pub fn new(connection: TcpStream) -> Result<GdbServer, GdbServerError> {
Ok(GdbServer {
connection,
no_ack_mode: false,
is_alive: true,
last_signal: 0,
})
}
fn packet_to_command(&self, raw_pkt: &[u8]) -> Result<GdbCommand, GdbServerError> {
let pkt = String::from_utf8_lossy(raw_pkt).to_string();
debug!("Raw GDB packet: {}", pkt);
if pkt == "qSupported" || pkt.starts_with("qSupported:") {
Ok(GdbCommand::SupportedQueries(pkt))
} else if pkt == "D" {
Ok(GdbCommand::Disconnect)
} else if pkt == "QStartNoAckMode" {
Ok(GdbCommand::StartNoAckMode)
} else if pkt == "qAttached" {
Ok(GdbCommand::CheckIsAttached)
} else if pkt == "qOffsets" {
Ok(GdbCommand::GetOffsets)
} else if pkt == "qTStatus" {
Ok(GdbCommand::TraceStatusQuery)
} else if pkt.starts_with("qXfer:memory-map:read::") {
let pkt = pkt.trim_start_matches("qXfer:memory-map:read::");
let offsets: Vec<&str> = pkt.split(',').collect();
let offset = parse_u32(offsets[0])?;
let len = parse_u32(offsets[1])?;
Ok(GdbCommand::ReadMemoryMap(offset, len))
} else if pkt.starts_with("qXfer:features:read:") {
let pkt = pkt.trim_start_matches("qXfer:features:read:");
let fields: Vec<&str> = pkt.split(':').collect();
let offsets: Vec<&str> = fields[1].split(',').collect();
let offset = parse_u32(offsets[0])?;
let len = parse_u32(offsets[1])?;
Ok(GdbCommand::ReadFeature(fields[0].to_string(), offset, len))
} else if pkt.starts_with("qXfer:threads:read::") {
let pkt = pkt.trim_start_matches("qXfer:threads:read::");
let offsets: Vec<&str> = pkt.split(',').collect();
let offset = parse_u32(offsets[0])?;
let len = parse_u32(offsets[1])?;
Ok(GdbCommand::ReadThreads(offset, len))
} else if pkt.starts_with("Z") {
let pkt = pkt.trim_start_matches("Z");
let fields: Vec<&str> = pkt.split(',').collect();
let bptype = BreakPointType::from_str(fields[0])?;
let address = parse_u32(fields[1])?;
let size = parse_u32(fields[2])?;
Ok(GdbCommand::AddBreakpoint(bptype, address, size))
} else if pkt.starts_with("z") {
let pkt = pkt.trim_start_matches("z");
let fields: Vec<&str> = pkt.split(',').collect();
let bptype = BreakPointType::from_str(fields[0])?;
let address = parse_u32(fields[1])?;
let size = parse_u32(fields[2])?;
Ok(GdbCommand::RemoveBreakpoint(bptype, address, size))
} else if pkt.starts_with("qRcmd,") {
let pkt = pkt.trim_start_matches("qRcmd,");
let pkt_bytes = pkt.as_bytes();
let mut tmp1 = Vec::new();
let mut acc = 0;
for i in 0..pkt.len() {
let nybble = if pkt_bytes[i] >= 0x30 && pkt_bytes[i] <= 0x39 {
pkt_bytes[i] - 0x30
} else if pkt_bytes[i] >= 0x61 && pkt_bytes[i] <= 0x66 {
pkt_bytes[i] + 10 - 0x61
} else if pkt_bytes[i] >= 0x41 && pkt_bytes[i] <= 0x46 {
pkt_bytes[i] + 10 - 0x41
} else {
0
};
if i & 1 == 1 {
tmp1.push((acc << 4) | nybble);
acc = 0;
} else {
acc = nybble;
}
}
Ok(GdbCommand::MonitorCommand(
String::from_utf8_lossy(&tmp1).to_string(),
))
} else if pkt == "g" {
Ok(GdbCommand::GetRegisters)
} else if pkt.starts_with("P") {
let pkt = pkt.trim_start_matches("P").to_string();
let v: Vec<&str> = pkt.split('=').collect();
let addr = parse_u32(v[0])?;
let value = swab(parse_u32(v[1])?);
Ok(GdbCommand::SetRegister(addr, value))
} else if pkt == "c" {
Ok(GdbCommand::Continue)
} else if pkt == "s" {
Ok(GdbCommand::Step)
} else if pkt.starts_with("m") {
let pkt = pkt.trim_start_matches("m").to_string();
let v: Vec<&str> = pkt.split(',').collect();
let addr = parse_u32(v[0])?;
let length = parse_u32(v[1])?;
Ok(GdbCommand::ReadMemory(addr, length))
} else if pkt.starts_with("M") {
let pkt = pkt.trim_start_matches("M").to_string();
let d: Vec<&str> = pkt.split(':').collect();
let v: Vec<&str> = d[0].split(',').collect();
let addr = parse_u32(v[0])?;
let length = parse_u32(v[1])?;
let value = swab(parse_u32(d[1])?);
Ok(GdbCommand::WriteMemory(addr, length, vec![value]))
} else if pkt.starts_with("X") {
let (_opcode, data) = match raw_pkt.split_first() {
None => return Err(GdbServerError::ProtocolError),
Some(s) => s,
};
// Packet format: Xaddr,count:data
// Look for ":"
let mut delimiter_offset = None;
for (idx, c) in data.iter().enumerate() {
if *c == ':' as u8 {
delimiter_offset = Some(idx);
break;
}
}
let delimiter_offset = match delimiter_offset {
Some(s) => s,
None => return Err(GdbServerError::ProtocolError),
};
let (description, bin_data_plus) = data.split_at(delimiter_offset);
let bin_data_plus = bin_data_plus.split_first();
let description = String::from_utf8_lossy(&description).to_string();
let v: Vec<&str> = description.split(',').collect();
let addr = parse_u32(v[0])?;
let length = parse_u32(v[1])?;
let mut values = vec![];
if let Some((_delimiter, bin_data)) = bin_data_plus {
let bin_data = gdb_unescape(bin_data);
for value in bin_data.chunks_exact(4) {
values.push(swab(BigEndian::read_u32(&value)));
}
let remainder = bin_data.chunks_exact(4).remainder();
if remainder.len() > 0 {
let mut remainder = remainder.to_vec();
while remainder.len() < 4 {
remainder.insert(0, 0);
}
// remainder.resize(4, 0);
values.push(swab(BigEndian::read_u32(&remainder)));
}
}
Ok(GdbCommand::WriteMemory(addr, length, values))
} else if pkt.starts_with("p") {
Ok(GdbCommand::GetRegister(parse_u32(
pkt.trim_start_matches("p"),
)?))
} else if pkt.starts_with("Hg") {
Ok(GdbCommand::SetCurrentThread(parse_u64(
pkt.trim_start_matches("Hg"),
)?))
} else if pkt.starts_with("Hc") {
Ok(GdbCommand::ContinueThread(parse_i32(
pkt.trim_start_matches("Hc"),
)?))
} else if pkt == "qC" {
Ok(GdbCommand::GetCurrentThreadId)
} else if pkt == "?" {
Ok(GdbCommand::LastSignalPacket)
} else if pkt == "qfThreadInfo" {
Ok(GdbCommand::GetThreadInfo)
} else if pkt == "vCont?" {
Ok(GdbCommand::VContQuery)
} else if pkt == "vCont;c" {
Ok(GdbCommand::VContContinue)
} else if pkt.starts_with("vCont;C") {
//vCont;C04:0;c
let pkt = pkt.trim_start_matches("vCont;C").to_string();
// let v: Vec<&str> = pkt.split(',').collect();
Ok(GdbCommand::VContContinueFromSignal(pkt))
} else if pkt.starts_with("vCont;s") {
let pkt = pkt.trim_start_matches("vCont;s").to_string();
Ok(GdbCommand::VContStepFromSignal(pkt))
} else if pkt == "qSymbol::" {
Ok(GdbCommand::SymbolsReady)
} else if pkt == "vMustReplyEmpty" {
Ok(GdbCommand::MustReplyEmpty)
} else {
info!("unrecognized GDB command: {}", pkt);
Ok(GdbCommand::Unknown(pkt))
}
}
pub fn get_controller(&self) -> GdbController {
GdbController {
connection: self.connection.try_clone().unwrap(),
}
}
pub fn get_command(&mut self) -> Result<GdbCommand, GdbServerError> {
let cmd = self.do_get_command()?;
debug!("< GDB packet: {:?}", cmd);
Ok(cmd)
}
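    // A note on framing, added for clarity: RSP packets arrive on the wire as
    // `$<payload>#<xx>`, where `xx` is two hex digits of the modulo-256 sum of
    // the payload bytes. Bare `+`/`-` bytes are client acks/naks, and a raw
    // 0x03 byte is the Ctrl-C interrupt request; all are handled below.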
fn do_get_command(&mut self) -> Result<GdbCommand, GdbServerError> {
let mut buffer = [0; 16384];
let mut byte = [0; 1];
let mut remote_checksum = [0; 2];
let mut buffer_offset = 0;
// XXX Replace this with a BufReader for performance
loop {
let len = self.connection.read(&mut byte)?;
if len == 0 {
return Err(GdbServerError::ConnectionClosed);
}
match byte[0] {
0x24 /*'$'*/ => {
let mut checksum: u8 = 0;
loop {
let len = self.connection.read(&mut byte)?;
if len == 0 {
return Err(GdbServerError::ConnectionClosed);
}
match byte[0] as char {
'#' => {
// There's got to be a better way to compare the checksum
self.connection.read(&mut remote_checksum)?;
let checksum_str = format!("{:02x}", checksum);
if checksum_str != String::from_utf8_lossy(&remote_checksum) {
info!(
"Checksum mismatch: Calculated {:?} vs {}",
checksum_str,
String::from_utf8_lossy(&remote_checksum)
);
self.gdb_send_nak()?;
} else {
if !self.no_ack_mode {
self.gdb_send_ack()?;
}
}
let (buffer, _remainder) = buffer.split_at(buffer_offset);
// debug!("< Read packet ${:?}#{:#?}", String::from_utf8_lossy(buffer), String::from_utf8_lossy(&remote_checksum));
return self.packet_to_command(&buffer);
}
other => {
buffer[buffer_offset] = other as u8;
buffer_offset = buffer_offset + 1;
checksum = checksum.wrapping_add(other as u8);
}
}
}
}
0x2b /*'+'*/ => {}
0x2d /*'-'*/ => {}
0x3 => return Ok(GdbCommand::Interrupt),
                other => error!("unrecognized byte received: {}", other),
}
}
}
pub fn process(
&mut self,
cmd: GdbCommand,
cpu: &RiscvCpu,
bridge: &Bridge,
) -> Result<(), GdbServerError> {
match cmd {
GdbCommand::SupportedQueries(_) => self.gdb_send(SUPPORTED_QUERIES)?,
GdbCommand::StartNoAckMode => {
self.no_ack_mode = true;
self.gdb_send(b"OK")?
}
GdbCommand::SetCurrentThread(_) => self.gdb_send(b"OK")?,
GdbCommand::ContinueThread(_) => self.gdb_send(b"OK")?,
GdbCommand::AddBreakpoint(_bptype, address, _size) => {
let response = match cpu.add_breakpoint(bridge, address) {
Ok(_) => "OK",
Err(RiscvCpuError::BreakpointExhausted) => {
error!("No available breakpoint found");
"E0E"
}
Err(e) => {
error!(
"An error occurred while trying to add the breakpoint: {:?}",
e
);
"E0E"
}
};
self.gdb_send(response.as_bytes())?;
}
GdbCommand::TraceStatusQuery => self.gdb_send(b"")?,
GdbCommand::RemoveBreakpoint(_bptype, address, _size) => {
cpu.remove_breakpoint(bridge, address)?;
self.gdb_send(b"OK")?
}
GdbCommand::LastSignalPacket => {
let sig_str = format!("S{:02x}", self.last_signal);
self.gdb_send(if self.is_alive {
sig_str.as_bytes()
} else {
b"W00"
})?
}
GdbCommand::GetThreadInfo => self.gdb_send(b"l")?,
GdbCommand::GetCurrentThreadId => self.gdb_send(b"QC0")?,
GdbCommand::CheckIsAttached => self.gdb_send(b"1")?,
GdbCommand::Disconnect => {
cpu.resume(bridge)?;
self.gdb_send("OK".as_bytes())?
}
GdbCommand::GetRegisters => {
let mut register_list = String::new();
for i in cpu.all_cpu_registers() {
register_list
.push_str(format!("{:08x}", swab(cpu.read_register(bridge, i)?)).as_str());
}
self.gdb_send(register_list.as_bytes())?
}
GdbCommand::GetRegister(reg) => {
let response = match cpu.read_register(bridge, reg) {
Ok(val) => format!("{:08x}", swab(val)),
Err(e) => {
error!("Error reading register: {}", e);
format!("E01")
}
};
self.gdb_send(response.as_bytes())?
}
GdbCommand::SetRegister(reg, val) => {
let response = match cpu.write_register(bridge, reg, val) {
Ok(()) => "OK",
Err(_) => "E01",
};
self.gdb_send(response.as_bytes())?
}
GdbCommand::SymbolsReady => self.gdb_send(b"OK")?,
GdbCommand::ReadMemory(addr, len) => {
debug!("Reading memory {:08x}", addr);
let mut values = vec![];
let mut out_str = String::new();
if len == 1 {
let val = cpu.read_memory(bridge, addr, 1)? as u8;
out_str.push_str(&format!("{:02x}", val));
self.gdb_send(out_str.as_bytes())?
} else if len == 2 {
let val = cpu.read_memory(bridge, addr, 2)? as u16;
let mut buf = [0; 2];
BigEndian::write_u16(&mut buf, val);
out_str.push_str(&format!("{:04x}", NativeEndian::read_u16(&buf)));
self.gdb_send(out_str.as_bytes())?
} else if len == 4 {
values.push(cpu.read_memory(bridge, addr, 4)?);
self.gdb_send_u32(values)?
} else {
for offset in (0..len).step_by(4) {
values.push(cpu.read_memory(bridge, addr + offset, 4)?);
if addr + offset >= 0xfffffffc {
break;
}
}
self.gdb_send_u32(values)?
}
}
GdbCommand::WriteMemory(addr, len, values) => {
if len == 1 {
debug!("Writing memory {:08x} -> {:08x}", addr, values[0] >> 24);
cpu.write_memory(bridge, addr, 1, values[0] >> 24)?;
} else if len == 2 {
debug!("Writing memory {:08x} -> {:08x}", addr, values[0] >> 16);
cpu.write_memory(bridge, addr, 2, values[0] >> 16)?;
} else if len == 4 {
debug!("Writing memory {:08x} -> {:08x}", addr, values[0]);
cpu.write_memory(bridge, addr, 4, values[0])?;
} else {
for (offset, value) in values.iter().enumerate() {
debug!("Writing memory {:08x} -> {:08x}", addr, values[offset]);
cpu.write_memory(bridge, addr + (offset as u32 * 4), 4, *value)?;
}
}
self.gdb_send("OK".as_bytes())?
}
GdbCommand::VContQuery => self.gdb_send(b"vCont;c;C;s;S")?,
GdbCommand::VContContinue => {
if let Some(s) = cpu.resume(bridge)? {
self.print_string(&format!("Note: CPU is currently in a trap: {}\n", s))?
}
}
GdbCommand::VContContinueFromSignal(_) => {
if let Some(s) = cpu.resume(bridge)? {
self.print_string(&format!("Note: CPU is currently in a trap: {}\n", s))?
}
}
GdbCommand::VContStepFromSignal(_) => {
if let Some(s) = cpu.step(bridge)? {
self.print_string(&format!("Note: CPU is currently in a trap: {}\n", s))?;
}
self.last_signal = 5;
self.gdb_send(format!("S{:02x}", self.last_signal).as_bytes())?;
}
GdbCommand::GetOffsets => self.gdb_send(b"Text=0;Data=0;Bss=0")?,
GdbCommand::Continue => {
if let Some(s) = cpu.resume(bridge)? {
self.print_string(&format!("Note: CPU is currently in a trap: {}\n", s))?
}
}
GdbCommand::Step => {
if let Some(s) = cpu.step(bridge)? {
self.print_string(&format!("Note: CPU is currently in a trap: {}\n", s))?
}
}
GdbCommand::MonitorCommand(cmd) => {
match cmd.as_str() {
"reset" => {
self.print_string("Resetting CPU...\n")?;
cpu.reset(&bridge)?;
}
"about" => {
self.print_string("VexRiscv GDB bridge\n")?;
}
"explain" => {
self.print_string(&cpu.explain(&bridge)?)?;
}
_ => {
self.print_string("Unrecognized monitor command. Available commands:\n")?;
self.print_string(" about - Information about the bridge\n")?;
self.print_string(" explain - Explain what the CPU is doing\n")?;
self.print_string(" reset - Reset the CPU\n")?;
}
}
self.gdb_send(b"OK")?
}
GdbCommand::ReadFeature(filename, offset, len) => {
self.gdb_send_file(cpu.get_feature(&filename)?, offset, len)?
}
GdbCommand::ReadMemoryMap(_offset, _len) => {
// self.gdb_send_file(cpu.get_memory_map()?, offset, len)?
self.gdb_send(b"")?
}
GdbCommand::ReadThreads(offset, len) => {
self.gdb_send_file(cpu.get_threads()?, offset, len)?
}
GdbCommand::Interrupt => {
self.last_signal = 2;
cpu.halt(bridge)?;
self.gdb_send(format!("S{:02x}", self.last_signal).as_bytes())?;
}
GdbCommand::MustReplyEmpty => self.gdb_send(b"")?,
GdbCommand::Unknown(_) => self.gdb_send(b"")?,
};
Ok(())
}
fn gdb_send_ack(&mut self) -> io::Result<usize> {
self.connection.write(&['+' as u8])
}
fn gdb_send_nak(&mut self) -> io::Result<usize> {
self.connection.write(&['-' as u8])
}
fn gdb_send_u32(&mut self, vals: Vec<u32>) -> io::Result<()> {
let mut out_str = String::new();
for val in vals {
let mut buf = [0; 4];
BigEndian::write_u32(&mut buf, val);
out_str.push_str(&format!("{:08x}", NativeEndian::read_u32(&buf)));
}
self.gdb_send(out_str.as_bytes())
}
fn gdb_send(&mut self, inp: &[u8]) -> io::Result<()> {
let mut buffer = [0; 16388];
let mut checksum: u8 = 0;
buffer[0] = '$' as u8;
for i in 0..inp.len() {
buffer[i + 1] = inp[i];
checksum = checksum.wrapping_add(inp[i]);
}
let checksum_str = &format!("{:02x}", checksum);
let checksum_bytes = checksum_str.as_bytes();
buffer[inp.len() + 1] = '#' as u8;
buffer[inp.len() + 2] = checksum_bytes[0];
buffer[inp.len() + 3] = checksum_bytes[1];
let (to_write, _rest) = buffer.split_at(inp.len() + 4);
// debug!(
// " > Writing {} bytes: {}",
// to_write.len(),
// String::from_utf8_lossy(&to_write)
// );
self.connection.write(&to_write)?;
Ok(())
}
pub fn print_string(&mut self, msg: &str) -> io::Result<()> {
debug!("Printing string {} to GDB", msg);
let mut strs: Vec<String> = msg
.as_bytes()
.iter()
.map(|b| format!("{:02X}", b))
.collect();
strs.insert(0, "O".to_string());
let joined = strs.join("");
self.gdb_send(joined.as_bytes())
}
fn gdb_send_file(&mut self, mut data: Vec<u8>, offset: u32, len: u32) -> io::Result<()> {
let offset = offset as usize;
let len = len as usize;
let mut end = offset + len;
if offset > data.len() {
self.gdb_send(b"l")?;
} else {
if end > data.len() {
end = data.len();
}
let mut trimmed_data: Vec<u8> = data.drain(offset..end).collect();
            if trimmed_data.len() >= len {
                // A full chunk means more data may remain, so reply 'm';
                // a shorter chunk is the final one and is sent with 'l' below.
                trimmed_data.insert(0, 'm' as u8);
} else {
trimmed_data.insert(0, 'l' as u8);
}
self.gdb_send(&trimmed_data)?;
}
Ok(())
}
}
| 36.916767 | 150 | 0.467717 |
ef3213cff965f75ca3771327bf27e82644997ac2 | 7,899 | use super::*;
#[test_suite(schema(to_many_composites), only(MongoDb))]
mod every {
#[connector_test]
async fn basic(runner: Runner) -> TestResult<()> {
create_to_many_test_data(&runner).await?;
insta::assert_snapshot!(
run_query!(runner, r#"{
findManyTestModel(where: {
to_many_as: {
every: {
a_2: { gt: 0 }
}
}
}) {
id
}
}"#),
@r###"{"data":{"findManyTestModel":[{"id":1},{"id":2},{"id":6},{"id":7}]}}"###
);
Ok(())
}
#[connector_test]
async fn empty(runner: Runner) -> TestResult<()> {
create_to_many_test_data(&runner).await?;
insta::assert_snapshot!(
run_query!(runner, r#"{
findManyTestModel(where: {
to_many_as: {
every: {}
}
}) {
id
}
}"#),
@r###"{"data":{"findManyTestModel":[{"id":1},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7}]}}"###
);
insta::assert_snapshot!(
run_query!(runner, r#"{
findManyTestModel(where: {
NOT: [
{ to_many_as: { every: {} }}
]
}) {
id
}
}"#),
@r###"{"data":{"findManyTestModel":[]}}"###
);
Ok(())
}
#[connector_test]
async fn empty_logical_conditions(runner: Runner) -> TestResult<()> {
create_to_many_test_data(&runner).await?;
        // `AND` with an empty filter is a truthy condition, so all records fulfill it by default.
insta::assert_snapshot!(
run_query!(runner, r#"{
findManyTestModel(where: { to_many_as: { every: { AND: {} } }}) {
id
}
}"#),
@r###"{"data":{"findManyTestModel":[{"id":1},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7}]}}"###
);
        // `OR` with an empty filter is a falsey condition, so no records fulfill it by default.
        // **However**: empty or non-existing arrays are automatically true due to how we build the
        // conditions, and it's unclear whether this is actually an incorrect result.
insta::assert_snapshot!(
run_query!(runner, r#"{
findManyTestModel(where: { to_many_as: { every: { OR: [] } }}) {
id
}
}"#),
@r###"{"data":{"findManyTestModel":[{"id":6},{"id":7}]}}"###
);
insta::assert_snapshot!(
run_query!(runner, r#"{
findManyTestModel(where: { to_many_as: { every: { OR: [], NOT: [] } }}) {
id
}
}"#),
@r###"{"data":{"findManyTestModel":[{"id":6},{"id":7}]}}"###
);
        // `NOT` with an empty filter is a truthy condition, so all records fulfill it by default.
insta::assert_snapshot!(
run_query!(runner, r#"{
findManyTestModel(where: { to_many_as: { every: { NOT: {} } }}) {
id
}
}"#),
@r###"{"data":{"findManyTestModel":[{"id":1},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7}]}}"###
);
Ok(())
}
#[connector_test]
    async fn logical_and(runner: Runner) -> TestResult<()> {
create_to_many_test_data(&runner).await?;
// Implicit AND
insta::assert_snapshot!(
run_query!(runner, r#"{
findManyTestModel(where: {
to_many_as: {
every: {
a_1: { contains: "oo" }
a_2: { gt: 0 }
}
}
}) {
id
}
}"#),
@r###"{"data":{"findManyTestModel":[{"id":1},{"id":6},{"id":7}]}}"###
);
// Explicit AND
insta::assert_snapshot!(
run_query!(runner, r#"{
findManyTestModel(where: {
to_many_as: {
every: {
AND: [
{ a_1: { contains: "oo" } },
{ a_2: { gt: 0 } }
]
}
}
}) {
id
}
}"#),
@r###"{"data":{"findManyTestModel":[{"id":1},{"id":6},{"id":7}]}}"###
);
Ok(())
}
#[connector_test(capabilities(InsensitiveFilters))]
async fn insensitive(runner: Runner) -> TestResult<()> {
create_to_many_test_data(&runner).await?;
insta::assert_snapshot!(
run_query!(runner, r#"{
findManyTestModel(where: {
to_many_as: {
every: {
a_1: { contains: "test", mode: insensitive }
}
}
}) {
id
}
}"#),
@r###"{"data":{"findManyTestModel":[{"id":2},{"id":4},{"id":5},{"id":6},{"id":7}]}}"###
);
Ok(())
}
#[connector_test]
async fn logical_or(runner: Runner) -> TestResult<()> {
create_to_many_test_data(&runner).await?;
create_row(
&runner,
r#"{ id: 10, to_many_as: [ { a_1: "foo", a_2: 1 }, { a_1: "test", a_2: 10 } ] }"#,
)
.await?;
insta::assert_snapshot!(
run_query!(runner, r#"{
findManyTestModel(where: {
to_many_as: {
every: {
OR: [
{ a_1: { contains: "oo" } },
{ a_1: { contains: "test" } }
]
}
}
}) {
id
}
}"#),
@r###"{"data":{"findManyTestModel":[{"id":1},{"id":2},{"id":6},{"id":7},{"id":10}]}}"###
);
Ok(())
}
#[connector_test]
async fn logical_not(runner: Runner) -> TestResult<()> {
create_to_many_test_data(&runner).await?;
insta::assert_snapshot!(
run_query!(runner, r#"{
findManyTestModel(where: {
to_many_as: {
every: {
NOT: [
{ a_1: { contains: "oo" } },
{ a_1: { contains: "test" } }
]
}
}
}) {
id
}
}"#),
@r###"{"data":{"findManyTestModel":[{"id":5},{"id":6},{"id":7}]}}"###
);
Ok(())
}
#[connector_test]
async fn nested_every(runner: Runner) -> TestResult<()> {
create_to_many_nested_test_data(&runner).await?;
insta::assert_snapshot!(
run_query!(runner, r#"{
findManyTestModel(where: {
to_many_as: {
every: {
a_to_many_bs: {
every: {
b_field: { gte: 0 }
}
}
}
}
}) {
id
}
}"#),
@r###"{"data":{"findManyTestModel":[{"id":1},{"id":4},{"id":5},{"id":6},{"id":7}]}}"###
);
Ok(())
}
}
| 30.976471 | 115 | 0.364983 |
50d0e8f2d84ed91d920ba02828c92eb08b9e9d5e | 743 | // Copyright 2019-2022 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
mod cli;
mod daemon;
mod logger;
mod subcommand;
use cli::{cli_error_and_die, Cli};
use structopt::StructOpt;
#[async_std::main]
async fn main() {
logger::setup_logger();
// Capture Cli inputs
let Cli { opts, cmd } = Cli::from_args();
// Run forest as a daemon if no other subcommands are used. Otherwise, run the subcommand.
match opts.to_config() {
Ok(cfg) => match cmd {
Some(command) => subcommand::process(command, cfg).await,
None => daemon::start(cfg).await,
},
Err(e) => {
cli_error_and_die(&format!("Error parsing config. Error was: {}", e), 1);
}
};
}
| 25.62069 | 94 | 0.611036 |
ffb3eeeac0e58302fe039338f430adfb12e47a1c | 5,175 | use std::collections::HashMap;
use crate::instructions::{exec, Instruction};
use crate::machine::ConstOperand::*;
use crate::machine::MutOperand::*;
use crate::machine::Register::*;
#[derive(Hash, Eq, PartialEq, Copy, Clone)]
pub enum Register {
// General purposes register
R0,
R1,
R2,
R3,
R4,
R5,
R6,
R7,
R8,
// static base
SB,
// stack limit
SL,
// intra-procedure-call scratch register
IP,
// stack pointer
SP,
// link register
LR,
// program counter. It is incremented for each instruction.
PC,
    // current program status register: result of the last comparison
    // (1 if greater, 0 if equal, -1 if less)
CPSR,
}
#[derive(Hash, Eq, PartialEq, Copy, Clone)]
pub enum ConstOperand {
ConstRegister(Register),
Literal(i32),
ConstAddress(usize),
}
#[derive(Hash, Eq, PartialEq, Copy, Clone)]
pub enum MutOperand {
MutRegister(Register),
MutAddress(usize),
}
impl From<Register> for ConstOperand {
fn from(register: Register) -> Self {
ConstRegister(register)
}
}
impl From<i32> for ConstOperand {
fn from(value: i32) -> Self {
Literal(value)
}
}
impl From<usize> for ConstOperand {
fn from(value: usize) -> Self {
ConstAddress(value)
}
}
impl From<Register> for MutOperand {
fn from(register: Register) -> Self {
MutRegister(register)
}
}
impl From<usize> for MutOperand {
fn from(value: usize) -> Self {
MutAddress(value)
}
}
impl MutOperand {
pub(crate) fn as_const(self) -> ConstOperand {
match self {
MutRegister(reg) => ConstRegister(reg),
MutAddress(addr) => ConstAddress(addr),
}
}
}
pub struct Memory {
registers: HashMap<Register, i32>,
stack: Vec<i32>,
heap: Vec<i32>,
}
impl Memory {
pub fn get_op(&self, operand: ConstOperand) -> i32 {
match operand {
ConstRegister(reg) => *self.registers.get(®).unwrap(),
Literal(value) => value,
ConstAddress(_) => todo!("Implement heap read"),
}
}
pub fn set_op(&mut self, operand: MutOperand, value: i32) {
match operand {
MutRegister(reg) => self.registers.insert(reg, value),
MutAddress(_) => todo!("Implement heap write"),
};
}
pub fn get_reg(&self, reg: Register) -> i32 {
*self.registers.get(®).unwrap()
}
pub fn set_reg(&mut self, reg: Register, value: i32) {
self.registers.insert(reg, value);
}
pub fn stack_put(&mut self, pointer: i32, value: i32) {
self.stack[pointer as usize] = value;
}
pub fn stack_load(&mut self, pointer: i32) -> i32 {
self.stack[pointer as usize]
}
pub fn new() -> Memory {
const STACK_SIZE: usize = 1024;
const HEAP_SIZE: usize = 4096;
Memory {
registers: [
(R0, 0),
(R1, 0),
(R2, 0),
(R3, 0),
(R4, 0),
(R5, 0),
(R6, 0),
(R7, 0),
                (R8, 0),
(SB, 0),
(SL, STACK_SIZE as i32),
(IP, 0),
(SP, -1),
(LR, 0),
(PC, 0),
(CPSR, 0),
]
.iter()
.cloned()
.collect(),
stack: vec![0; STACK_SIZE],
heap: vec![0; HEAP_SIZE],
}
}
}
pub struct Machine {
memory: Memory,
instructions: Vec<Instruction>,
}
impl Machine {
pub fn new() -> Machine {
Machine {
instructions: Vec::new(),
memory: Memory::new(),
}
}
pub fn get(&self, register: Register) -> i32 {
*self.memory.registers.get(®ister).unwrap()
}
pub fn set(&mut self, register: Register, value: i32) {
self.memory.registers.insert(register, value);
}
pub fn append(&mut self, instruction: Instruction) {
self.instructions.push(instruction)
}
pub fn append_all(&mut self, instructions: Vec<Instruction>) {
self.instructions.extend(instructions);
}
pub fn run(&mut self) {
while self.step() {}
}
pub fn step(&mut self) -> bool {
let instruction = self.instructions.get(self.get(PC) as usize);
if instruction.is_none() {
return false;
}
exec(*instruction.unwrap(), &mut self.memory);
self.set(PC, self.get(PC) + 1);
return true;
}
}
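// A minimal usage sketch (mirrors the test below): load an immediate into a
// register and execute a single step.
//
//     let mut m = Machine::new();
//     m.append(MOV(R0.into(), 42.into()));
//     m.step();
//     assert_eq!(m.get(R0), 42);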
#[cfg(test)]
mod tests {
    use crate::instructions::Instruction::{ADD, CMP, JG, MOV, MUL};
use super::*;
#[test]
    fn test_factorial() {
        // Computes 5! by multiplying R1 by R0 and counting R0 down to zero;
        // the trailing JG loops back while the CMP result is "greater".
let mut machine = Machine::new();
machine.append_all(Vec::from([
MOV(R0.into(), 5.into()),
MOV(R1.into(), 1.into()),
MUL(R1.into(), R0.into()),
ADD(R0.into(), Literal(-1)),
CMP(R0.into(), 0.into()),
JG(Literal(-4)),
]));
machine.run();
assert_eq!(machine.get(R0), 0);
assert_eq!(machine.get(R1), 1 * 2 * 3 * 4 * 5);
}
} | 23.206278 | 71 | 0.52 |
abac982b08d0827ea488e72a84d244e934ee1f27 | 21,353 | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Typesafe wrappers around the /pkgfs/install filesystem.
use {
fidl::endpoints::RequestStream, fidl_fuchsia_io as fio, fuchsia_hash::Hash,
fuchsia_zircon::Status, futures::prelude::*, thiserror::Error,
};
/// The kind of blob to be installed.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum BlobKind {
/// The blob should be interpreted as a package.
Package,
/// The blob should be interpreted as a content blob in a package.
Data,
}
impl BlobKind {
fn make_install_path(&self, merkle: &Hash) -> String {
let name = match *self {
BlobKind::Package => "pkg",
BlobKind::Data => "blob",
};
format!("{}/{}", name, merkle)
}
}
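// For illustration: a package meta.far with merkle root `m` installs at
// "pkg/m", while a content blob installs at "blob/m" under /pkgfs/install.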
/// An error encountered while creating a blob
#[derive(Debug, Error)]
#[allow(missing_docs)]
pub enum BlobCreateError {
#[error("the blob already exists and is readable")]
AlreadyExists,
#[error("the blob is in the process of being written")]
ConcurrentWrite,
#[error("while creating the blob: {}", _0)]
Io(io_util::node::OpenError),
}
/// An open handle to /pkgfs/install
#[derive(Debug, Clone)]
pub struct Client {
proxy: fio::DirectoryProxy,
}
impl Client {
/// Returns an client connected to pkgfs from the current component's namespace
pub fn open_from_namespace() -> Result<Self, io_util::node::OpenError> {
let proxy = io_util::directory::open_in_namespace(
"/pkgfs/install",
fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_WRITABLE,
)?;
Ok(Client { proxy })
}
/// Returns an client connected to pkgfs from the given pkgfs root dir.
pub fn open_from_pkgfs_root(
pkgfs: &fio::DirectoryProxy,
) -> Result<Self, io_util::node::OpenError> {
Ok(Client {
proxy: io_util::directory::open_directory_no_describe(
pkgfs,
"install",
fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_WRITABLE,
)?,
})
}
/// Creates a new client backed by the returned request stream. This constructor should not be
/// used outside of tests.
///
/// # Panics
///
/// Panics on error
pub fn new_test() -> (Self, fio::DirectoryRequestStream) {
let (proxy, stream) =
fidl::endpoints::create_proxy_and_stream::<fio::DirectoryMarker>().unwrap();
(Self { proxy }, stream)
}
/// Creates a new client backed by the returned mock. This constructor should not be used
/// outside of tests.
///
/// # Panics
///
/// Panics on error
pub fn new_mock() -> (Self, Mock) {
let (proxy, stream) =
fidl::endpoints::create_proxy_and_stream::<fio::DirectoryMarker>().unwrap();
(Self { proxy }, Mock { stream })
}
/// Create a new blob with the given install intent. Returns an open file proxy to the blob.
pub async fn create_blob(
&self,
merkle: Hash,
blob_kind: BlobKind,
) -> Result<(Blob<NeedsTruncate>, BlobCloser), BlobCreateError> {
let flags = fio::OpenFlags::CREATE | fio::OpenFlags::RIGHT_WRITABLE;
let blob = io_util::directory::open_file(
&self.proxy,
&blob_kind.make_install_path(&merkle),
flags,
)
.await
.map_err(|e| match e {
io_util::node::OpenError::OpenError(Status::ALREADY_EXISTS) => {
// Lost a race writing to blobfs, and the blob already exists.
BlobCreateError::AlreadyExists
}
io_util::node::OpenError::OpenError(Status::ACCESS_DENIED) => {
// Lost a race with another process writing to blobfs, and the blob is in the
// process of being written.
BlobCreateError::ConcurrentWrite
}
other => BlobCreateError::Io(other),
})?;
Ok((
Blob { proxy: Clone::clone(&blob), kind: blob_kind, state: NeedsTruncate },
BlobCloser { proxy: blob, closed: false },
))
}
}
/// A testing server implementation of /pkgfs/install.
///
/// Mock does not handle requests until instructed to do so.
pub struct Mock {
stream: fio::DirectoryRequestStream,
}
impl Mock {
/// Consume the next directory request, verifying it is intended to create the blob identified
/// by `merkle` and `kind`. Returns a `MockBlob` representing the open blob install file.
///
/// # Panics
///
/// Panics on error or assertion violation (unexpected requests or a mismatched open call)
pub async fn expect_create_blob(&mut self, merkle: Hash, kind: BlobKind) -> MockBlob {
match self.stream.next().await {
Some(Ok(fio::DirectoryRequest::Open {
flags: _,
mode: _,
path,
object,
control_handle: _,
})) => {
assert_eq!(path, kind.make_install_path(&merkle));
let stream = object.into_stream().unwrap().cast_stream();
MockBlob { stream }
}
other => panic!("unexpected request: {:?}", other),
}
}
/// Asserts that the request stream closes without any further requests.
///
/// # Panics
///
/// Panics on error
pub async fn expect_done(mut self) {
match self.stream.next().await {
None => {}
Some(request) => panic!("unexpected request: {:?}", request),
}
}
}
/// A testing server implementation of an open /pkgfs/install/{pkg,blob}/<merkle> file.
///
/// MockBlob does not send the OnOpen event or handle requests until instructed to do so.
pub struct MockBlob {
stream: fio::FileRequestStream,
}
impl MockBlob {
fn send_on_open(&mut self, status: Status) {
let mut info = fio::NodeInfo::File(fio::FileObject { event: None, stream: None });
let () =
self.stream.control_handle().send_on_open_(status.into_raw(), Some(&mut info)).unwrap();
}
async fn handle_truncate(&mut self, status: Status) -> u64 {
match self.stream.next().await {
Some(Ok(fio::FileRequest::Resize { length, responder })) => {
responder.send(&mut Err(status.into_raw())).unwrap();
length
}
other => panic!("unexpected request: {:?}", other),
}
}
async fn expect_truncate(&mut self) -> u64 {
self.handle_truncate(Status::OK).await
}
async fn handle_write(&mut self, status: Status) -> Vec<u8> {
match self.stream.next().await {
Some(Ok(fio::FileRequest::WriteDeprecated { data, responder })) => {
responder.send(status.into_raw(), data.len() as u64).unwrap();
data
}
Some(Ok(fio::FileRequest::Write { data, responder })) => {
if status == Status::OK {
responder.send(&mut Ok(data.len() as u64)).unwrap();
} else {
responder.send(&mut Err(status.into_raw())).unwrap();
}
data
}
other => panic!("unexpected request: {:?}", other),
}
}
fn fail_open_with_error(mut self, status: Status) {
assert_ne!(status, Status::OK);
self.send_on_open(status);
}
/// Fail the open request with an error indicating the blob already exists.
///
/// # Panics
///
/// Panics on error
pub fn fail_open_with_already_exists(self) {
self.fail_open_with_error(Status::ALREADY_EXISTS);
}
/// Fail the open request with an error indicating the blob is open for concurrent write.
///
/// # Panics
///
/// Panics on error
pub fn fail_open_with_concurrent_write(self) {
self.fail_open_with_error(Status::ACCESS_DENIED);
}
async fn fail_write_with_status(mut self, status: Status) {
self.send_on_open(Status::OK);
let length = self.expect_truncate().await;
// divide rounding up
let expected_write_calls = (length + (fio::MAX_BUF - 1)) / fio::MAX_BUF;
for _ in 0..(expected_write_calls - 1) {
self.handle_write(Status::OK).await;
}
self.handle_write(status).await;
}
/// Succeeds the open request, consumes the truncate request, the initial write calls, then
/// fails the final write indicating the written data was corrupt.
///
/// # Panics
///
/// Panics on error
pub async fn fail_write_with_corrupt(self) {
self.fail_write_with_status(Status::IO_DATA_INTEGRITY).await
}
/// Succeeds the open request, then verifies the blob is immediately closed (possibly after
/// handling a single Close request).
///
/// # Panics
///
/// Panics on error
pub async fn expect_close(mut self) {
self.send_on_open(Status::OK);
match self.stream.next().await {
None => {}
Some(Ok(fio::FileRequest::CloseDeprecated { responder })) => {
let _ = responder.send(Status::OK.into_raw());
}
Some(Ok(fio::FileRequest::Close { responder })) => {
let _ = responder.send(&mut Ok(()));
}
Some(other) => panic!("unexpected request: {:?}", other),
}
}
/// Succeeds the open request, then returns a future that, when awaited, verifies the blob is
/// truncated, written, and closed with the given `expected` payload.
///
/// # Panics
///
/// Panics on error
pub fn expect_payload(mut self, expected: &[u8]) -> impl Future<Output = ()> + '_ {
self.send_on_open(Status::OK);
async move {
assert_eq!(self.expect_truncate().await, expected.len() as u64);
let mut rest = expected;
while !rest.is_empty() {
let expected_chunk = if rest.len() > fio::MAX_BUF as usize {
&rest[..fio::MAX_BUF as usize]
} else {
rest
};
assert_eq!(self.handle_write(Status::OK).await, expected_chunk);
rest = &rest[expected_chunk.len()..];
}
match self.stream.next().await {
Some(Ok(fio::FileRequest::CloseDeprecated { responder })) => {
responder.send(Status::OK.into_raw()).unwrap();
}
Some(Ok(fio::FileRequest::Close { responder })) => {
responder.send(&mut Ok(())).unwrap();
}
other => panic!("unexpected request: {:?}", other),
}
}
}
}
/// An error encountered while truncating a blob
#[derive(Debug, Error)]
#[allow(missing_docs)]
pub enum BlobTruncateError {
#[error("fidl error: {}", _0)]
Fidl(fidl::Error),
#[error("fidl endpoint reported that insufficient storage space is available")]
NoSpace,
#[error("received unexpected failure status: {}", _0)]
UnexpectedResponse(Status),
}
/// An error encountered while writing a blob
#[derive(Debug, Error)]
#[allow(missing_docs)]
pub enum BlobWriteError {
#[error("fidl error: {}", _0)]
Fidl(fidl::Error),
#[error("file endpoint reported more bytes written than were provided")]
Overwrite,
#[error("fidl endpoint reported the written data was corrupt")]
Corrupt,
#[error("fidl endpoint reported that insufficient storage space is available")]
NoSpace,
#[error("received unexpected failure status: {}", _0)]
UnexpectedResponse(Status),
}
/// A handle to a blob that must be explicitly closed to prevent future opens of the same blob from
/// racing with this blob closing.
#[derive(Debug)]
#[must_use]
pub struct BlobCloser {
proxy: fio::FileProxy,
closed: bool,
}
impl BlobCloser {
/// Close the blob, silently ignoring errors.
pub async fn close(mut self) {
let _ = self.proxy.close().await;
self.closed = true;
}
}
impl Drop for BlobCloser {
fn drop(&mut self) {
if !self.closed {
// Dropped without waiting on close. We can at least send the close request here, but
// there could be a race with another attempt to open the blob.
let _ = self.proxy.close();
}
}
}
/// The successful result of writing some data to a blob.
#[derive(Debug)]
pub enum BlobWriteSuccess {
/// There is still more data to write.
MoreToWrite(Blob<NeedsData>),
/// The blob is fully written.
Done,
}
/// State for a blob that can be truncated.
#[derive(Debug)]
pub struct NeedsTruncate;
/// State for a blob that can be written to.
#[derive(Debug)]
pub struct NeedsData {
size: u64,
written: u64,
}
/// A blob in the process of being written.
#[derive(Debug)]
pub struct Blob<S> {
proxy: fio::FileProxy,
kind: BlobKind,
state: S,
}
impl Blob<NeedsTruncate> {
/// Truncates the blob to the given size. On success, the blob enters the writable state.
pub async fn truncate(self, size: u64) -> Result<Blob<NeedsData>, BlobTruncateError> {
let () = self
.proxy
.resize(size)
.await
.map_err(BlobTruncateError::Fidl)?
.map_err(Status::from_raw)
.map_err(|status| match status {
Status::NO_SPACE => BlobTruncateError::NoSpace,
status => BlobTruncateError::UnexpectedResponse(status),
})?;
Ok(Blob { proxy: self.proxy, kind: self.kind, state: NeedsData { size, written: 0 } })
}
/// Creates a new blob/closer client backed by the returned request stream. This constructor
/// should not be used outside of tests.
///
/// # Panics
///
/// Panics on error
pub fn new_test(kind: BlobKind) -> (Self, BlobCloser, fio::FileRequestStream) {
let (proxy, stream) =
fidl::endpoints::create_proxy_and_stream::<fio::FileMarker>().unwrap();
(
Blob { proxy: Clone::clone(&proxy), kind, state: NeedsTruncate },
BlobCloser { proxy, closed: false },
stream,
)
}
}
impl Blob<NeedsData> {
/// Writes all of the given buffer to the blob.
///
/// # Panics
///
/// Panics if a write is attempted with a buf larger than the remaining blob size.
pub async fn write(mut self, mut buf: &[u8]) -> Result<BlobWriteSuccess, BlobWriteError> {
assert!(self.state.written + buf.len() as u64 <= self.state.size);
while !buf.is_empty() {
// Don't try to write more than MAX_BUF bytes at a time.
let limit = buf.len().min(fio::MAX_BUF as usize);
let written = self.write_some(&buf[..limit]).await?;
buf = &buf[written..];
}
if self.state.written == self.state.size {
Ok(BlobWriteSuccess::Done)
} else {
Ok(BlobWriteSuccess::MoreToWrite(self))
}
}
/// Writes some of the given buffer to the blob.
///
/// Returns the number of bytes written (which may be less than the buffer's size) or the error
/// encountered during the write.
async fn write_some(&mut self, buf: &[u8]) -> Result<usize, BlobWriteError> {
// TODO(https://fxbug.dev/88872): The ALREADY_EXISTS special case below relies on write
// semantics (both status and count always present) that are not possible now that write
// uses FIDL error syntax. This is bad. We hope pkgfs is not long for this world; see the
// referenced bug for removal plans.
let fut = self.proxy.write_deprecated(buf);
let (status, actual) = fut.await.map_err(BlobWriteError::Fidl)?;
match Status::from_raw(status) {
Status::OK => {}
Status::IO_DATA_INTEGRITY => {
return Err(BlobWriteError::Corrupt);
}
Status::NO_SPACE => {
return Err(BlobWriteError::NoSpace);
}
Status::ALREADY_EXISTS => {
if self.kind == BlobKind::Package && self.state.written + actual == self.state.size
{
// pkgfs returns ALREADY_EXISTS on the final write of a meta FAR iff no other
// needs exist. Allow the error, but ignore the hint and check needs anyway.
} else {
return Err(BlobWriteError::UnexpectedResponse(Status::ALREADY_EXISTS));
}
}
status => {
return Err(BlobWriteError::UnexpectedResponse(status));
}
}
if actual > buf.len() as u64 {
return Err(BlobWriteError::Overwrite);
}
self.state.written += actual;
Ok(actual as usize)
}
}
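// A sketch of the full typestate flow (error handling elided; `client`,
// `merkle`, and `data` are assumed to be in scope), matching the
// `write_blob` test helper below:
//
//     let (blob, closer) = client.create_blob(merkle, BlobKind::Data).await?;
//     let blob = blob.truncate(data.len() as u64).await?;
//     match blob.write(&data).await? {
//         BlobWriteSuccess::Done => {}
//         BlobWriteSuccess::MoreToWrite(_) => unreachable!(),
//     }
//     closer.close().await;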
#[cfg(test)]
mod tests {
use {
super::*,
assert_matches::assert_matches,
fuchsia_merkle::MerkleTree,
fuchsia_pkg_testing::{Package, PackageBuilder},
pkgfs_ramdisk::PkgfsRamdisk,
std::io::Read,
};
impl Client {
pub(crate) async fn write_blob(&self, merkle: Hash, blob_kind: BlobKind, data: &[u8]) {
let (blob, closer) = self.create_blob(merkle, blob_kind).await.unwrap();
let blob = blob.truncate(data.len() as u64).await.unwrap();
assert_matches!(blob.write(data).await, Ok(BlobWriteSuccess::Done));
closer.close().await;
}
pub(crate) async fn write_meta_far(&self, pkg: &Package) {
let mut buf = vec![];
pkg.meta_far().unwrap().read_to_end(&mut buf).unwrap();
let pkg_merkle = pkg.meta_far_merkle_root().to_owned();
self.write_blob(pkg_merkle, BlobKind::Package, &buf[..]).await;
}
}
#[fuchsia_async::run_singlethreaded(test)]
async fn handles_empty_content_blob() {
let pkgfs = PkgfsRamdisk::start().unwrap();
let root = pkgfs.root_dir_proxy().unwrap();
let client = Client::open_from_pkgfs_root(&root).unwrap();
// Make the test package.
let pkg = PackageBuilder::new("handles-empty-content-blob")
.add_resource_at("data/empty", "".as_bytes())
.build()
.await
.unwrap();
// Write the meta far.
client.write_meta_far(&pkg).await;
// Write the empty blob.
let empty_merkle = MerkleTree::from_reader(std::io::empty()).unwrap().root();
client.write_blob(empty_merkle, BlobKind::Data, &[]).await;
pkgfs.stop().await.unwrap();
}
#[fuchsia_async::run_singlethreaded(test)]
async fn prevents_duplicate_blob_writes() {
let pkgfs = PkgfsRamdisk::start().unwrap();
let root = pkgfs.root_dir_proxy().unwrap();
let client = Client::open_from_pkgfs_root(&root).unwrap();
// Make the test package.
let pkg = PackageBuilder::new("prevents-duplicate-blob-writes")
.add_resource_at("data/write-twice", "will it detect the race?".as_bytes())
.add_resource_at("data/prevent-activation", "by not writing this blob".as_bytes())
.build()
.await
.unwrap();
let pkg_contents = pkg.meta_contents().unwrap().contents().to_owned();
// Write the meta far.
client.write_meta_far(&pkg).await;
// Write the test blob the first time.
client
.write_blob(
pkg_contents["data/write-twice"],
BlobKind::Data,
"will it detect the race?".as_bytes(),
)
.await;
// Pkgfs fails repeat attempts to write the same blob.
assert_matches!(
client.create_blob(pkg_contents["data/write-twice"], BlobKind::Data).await,
Err(BlobCreateError::AlreadyExists)
);
pkgfs.stop().await.unwrap();
}
#[fuchsia_async::run_singlethreaded(test)]
async fn rejects_corrupt_content_blobs() {
let pkgfs = PkgfsRamdisk::start().unwrap();
let root = pkgfs.root_dir_proxy().unwrap();
let client = Client::open_from_pkgfs_root(&root).unwrap();
// Make the test package.
let pkg = PackageBuilder::new("rejects-corrupt-content-blob")
.add_resource_at("data/corrupt", "foo".as_bytes())
.build()
.await
.unwrap();
let pkg_contents = pkg.meta_contents().unwrap().contents().to_owned();
// Write the meta far.
client.write_meta_far(&pkg).await;
// Writing invalid blob data fails.
let (blob, closer) =
client.create_blob(pkg_contents["data/corrupt"], BlobKind::Data).await.unwrap();
let blob = blob.truncate("foo".len() as u64).await.unwrap();
assert_matches!(blob.write("bar".as_bytes()).await, Err(BlobWriteError::Corrupt));
closer.close().await;
// Retrying with the correct data succeeds.
client.write_blob(pkg_contents["data/corrupt"], BlobKind::Data, "foo".as_bytes()).await;
pkgfs.stop().await.unwrap();
}
}
| 33.364063 | 100 | 0.584649 |
9baf0b089122ad1a5d5f269206a83bcc8043118a | 1,534 | use crate::prelude::*;
use std::ffi::{CStr, CString};
// The thread name buffer should allow space for up to 16 bytes, including the terminating null byte.
const THREAD_NAME_MAX_LEN: usize = 16;
/// A thread name represented in a fixed buffer of 16 bytes.
///
/// The length is chosen to be consistent with Linux.
#[derive(Debug, Clone, Default)]
pub struct ThreadName {
buf: [u8; THREAD_NAME_MAX_LEN],
len: usize, // including null terminator
}
impl ThreadName {
/// Construct a thread name from str
pub fn new(name: &str) -> Self {
Self::from_slice(CString::new(name).unwrap().as_bytes_with_nul())
}
pub const fn max_len() -> usize {
THREAD_NAME_MAX_LEN
}
/// Construct a thread name from slice
pub fn from_slice(input: &[u8]) -> Self {
let mut buf = [0; THREAD_NAME_MAX_LEN];
let mut len = THREAD_NAME_MAX_LEN;
for (i, b) in buf.iter_mut().take(THREAD_NAME_MAX_LEN - 1).enumerate() {
if input[i] == '\0' as u8 {
len = i + 1;
break;
}
*b = input[i];
}
debug_assert!(buf[THREAD_NAME_MAX_LEN - 1] == 0);
Self { buf, len }
}
/// Returns a byte slice
pub fn as_slice(&self) -> &[u8] {
&self.buf
}
/// Converts to a CStr.
pub fn as_c_str(&self) -> &CStr {
// Note: from_bytes_with_nul will fail if slice has more than 1 '\0' at the end
CStr::from_bytes_with_nul(&self.buf[..self.len]).unwrap_or_default()
}
}
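// Illustrative test (not part of the original file): names longer than
// 15 bytes are truncated so the terminating NUL always fits in the buffer.
#[cfg(test)]
mod tests {
    use super::ThreadName;

    #[test]
    fn truncates_long_names() {
        let name = ThreadName::new("a-very-long-thread-name");
        // At most 15 content bytes survive, plus the NUL terminator.
        assert_eq!(name.as_c_str().to_bytes(), b"a-very-long-thr");
    }
}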
| 28.943396 | 101 | 0.593872 |