hexsha stringlengths 40..40 | size int64 4..1.05M | content stringlengths 4..1.05M | avg_line_length float64 1.33..100 | max_line_length int64 1..1k | alphanum_fraction float64 0.25..1 |
---|---|---|---|---|---|
bb1948bf3c81bca99b6bd82d7dc005c7946aabf9 | 639 |
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
#![allow(unknown_features)]
#![feature(box_syntax)]
fn f() -> Box<isize> {
box 100
}
pub fn main() {
assert_eq!(f(), box 100);
}
| 27.782609 | 68 | 0.707355 |
21ce5b5ae676b88136b71b1718361616537bb246 | 6,797 |
use std::{iter::FromIterator, path::PathBuf, time::Duration};
use anyhow::{bail, format_err, Context, Result};
use hash_engine::{
experiment::controller::config::{OutputPersistenceConfig, OUTPUT_PERSISTENCE_KEY},
output::local::config::LocalPersistenceConfig,
proto::{self, ExecutionEnvironment},
utils::parse_env_duration,
};
use serde_json::json;
use tokio::time::{self, timeout};
use super::process;
use crate::{exsrv::Handler, manifest::read_manifest, Args};
lazy_static::lazy_static! {
static ref ENGINE_START_TIMEOUT: Duration = parse_env_duration("ENGINE_START_TIMEOUT", 2);
static ref ENGINE_WAIT_TIMEOUT: Duration = parse_env_duration("ENGINE_WAIT_TIMEOUT", 60);
}
/// `run_experiment` will build a queue of tokio tasks attached to the simulation workers. Any
/// requests over the websocket will be handled and sent to the appropriate worker (if available).
/// The simulations will run to completion and the connection will finish once the last run is done,
/// or if there is an error.
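/// # Example
///
/// A minimal sketch (doc-test marked `ignore`): building the `Args` and `Handler`
/// values is application-specific, so the two helpers below are hypothetical
/// placeholders rather than real APIs from this crate.
///
/// ```ignore
/// let args = build_cli_args(); // hypothetical: collect CLI options into an `Args`
/// let handler = connect_experiment_server().await?; // hypothetical: obtain a `Handler`
/// run_experiment(args, handler).await?;
/// ```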
pub async fn run_experiment(args: Args, handler: Handler) -> Result<()> {
let project = &args.project;
let absolute_project_path = PathBuf::from(project)
.canonicalize()
.with_context(|| format!("Could not canonicalize project path: {project:?}"))?;
let project_name = args.project_name.clone().unwrap_or(
absolute_project_path
.file_name()
.with_context(|| format!("Project path didn't point to a directory: {absolute_project_path:?}"))? // Shouldn't be able to fail as we canonicalize above
.to_string_lossy()
.to_string(),
);
let experiment_run = read_manifest(&absolute_project_path, &args.r#type)?;
run_experiment_with_manifest(args, experiment_run, project_name, handler).await?;
Ok(())
}
fn create_engine_command(
args: &Args,
experiment_id: &str,
controller_url: &str,
) -> Result<Box<dyn process::Command + Send>> {
Ok(Box::new(process::LocalCommand::new(
experiment_id,
args.num_workers as usize,
controller_url,
)?))
}
async fn run_experiment_with_manifest(
args: Args,
experiment_run: proto::ExperimentRun,
project_name: String,
mut handler: Handler,
) -> Result<()> {
let experiment_id = experiment_run.base.id.clone();
let mut engine_handle = handler
.register_experiment(&experiment_id)
.await
.with_context(|| format!("Could not register experiment: {experiment_id}"))?;
// Create and start the experiment run
let cmd = create_engine_command(&args, &experiment_id, handler.url())
.context("Could not build engine command")?;
let mut engine_process = cmd.run().await.context("Could not run experiment")?;
// Wait to receive a message that the experiment has started before sending the init message.
let msg = timeout(*ENGINE_START_TIMEOUT, engine_handle.recv())
.await
.map_err(|_| format_err!("engine start timeout"));
match msg {
Ok(proto::EngineStatus::Started) => {}
Ok(m) => {
bail!(
"expected to receive `Started` message but received: `{}`",
m.kind()
);
}
Err(e) => {
error!("Engine start timeout for experiment {experiment_id}");
engine_process
.exit_and_cleanup()
.await
.context("Failed to cleanup after failed start")?;
bail!(e);
}
};
debug!("Received start message from {experiment_id}");
let mut output_folder = PathBuf::from(args.output);
output_folder.push(project_name);
let map_iter = [(
OUTPUT_PERSISTENCE_KEY.to_string(),
json!(OutputPersistenceConfig::Local(LocalPersistenceConfig {
output_folder
})),
)];
// Now we can send the init message
let init_message = proto::InitMessage {
experiment: experiment_run.clone().into(),
env: ExecutionEnvironment::None, // We don't connect to the API
dyn_payloads: serde_json::Map::from_iter(map_iter),
};
engine_process
.send(&proto::EngineMsg::Init(init_message))
.await
.context("Could not send `Init` message")?;
debug!("Sent init message to {experiment_id}");
loop {
let msg: Option<proto::EngineStatus>;
tokio::select! {
_ = time::sleep(*ENGINE_WAIT_TIMEOUT) => {
error!("Did not receive status from experiment {experiment_id} for over {:?}. Exiting now.", *ENGINE_WAIT_TIMEOUT);
break;
}
m = engine_handle.recv() => { msg = Some(m) },
}
let msg = msg.unwrap();
debug!("Got message from experiment run with type: {}", msg.kind());
match msg {
proto::EngineStatus::Stopping => {
debug!("Stopping experiment {experiment_id}");
}
proto::EngineStatus::SimStart { sim_id, globals: _ } => {
debug!("Started simulation: {sim_id}");
}
proto::EngineStatus::SimStatus(status) => {
debug!("Got simulation run status: {status:?}");
// TODO: OS - handle status fields
}
proto::EngineStatus::SimStop(sim_id) => {
debug!("Simulation stopped: {sim_id}");
}
proto::EngineStatus::Errors(sim_id, errs) => {
error!("There were errors when running simulation [{sim_id}]: {errs:?}");
}
proto::EngineStatus::Warnings(sim_id, warnings) => {
warn!("There were warnings when running simulation [{sim_id}]: {warnings:?}");
}
proto::EngineStatus::Logs(sim_id, logs) => {
for log in logs {
if !log.is_empty() {
info!(target: "behaviors", "[{experiment_id}][{sim_id}]: {log}");
}
}
}
proto::EngineStatus::Exit => {
debug!("Process exited successfully for experiment run with id {experiment_id}",);
break;
}
proto::EngineStatus::ProcessError(error) => {
error!("Got error: {error:?}");
break;
}
proto::EngineStatus::Started => {
error!(
"Received unexpected engine `Started` message after engine had already \
started: {}",
msg.kind()
);
break;
}
}
}
debug!("Performing cleanup");
engine_process
.exit_and_cleanup()
.await
.context("Could not cleanup after finish")?;
Ok(())
}
| 37.761111 | 163 | 0.587465 |
71d7deaf5c020712a685c7f6929e55b5da3d3b57 | 4,440 |
use crate::{web_sys::HtmlElement, *};
use std::iter;
use std::marker::PhantomData;
// ------ ------
// Element
// ------ ------
make_flags!(Empty);
pub struct Stack<EmptyFlag> {
raw_el: RawHtmlEl,
flags: PhantomData<EmptyFlag>,
}
impl Stack<EmptyFlagSet> {
pub fn new() -> Self {
Self::with_tag(Tag::Custom("div"))
}
}
impl Element for Stack<EmptyFlagNotSet> {
fn into_raw_element(self) -> RawElement {
self.raw_el.into()
}
}
impl<EmptyFlag> IntoIterator for Stack<EmptyFlag> {
type Item = Self;
type IntoIter = iter::Once<Self>;
#[inline]
fn into_iter(self) -> Self::IntoIter {
iter::once(self)
}
}
impl<EmptyFlag> UpdateRawEl<RawHtmlEl> for Stack<EmptyFlag> {
fn update_raw_el(mut self, updater: impl FnOnce(RawHtmlEl) -> RawHtmlEl) -> Self {
self.raw_el = updater(self.raw_el);
self
}
}
// ------ ------
// Abilities
// ------ ------
impl ChoosableTag for Stack<EmptyFlagSet> {
fn with_tag(tag: Tag) -> Self {
run_once!(|| {
global_styles()
.style_group(
StyleGroup::new(".stack > *")
.style("grid-column", "1")
.style("grid-row", "1"),
)
.style_group(
StyleGroup::new(".stack > .center_x")
.style("margin-left", "auto")
.style("margin-right", "auto"),
)
.style_group(
StyleGroup::new(".stack > .center_y")
.style("margin-top", "auto")
.style("margin-bottom", "auto"),
)
.style_group(StyleGroup::new(".stack > .align_bottom").style("margin-top", "auto"))
.style_group(StyleGroup::new(".stack > .align_left").style("margin-right", "auto"))
.style_group(StyleGroup::new(".stack > .align_right").style("margin-left", "auto"))
.style_group(StyleGroup::new(".stack > .fill_width").style("width", "100%"))
.style_group(StyleGroup::new(".stack > .fill_height").style("height", "100%"));
});
Self {
raw_el: RawHtmlEl::new(tag.as_str())
.class("stack")
.style("display", "inline-grid")
.style("grid-auto-columns", "minmax(0, auto)")
.style("grid-auto-rows", "minmax(0, auto)"),
flags: PhantomData,
}
}
}
impl<EmptyFlag> Styleable<'_, RawHtmlEl> for Stack<EmptyFlag> {}
impl<EmptyFlag> KeyboardEventAware<RawHtmlEl> for Stack<EmptyFlag> {}
impl<EmptyFlag> MouseEventAware<RawHtmlEl> for Stack<EmptyFlag> {}
impl<EmptyFlag> PointerEventAware<RawHtmlEl> for Stack<EmptyFlag> {}
impl<EmptyFlag> TouchEventAware<RawHtmlEl> for Stack<EmptyFlag> {}
impl<EmptyFlag> MutableViewport<RawHtmlEl> for Stack<EmptyFlag> {}
impl<EmptyFlag> ResizableViewport<RawHtmlEl> for Stack<EmptyFlag> {}
impl<EmptyFlag> Hookable<RawHtmlEl> for Stack<EmptyFlag> {
type WSElement = HtmlElement;
}
impl<EmptyFlag> AddNearbyElement<'_> for Stack<EmptyFlag> {}
impl<EmptyFlag> HasClassId<RawHtmlEl> for Stack<EmptyFlag> {}
impl<EmptyFlag> SelectableTextContent<RawHtmlEl> for Stack<EmptyFlag> {}
// ------ ------
// Attributes
// ------ ------
impl<'a, EmptyFlag> Stack<EmptyFlag> {
pub fn layer(mut self, layer: impl IntoOptionElement<'a> + 'a) -> Stack<EmptyFlagNotSet> {
self.raw_el = self.raw_el.child(layer);
self.into_type()
}
pub fn layer_signal(
mut self,
layer: impl Signal<Item = impl IntoOptionElement<'a>> + Unpin + 'static,
) -> Stack<EmptyFlagNotSet> {
self.raw_el = self.raw_el.child_signal(layer);
self.into_type()
}
pub fn layers(
mut self,
layers: impl IntoIterator<Item = impl IntoElement<'a> + 'a>,
) -> Stack<EmptyFlagNotSet> {
self.raw_el = self.raw_el.children(layers);
self.into_type()
}
pub fn layers_signal_vec(
mut self,
layers: impl SignalVec<Item = impl IntoElement<'a>> + Unpin + 'static,
) -> Stack<EmptyFlagNotSet> {
self.raw_el = self.raw_el.children_signal_vec(layers);
self.into_type()
}
fn into_type<NewEmptyFlag>(self) -> Stack<NewEmptyFlag> {
Stack {
raw_el: self.raw_el,
flags: PhantomData,
}
}
}
| 31.942446 | 99 | 0.569369 |
eb3442c092bcf94d6030d9c0d4c100393dc89c1c | 5,339 |
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationList {
#[serde(skip_serializing_if = "Vec::is_empty")]
pub value: Vec<OperationsDefinition>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationsDefinition {
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub display: Option<OperationsDisplayDefinition>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationsDisplayDefinition {
#[serde(skip_serializing_if = "Option::is_none")]
pub provider: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub resource: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub operation: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AttestationProvider {
#[serde(flatten)]
pub tracked_resource: TrackedResource,
pub properties: StatusResult,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StatusResult {
#[serde(rename = "trustModel", skip_serializing_if = "Option::is_none")]
pub trust_model: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub status: Option<status_result::Status>,
#[serde(rename = "attestUri", skip_serializing_if = "Option::is_none")]
pub attest_uri: Option<String>,
}
pub mod status_result {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
Ready,
NotReady,
Error,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudError {
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<CloudErrorBody>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudErrorBody {
#[serde(skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AttestationProviderListResult {
#[serde(skip_serializing_if = "Vec::is_empty")]
pub value: Vec<AttestationProvider>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AttestationServicePatchParams {
#[serde(skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AttestationServiceCreationParams {
pub location: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
pub properties: AttestationServiceCreationSpecificParams,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AttestationServiceCreationSpecificParams {
#[serde(rename = "attestationPolicy", skip_serializing_if = "Option::is_none")]
pub attestation_policy: Option<String>,
#[serde(rename = "policySigningCertificates", skip_serializing_if = "Option::is_none")]
pub policy_signing_certificates: Option<JsonWebKeySet>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TrackedResource {
#[serde(flatten)]
pub resource: Resource,
#[serde(skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
pub location: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
#[serde(skip_serializing)]
pub id: Option<String>,
#[serde(skip_serializing)]
pub name: Option<String>,
#[serde(rename = "type", skip_serializing)]
pub type_: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JsonWebKeySet {
#[serde(skip_serializing_if = "Vec::is_empty")]
pub keys: Vec<JsonWebKey>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JsonWebKey {
pub alg: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub crv: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub d: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub dp: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub dq: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub e: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub k: Option<String>,
pub kid: String,
pub kty: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub n: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub p: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub q: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub qi: Option<String>,
#[serde(rename = "use")]
pub use_: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub x: Option<String>,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub x5c: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub y: Option<String>,
}
| 37.076389 | 91 | 0.702566 |
fc1bf4b5a814ece8bcd08e36642804a2b3208947 | 6,140 |
//
// Copyright (C) 2018 Kubos Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License")
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Test requesting and processing system version information
//
// Note: Also includes test cases for bad/error responses. get_response() is a private
// function, so it can't be explicitly tested.
use super::*;
use messages::commands::ResponseID;
use messages::ReceiverStatusFlags;
#[test]
fn test_request_version_good() {
let mut mock = MockStream::default();
mock.write.set_input(vec![
0xAA, 0x44, 0x12, 0x1C, 0x1, 0x0, 0x0, 0xC0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x25, 0x0, 0x0,
0x0, 0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x30, 0x8E, 0x33, 0x3C,
]);
mock.read.set_output(vec![
0xAA, 0x44, 0x12, 0x1C, 0x1, 0x0, 0x80, 0x20, 0x6, 0x0, 0x0, 0x0, 0xFF, 0x78, 0xD1, 0xB,
0x6E, 0x5D, 0xC9, 0x9, 0x0, 0x0, 0x0, 0x0, 0xFB, 0xFD, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x4F,
0x4B, 0x92, 0x8F, 0x77, 0x4A,
]);
let oem = mock_new!(mock);
assert_eq!(oem.request_version(), Ok(()));
}
#[test]
fn test_request_version_bad_no_response() {
let mut mock = MockStream::default();
mock.write.set_input(vec![
0xAA, 0x44, 0x12, 0x1C, 0x1, 0x0, 0x0, 0xC0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x25, 0x0, 0x0,
0x0, 0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x30, 0x8E, 0x33, 0x3C,
]);
let oem = mock_new!(mock);
assert_eq!(oem.request_version().unwrap_err(), OEMError::NoResponse);
}
#[test]
fn test_request_version_bad_response_crc() {
let mut mock = MockStream::default();
mock.write.set_input(vec![
0xAA, 0x44, 0x12, 0x1C, 0x1, 0x0, 0x0, 0xC0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x25, 0x0, 0x0,
0x0, 0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x30, 0x8E, 0x33, 0x3C,
]);
mock.read.set_output(vec![
0xAA, 0x44, 0x12, 0x1C, 0x1, 0x0, 0x80, 0x20, 0x6, 0x0, 0x0, 0x0, 0xFF, 0x78, 0xD1, 0xB,
0x6E, 0x5D, 0xC9, 0x9, 0x0, 0x0, 0x0, 0x0, 0xFB, 0xFD, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x4F,
0x4B, 0x92, 0x8F, 0x77, 0x4B,
]);
let oem = mock_new!(mock);
assert_eq!(oem.request_version().unwrap_err(), OEMError::NoResponse);
}
#[test]
fn test_request_version_fail_response() {
let mut mock = MockStream::default();
mock.write.set_input(vec![
0xAA, 0x44, 0x12, 0x1C, 0x1, 0x0, 0x0, 0xC0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x25, 0x0, 0x0,
0x0, 0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x30, 0x8E, 0x33, 0x3C,
]);
mock.read.set_output(vec![
0xAA, 0x44, 0x12, 0x1C, 0x1, 0x0, 0x80, 0x20, 0x15, 0x0, 0x0, 0x0, 0xFF, 0x78, 0xD1, 0xB,
0x6E, 0x5D, 0xC9, 0x9, 0x0, 0x0, 0x0, 0x0, 0xFB, 0xFD, 0x0, 0x0, 0x1F, 0x0, 0x0, 0x0, 0x4D,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x74, 0x69, 0x6D, 0x65, 0x64, 0x20, 0x6F, 0x75,
0x74, 0xCB, 0xE5, 0x83, 0x92,
]);
let oem = mock_new!(mock);
assert_eq!(
oem.request_version().unwrap_err(),
OEMError::CommandError {
id: ResponseID::Timeout,
description: "Message timed out".to_owned(),
}
);
}
#[test]
fn test_get_version() {
let mut mock = MockStream::default();
mock.read.set_output(vec![
0xAA, 0x44, 0x12, 0x1C, 0x25, 0x0, 0x0, 0x20, 0x70, 0x0, 0x0, 0x0, 0x7D, 0x78, 0xD1, 0xB,
0x38, 0x5E, 0xC9, 0x9, 0x0, 0x0, 0x48, 0x0, 0x81, 0x36, 0xFA, 0x33, 0x1, 0x0, 0x0, 0x0,
0x1, 0x0, 0x0, 0x0, 0x47, 0x31, 0x53, 0x42, 0x30, 0x47, 0x54, 0x54, 0x30, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x42, 0x4A, 0x59, 0x41, 0x31, 0x35, 0x31, 0x32, 0x30, 0x30, 0x33, 0x38,
0x48, 0x0, 0x0, 0x0, 0x4F, 0x45, 0x4D, 0x36, 0x31, 0x35, 0x2D, 0x32, 0x2E, 0x30, 0x30, 0x0,
0x0, 0x0, 0x0, 0x0, 0x4F, 0x45, 0x4D, 0x30, 0x36, 0x30, 0x36, 0x30, 0x30, 0x52, 0x4E, 0x30,
0x30, 0x30, 0x30, 0x0, 0x4F, 0x45, 0x4D, 0x30, 0x36, 0x30, 0x32, 0x30, 0x31, 0x52, 0x42,
0x30, 0x30, 0x30, 0x30, 0x0, 0x32, 0x30, 0x31, 0x35, 0x2F, 0x4A, 0x61, 0x6E, 0x2F, 0x32,
0x38, 0x0, 0x31, 0x35, 0x3A, 0x32, 0x37, 0x3A, 0x32, 0x39, 0x0, 0x0, 0x0, 0x0, 0xC6, 0x5E,
0x86, 0x47,
]);
let oem = mock_new!(mock);
let expected: Log = Log::Version(VersionLog {
recv_status: ReceiverStatusFlags::CLOCK_MODEL_INVALID
| ReceiverStatusFlags::POSITION_SOLUTION_INVALID,
time_status: 120,
week: 3025,
ms: 164191800,
num_components: 1,
components: vec![Component {
comp_type: 1,
model: "G1SB0GTT0".to_owned(),
serial_num: "BJYA15120038H".to_owned(),
hw_version: "OEM615-2.00".to_owned(),
sw_version: "OEM060600RN0000".to_owned(),
boot_version: "OEM060201RB0000".to_owned(),
compile_date: "2015/Jan/28".to_owned(),
compile_time: "15:27:29".to_owned(),
}],
});
assert_eq!(oem.get_log().unwrap(), expected);
}
| 39.612903 | 99 | 0.609446 |
db0b5cde2e25d3b87023e91a301c24284929aa78 | 2,275 |
/*
the point of this file is to get the provinces from the map.json file that should be generated by the
SOE game itself
*/
use crate::ui_ext::popups::ask;
use crate::ui_ext::popups::note;
use serde_derive::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::Path;
use crate::common_traits::*;
const SAVE_FILE: &str = "map.json";
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct prov {
pub provinces: HashMap<u32, String>,
provinces_reverse: HashMap<String, u32>,
}
impl prov {
pub fn new() -> Self {
/*
        checks if the map.json file exists; otherwise it makes a new, empty object
*/
if Path::new(SAVE_FILE).exists() {
let x: Self = serde_json::from_str(&openfile::read_file(SAVE_FILE)).unwrap_e("err");
return x;
}
Self {
provinces: HashMap::new(),
provinces_reverse: HashMap::new(),
}
}
pub fn add(&mut self, color: u32, name: String) {
if self.provinces.get(&color) == Some(&name) {
return; // Already registered the same way
}
if self.provinces_reverse.contains_key(&name) {
// Make sure not to have two provinces with the same name
note::note("Province name already exist. Not writing");
} else if self.provinces.contains_key(&color) {
if ask::ask("Province color already registred. Write over?") {
let prev_name = self.provinces.get(&color).unwrap();
self.provinces_reverse.remove(prev_name);
self.provinces.insert(color, name.clone());
self.provinces_reverse.insert(name, color);
}
} else {
self.provinces.insert(color, name.clone());
self.provinces_reverse.insert(name, color);
}
}
pub fn save(&mut self) {
// saves the file
openfile::write_file(SAVE_FILE, &serde_json::to_string(&self).unwrap_e("err"))
.unwrap_e("err");
}
pub fn exists(&self, colour: u32) -> bool {
// checks if the province colour exists and returns a bool
match self.provinces.get(&colour) {
Some(_) => true, //return (),
_ => false,
}
}
}
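// Example (sketch): typical use of `prov` as described in the comment at the top of
// this file. The colour value and province name below are illustrative only.
#[allow(dead_code)]
fn example_usage() {
    let mut map = prov::new(); // loads map.json if present, otherwise starts empty
    map.add(0x00FF00, String::from("Greenland")); // register a colour -> name mapping
    if map.exists(0x00FF00) {
        map.save(); // persist back to map.json
    }
}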
| 31.597222 | 102 | 0.588132 |
bf8785feee0d2e708f74d5172900fec84fb20c61 | 2,050 |
use crate::iiif::resources::Resource;
use crate::iiif::types::{Id, Uri};
use serde::Serialize;
#[derive(Debug, Serialize)]
pub enum Motivation {
#[serde(rename = "painting")]
Painting,
}
#[derive(Debug, Serialize)]
#[serde(tag = "type")]
pub struct AnnotationPage {
id: Uri,
items: Vec<Annotation>,
}
impl AnnotationPage {
pub fn id(presentation_api: &str, item_id: &Id, index: usize) -> Uri {
Uri::new(format!(
"{}/{}/page/{}",
presentation_api, item_id.encoded, index
))
}
pub fn new(
presentation_api: &str,
item_id: &Id,
index: usize,
items: Vec<Annotation>,
) -> AnnotationPage {
AnnotationPage {
id: AnnotationPage::id(presentation_api, item_id, index),
items,
}
}
}
#[derive(Debug, Serialize)]
#[serde(tag = "type")]
pub struct Annotation {
id: Uri,
motivation: Motivation,
body: Resource,
target: Uri,
}
impl Annotation {
pub fn id(presentation_api: &str, item_id: &Id, page: usize, suffix: &str) -> Uri {
Uri::new(format!(
"{}/{}/annotation/{}-{}",
presentation_api, item_id.encoded, page, suffix
))
}
pub fn new(
presentation_api: &str,
item_id: &Id,
index: usize,
resource: Resource,
target: Uri,
motivation: Motivation,
) -> Annotation {
let id = match resource {
Resource::Image(_) => Annotation::id(presentation_api, item_id, index, "image"),
};
Annotation {
id,
motivation,
body: resource,
target,
}
}
pub fn new_painting(
presentation_api: &str,
item_id: &Id,
index: usize,
resource: Resource,
target: Uri,
) -> Annotation {
Annotation::new(
presentation_api,
item_id,
index,
resource,
target,
Motivation::Painting,
)
}
}
| 22.282609 | 92 | 0.526829 |
67c2585ddf1d0925113fe93d8e073458c81d6c10 | 2,374 |
use std::path::PathBuf;
/// A directory lock.
///
/// A lock is associated with a specific path and some
/// [`LockParams`](./enum.LockParams.html).
/// Tantivy itself uses only two locks but client application
/// can use the directory facility to define their own locks.
/// - [INDEX_WRITER_LOCK](./struct.INDEX_WRITER_LOCK.html)
/// - [META_LOCK](./struct.META_LOCK.html)
///
/// Check out these locks documentation for more information.
///
#[derive(Debug)]
pub struct Lock {
/// The lock needs to be associated with its own file `path`.
/// Depending on the platform, the lock might rely on the creation
/// and deletion of this filepath.
pub filepath: PathBuf,
    /// `is_blocking` describes whether acquiring the lock is meant
    /// to be a blocking operation or a non-blocking one.
    ///
    /// Acquiring a blocking lock blocks until the lock is
    /// available.
    /// Acquiring a non-blocking lock returns rapidly, either successfully
    /// or with an error signifying that someone is already holding
    /// the lock.
pub is_blocking: bool,
}
lazy_static! {
/// Only one process should be able to write tantivy's index at a time.
/// This lock file, when present, is in charge of preventing other processes to open an IndexWriter.
///
/// If the process is killed and this file remains, it is safe to remove it manually.
///
    /// Failing to acquire this lock usually means a misuse of tantivy's API
    /// (creating more than one instance of the `IndexWriter`), or a spurious
    /// lock file remaining after a crash. In the latter case, removing the file after
    /// checking that no tantivy process is running is safe.
pub static ref INDEX_WRITER_LOCK: Lock = Lock {
filepath: PathBuf::from(".tantivy-writer.lock"),
is_blocking: false
};
/// The meta lock file is here to protect the segment files being opened by
/// `IndexReader::reload()` from being garbage collected.
    /// It makes it possible for another process to safely consume
    /// our index while it is being written. Ideally, we would have preferred `RWLock` semantics
/// here, but it is difficult to achieve on Windows.
///
/// Opening segment readers is a very fast process.
pub static ref META_LOCK: Lock = Lock {
filepath: PathBuf::from(".tantivy-meta.lock"),
is_blocking: true
};
}
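// Example (sketch): a client-defined lock built from the same struct, as the doc
// comment on `Lock` suggests. The filepath is hypothetical; `is_blocking: true`
// gives the blocking semantics described for `META_LOCK`.
lazy_static! {
    pub static ref CUSTOM_APP_LOCK: Lock = Lock {
        filepath: PathBuf::from(".my-app.lock"),
        is_blocking: true,
    };
}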
| 41.649123 | 104 | 0.68155 |
e4f522828cb95de8646663686526693b6d364a75 | 1,682 |
fn print(count: &mut usize, id: usize, layout: &layout::tree::LayoutR) {
*count += 1;
debug_println!("result: {:?} {:?} {:?}", *count, id, layout);
}
pub fn compute() {
let mut layout_tree = layout::tree::LayoutTree::default();
layout_tree.insert(
1,
0,
0,
layout::idtree::InsertType::Back,
layout::style::Style {
position_type: layout::style::PositionType::Absolute,
size: layout::geometry::Size {
width: layout::style::Dimension::Points(1920.0),
height: layout::style::Dimension::Points(1024.0),
},
..Default::default()
},
);
layout_tree.insert(
2,
1,
0,
layout::idtree::InsertType::Back,
layout::style::Style {
size: layout::geometry::Size {
width: layout::style::Dimension::Points(100f32),
height: layout::style::Dimension::Points(100f32),
..Default::default()
},
..Default::default()
},
);
layout_tree.insert(
3,
2,
0,
layout::idtree::InsertType::Back,
layout::style::Style {
size: layout::geometry::Size {
width: layout::style::Dimension::Points(10f32),
..Default::default()
},
max_size: layout::geometry::Size {
height: layout::style::Dimension::Points(50f32),
..Default::default()
},
..Default::default()
},
);
layout_tree.compute(print, &mut 0);
}
| 31.148148 | 73 | 0.474435 |
728d433bfde8bdc9c0b7611000e2da701ce82f47 | 5,791 |
use std::time::Instant;
use std::sync::{RwLock, Mutex, Arc};
use std::sync::atomic::{AtomicBool, Ordering};
use std::collections::HashMap;
use std::sync::mpsc::{Sender, Receiver};
use packet::{Packet, encode_payload, Payload, ID};
#[derive(Clone)]
#[doc(hidden)]
pub enum Transport {
Polling(Sender<Packet>, Arc<Mutex<Receiver<Packet>>>),
}
#[derive(Clone)]
pub struct Socket {
transport: Transport,
sid: Arc<String>,
last_pong: Arc<RwLock<Instant>>,
last_ping: Arc<RwLock<Instant>>,
closed: Arc<AtomicBool>,
b64: bool,
xhr2: bool,
jsonp: Option<i32>,
client_map: Arc<RwLock<HashMap<Arc<String>, Socket>>>,
on_close: Arc<RwLock<Option<Box<Fn(&str) + 'static>>>>,
on_message: Arc<RwLock<Option<Box<Fn(&[u8]) + 'static>>>>,
on_packet: Arc<RwLock<Option<Box<Fn(Packet) + 'static>>>>,
on_flush: Arc<RwLock<Option<Box<Fn(&[Packet]) + 'static>>>>,
}
unsafe impl Send for Socket {}
unsafe impl Sync for Socket {}
impl Socket {
#[doc(hidden)]
pub fn new(sid: Arc<String>,
transport: Transport,
client_map: Arc<RwLock<HashMap<Arc<String>, Socket>>>,
b64: bool,
jsonp: Option<i32>)
-> Socket {
Socket {
transport: transport,
sid: sid,
last_pong: Arc::new(RwLock::new(Instant::now())),
last_ping: Arc::new(RwLock::new(Instant::now())),
closed: Arc::new(AtomicBool::new(false)),
b64: b64,
jsonp: jsonp,
xhr2: !b64,
client_map: client_map,
on_close: Arc::new(RwLock::new(None)),
on_message: Arc::new(RwLock::new(None)),
on_packet: Arc::new(RwLock::new(None)),
on_flush: Arc::new(RwLock::new(None)),
}
}
pub fn id(&self) -> String {
self.sid.clone().as_str().to_string()
}
#[doc(hidden)]
pub fn reset_timeout(&self) {
*self.last_pong.write().unwrap() = Instant::now();
}
#[doc(hidden)]
pub fn reset_last_ping(&self) {
let data = self.last_ping.clone();
let mut instant = data.write().unwrap();
*instant = Instant::now();
}
pub fn get_last_pong(&self) -> Instant {
let data = self.last_pong.clone();
let instant = data.read().unwrap();
*instant
}
pub fn get_last_ping(&self) -> Instant {
        let data = self.last_ping.clone();
let instant = data.read().unwrap();
*instant
}
#[inline(always)]
pub fn b64(&self) -> bool {
self.b64
}
#[inline(always)]
pub fn xhr2(&self) -> bool {
self.xhr2
}
#[inline(always)]
pub fn jsonp_index(&self) -> Option<i32> {
self.jsonp
}
#[inline(always)]
pub fn close(&mut self, reason: &str) {
self.closed.store(true, Ordering::Relaxed);
let data = self.client_map.clone();
let mut map = data.write().unwrap();
map.remove(&self.sid);
self.on_close.read().unwrap().as_ref().map(|f| f(reason));
}
#[inline(always)]
pub fn closed(&self) -> bool {
self.closed.load(Ordering::Relaxed)
}
#[doc(hidden)]
pub fn emit(&self, data: Packet) {
if self.closed.load(Ordering::Relaxed) {
return;
}
debug!("sending ID {:?}", data.id);
match self.transport {
Transport::Polling(ref send, _) => send.send(data).unwrap(),
}
}
/// Send a message to the client
pub fn send(&self, data: Vec<u8>) {
self.emit(Packet{
id: ID::Message,
data: data
})
}
/// Set callback for when a packet is sent to the client (message, ping)
pub fn on_packet<F>(&self, f: F)
where F: Fn(Packet) + 'static
{
let mut func = self.on_packet.write().unwrap();
*func = Some(Box::new(f));
}
/// Set callback for when the write buffer is flushed
pub fn on_flush<F>(&self, f: F)
where F: Fn(&[Packet]) + 'static
{
let mut func = self.on_flush.write().unwrap();
*func = Some(Box::new(f))
}
/// Set callback for when the client is disconnected
pub fn on_close<F>(&self, f: F)
where F: Fn(&str) + 'static
{
let mut data = self.on_close.write().unwrap();
*data = Some(Box::new(f));
}
/// Set callback for when client sends a message
pub fn on_message<F>(&self, f: F)
where F: Fn(&[u8]) + 'static
{
let mut data = self.on_message.write().unwrap();
*data = Some(Box::new(f));
}
#[inline]
#[doc(hidden)]
pub fn call_on_message(&self, data: &[u8]) {
if self.closed() {
return;
}
if let Some(ref func) = *self.on_message.read().unwrap() {
func(data)
}
}
#[doc(hidden)]
pub fn call_on_packet(&self, p: Packet) {
if self.closed() {
return;
}
if let Some(ref func) = *self.on_packet.read().unwrap() {
func(p)
}
}
#[doc(hidden)]
pub fn encode_write_buffer(&self) -> Payload {
let Transport::Polling(_, ref lock) = self.transport;
let mut packets = vec![];
let recv = lock.lock().unwrap();
packets.push(recv.recv().unwrap());
while let Ok(packet) = recv.try_recv() {
packets.push(packet)
}
self.call_on_flush(packets.as_slice());
encode_payload(&packets, self.jsonp, self.b64, self.xhr2)
}
#[inline]
fn call_on_flush(&self, packets: &[Packet]) {
if self.closed() {
return;
}
if let Some(ref func) = *self.on_flush.read().unwrap() {
func(packets)
}
}
}
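// Example (sketch): wiring callbacks onto a `Socket`. The socket itself would come
// from the server's connection handling (not shown here); `socket` is illustrative.
#[allow(dead_code)]
fn wire_callbacks(socket: &Socket) {
    socket.on_message(|data| println!("received {} bytes", data.len()));
    socket.on_close(|reason| println!("socket closed: {}", reason));
    socket.send(b"hello".to_vec());
}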
| 26.934884 | 76 | 0.539112 |
e574ced954f3a7e5b1acf811f4a9f8391528dc11 | 131 |
mod input_system;
mod rendering_system;
pub use self::input_system::InputSystem;
pub use self::rendering_system::RenderingSystem;
| 21.833333 | 48 | 0.824427 |
614eda1f32ef71f0e09832d949ff2f796ccba43e | 4,212 |
use crate::{
errors::to_py_err,
wasmer_inner::{wasmer, wasmer_engines as engines},
};
use pyo3::{exceptions::PyTypeError, prelude::*};
/// The store represents all global state that can be manipulated by
/// WebAssembly programs. It consists of the runtime representation of
/// all instances of functions, tables, memories, and globals that
/// have been allocated during the lifetime of the abstract machine.
///
/// The `Store` holds the engine (that is —amongst many things— used
/// to compile the WebAssembly bytes into a valid module artifact), in
/// addition to the `Tunables` (that are used to create the memories,
/// tables and globals). The engine comes from the `wasmer.engine`
/// module.
///
/// Specification: https://webassembly.github.io/spec/core/exec/runtime.html#store
///
/// Read the documentation of the `engine` submodule to learn more.
///
/// ## Examples
///
/// Use the Universal engine with no compiler (headless mode):
///
/// ```py
/// from wasmer import engine, Store
///
/// store = Store(engine.Universal())
/// ```
///
/// Use the Universal engine with the LLVM compiler:
///
/// ```py,ignore
/// from wasmer import engine, Store
/// from wasmer_compiler_llvm import Compiler
///
/// store = Store(engine.Universal(Compiler))
/// ```
///
/// If the store is built without an engine, the Universal engine will be
/// used, with the first compiler found in this order:
/// `wasmer_compiler_cranelift`, `wasmer_compiler_llvm`,
/// `wasmer_compiler_singlepass`, otherwise it will run in headless
/// mode.
#[pyclass]
#[pyo3(text_signature = "(engine)")]
pub struct Store {
inner: wasmer::Store,
engine_name: String,
compiler_name: Option<String>,
}
impl Store {
pub fn inner(&self) -> &wasmer::Store {
&self.inner
}
}
#[pymethods]
impl Store {
#[new]
fn new(py: Python, engine: Option<&PyAny>) -> PyResult<Self> {
let (inner, engine_name, compiler_name) = match engine {
Some(engine) => {
if let Ok(universal) = engine.downcast::<PyCell<engines::Universal>>() {
let universal = universal.borrow();
(
wasmer::Store::new(universal.inner()),
engines::Universal::name(),
universal.compiler_name().cloned(),
)
} else if let Ok(dylib) = engine.downcast::<PyCell<engines::Dylib>>() {
let dylib = dylib.borrow();
(
wasmer::Store::new(dylib.inner()),
engines::Dylib::name(),
dylib.compiler_name().cloned(),
)
} else {
return Err(to_py_err::<PyTypeError, _>("Unknown engine"));
}
}
// No engine?
None => {
// This package embeds the `Universal` engine, we are going
// to use it. We may want to load a compiler with it,
// otherwise it's going to be a headless engine.
let compiler = py
// Which compiler is available?
.import("wasmer_compiler_cranelift")
.or_else(|_| py.import("wasmer_compiler_llvm"))
.or_else(|_| py.import("wasmer_compiler_singlepass"))
// If any, load the `Compiler` class.
.and_then(|compiler_module| compiler_module.getattr("Compiler"))
.ok();
let target = None;
let engine = engines::Universal::raw_new(compiler, target)?;
(
wasmer::Store::new(engine.inner()),
engines::Universal::name(),
engine.compiler_name().cloned(),
)
}
};
Ok(Self {
inner,
engine_name: engine_name.to_string(),
compiler_name,
})
}
#[getter]
fn engine_name(&self) -> &String {
&self.engine_name
}
#[getter]
fn compiler_name(&self) -> Option<&String> {
self.compiler_name.as_ref()
}
}
| 32.651163 | 88 | 0.555318 |
71d33579f61957e3be34bf4ecee7e3ec222220e3 | 11,245 |
// This file is part of the open-source port of SeetaFace engine, which originally includes three modules:
// SeetaFace Detection, SeetaFace Alignment, and SeetaFace Identification.
//
// This file is part of the SeetaFace Detection module, containing codes implementing the face detection method described in the following paper:
//
// Funnel-structured cascade for multi-view face detection with alignment awareness,
// Shuzhe Wu, Meina Kan, Zhenliang He, Shiguang Shan, Xilin Chen.
// In Neurocomputing (under review)
//
// Copyright (C) 2016, Visual Information Processing and Learning (VIPL) group,
// Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China.
//
// As an open-source face recognition engine: you can redistribute SeetaFace source codes
// and/or modify it under the terms of the BSD 2-Clause License.
//
// You should have received a copy of the BSD 2-Clause License along with the software.
// If not, see < https://opensource.org/licenses/BSD-2-Clause>.
use num;
use num::integer::Integer;
use num::traits::WrappingAdd;
use common::Rectangle;
use feat::FeatureMap;
use math;
pub struct LabBoostedFeatureMap {
roi: Option<Rectangle>,
width: u32,
height: u32,
length: usize,
feat_map: Vec<u8>,
rect_sum: Vec<i32>,
int_img: Vec<i32>,
square_int_img: Vec<u32>,
rect_width: u32,
rect_height: u32,
num_rect: u32,
}
impl FeatureMap for LabBoostedFeatureMap {
fn compute(&mut self, input: *const u8, width: u32, height: u32) {
if width == 0 || height == 0 {
panic!(format!(
"Illegal arguments: width ({}), height ({})",
width, height
));
}
self.reshape(width, height);
self.compute_integral_images(input);
self.compute_rect_sum();
self.compute_feature_map();
}
fn set_roi(&mut self, roi: Rectangle) {
self.roi = Some(roi);
}
}
impl LabBoostedFeatureMap {
pub fn new() -> Self {
LabBoostedFeatureMap {
roi: None,
width: 0,
height: 0,
length: 0,
feat_map: vec![],
rect_sum: vec![],
int_img: vec![],
square_int_img: vec![],
rect_width: 3,
rect_height: 3,
num_rect: 3,
}
}
pub fn get_feature_val(&self, offset_x: i32, offset_y: i32) -> u8 {
let roi = self.roi.as_ref().unwrap();
let i = (roi.y() + offset_y) * (self.width as i32) + roi.x() + offset_x;
self.feat_map[i as usize]
}
pub fn get_std_dev(&self) -> f64 {
let roi_width;
let roi_height;
let roi_x;
let roi_y;
{
let roi = self.roi.as_ref().unwrap();
roi_width = roi.width() as i32;
roi_height = roi.height() as i32;
roi_x = roi.x() as i32;
roi_y = roi.y() as i32;
}
let self_width = self.width as i32;
let mean;
let m2;
let area = f64::from(roi_width * roi_height);
match (roi_x, roi_y) {
(0, 0) => {
let bottom_right = (roi_height - 1) * self_width + roi_width - 1;
mean = f64::from(self.int_img[bottom_right as usize]) / area;
m2 = f64::from(self.square_int_img[bottom_right as usize]) / area;
}
(0, _) => {
let top_right = (roi_y - 1) * self_width + roi_width - 1;
let bottom_right = top_right + roi_height * self_width;
mean = f64::from(
self.int_img[bottom_right as usize] - self.int_img[top_right as usize],
) / area;
m2 = f64::from(
self.square_int_img[bottom_right as usize]
- self.square_int_img[top_right as usize],
) / area;
}
(_, 0) => {
let bottom_left = (roi_height - 1) * self_width + roi_x - 1;
let bottom_right = bottom_left + roi_width;
mean = f64::from(
self.int_img[bottom_right as usize] - self.int_img[bottom_left as usize],
) / area;
m2 = f64::from(
self.square_int_img[bottom_right as usize]
- self.square_int_img[bottom_left as usize],
) / area;
}
(_, _) => {
let top_left = (roi_y - 1) * self_width + roi_x - 1;
let top_right = top_left + roi_width;
let bottom_left = top_left + roi_height * self_width;
let bottom_right = bottom_left + roi_width;
mean = f64::from(
self.int_img[bottom_right as usize] - self.int_img[bottom_left as usize]
+ self.int_img[top_left as usize]
- self.int_img[top_right as usize],
) / area;
m2 = f64::from(
self.square_int_img[bottom_right as usize]
.wrapping_sub(self.square_int_img[bottom_left as usize])
.wrapping_add(self.square_int_img[top_left as usize])
.wrapping_sub(self.square_int_img[top_right as usize]),
) / area;
}
}
(m2 - mean * mean).sqrt()
}
fn reshape(&mut self, width: u32, height: u32) {
self.width = width;
self.height = height;
self.length = (width * height) as usize;
self.feat_map.resize(self.length, 0);
self.rect_sum.resize(self.length, 0);
self.int_img.resize(self.length, 0);
self.square_int_img.resize(self.length, 0);
}
fn compute_integral_images(&mut self, input: *const u8) {
unsafe {
math::copy_u8_to_i32(input, self.int_img.as_mut_ptr(), self.length);
math::square(
self.int_img.as_ptr(),
self.square_int_img.as_mut_ptr(),
self.length,
);
LabBoostedFeatureMap::compute_integral(
self.int_img.as_mut_ptr(),
self.width,
self.height,
);
LabBoostedFeatureMap::compute_integral(
self.square_int_img.as_mut_ptr(),
self.width,
self.height,
);
}
}
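    // Computes a summed-area (integral) image in place: after this call, the value at
    // (r, c) holds the sum of all input values in the rectangle [0..=r, 0..=c]. Row 0
    // becomes a running prefix sum; each later row adds its own running row sum `s` to
    // the value directly above it (`dest_previous_row`), using wrapping adds because
    // the squared-value image can overflow (see the inline note below).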
unsafe fn compute_integral<T: Integer + WrappingAdd + Copy>(
data: *mut T,
width: u32,
height: u32,
) {
let mut src = data;
let mut dest = data;
let mut dest_previous_row = dest;
*dest = *src;
src = src.offset(1);
for _ in 1..width {
*dest.offset(1) = *dest + *src;
src = src.offset(1);
dest = dest.offset(1);
}
dest = dest.offset(1);
for _ in 1..height {
let mut s: T = num::zero();
for _ in 0..width {
s = s + *src;
// overflow does happen here for the list of squares..
// original code does not seem to worry about this though
*dest = (*dest_previous_row).wrapping_add(&s);
src = src.offset(1);
dest = dest.offset(1);
dest_previous_row = dest_previous_row.offset(1);
}
}
}
fn compute_rect_sum(&mut self) {
let width = (self.width - self.rect_width) as usize;
let height = self.height - self.rect_height;
let int_img_ptr = self.int_img.as_ptr();
let rect_sum_ptr = self.rect_sum.as_mut_ptr();
unsafe {
*rect_sum_ptr = *(int_img_ptr
.offset(((self.rect_height - 1) * self.width + self.rect_width - 1) as isize));
math::vector_sub(
int_img_ptr
.offset(((self.rect_height - 1) * self.width + self.rect_width) as isize),
int_img_ptr.offset(((self.rect_height - 1) * self.width) as isize),
rect_sum_ptr.offset(1),
width,
);
for i in 1..(height + 1) {
let top_left = int_img_ptr.offset(((i - 1) * self.width) as isize);
let top_right = top_left.offset((self.rect_width - 1) as isize);
let bottom_left = top_left.offset((self.rect_height * self.width) as isize);
let bottom_right = bottom_left.offset((self.rect_width - 1) as isize);
let mut dest = rect_sum_ptr.offset((i * self.width) as isize);
*dest = *bottom_right - *top_right;
dest = dest.offset(1);
math::vector_sub(bottom_right.offset(1), top_right.offset(1), dest, width);
math::vector_sub(dest, bottom_left, dest, width);
math::vector_add(dest, top_left, dest, width);
}
}
}
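    // Builds the LAB (locally assembled binary) feature map: for each position, the
    // rect sum of the centre block in a 3x3 grid of blocks is compared with each of
    // the eight surrounding blocks, and every `>=` comparison sets one bit of the
    // output byte.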
fn compute_feature_map(&mut self) {
let width = self.width - self.rect_width * self.num_rect;
let height = self.height - self.rect_height * self.num_rect;
let offset = self.width * self.rect_height;
let feat_map_ptr = self.feat_map.as_mut_ptr();
unsafe {
for r in 0..(height + 1) {
for c in 0..(width + 1) {
let dest = feat_map_ptr.offset((r * self.width + c) as isize);
*dest = 0;
let white_rect_sum = self.rect_sum
[((r + self.rect_height) * self.width + c + self.rect_width) as usize];
let mut black_rect_idx = r * self.width + c;
if white_rect_sum >= self.rect_sum[black_rect_idx as usize] {
*dest |= 0x80
};
black_rect_idx += self.rect_width;
if white_rect_sum >= self.rect_sum[black_rect_idx as usize] {
*dest |= 0x40
};
black_rect_idx += self.rect_width;
if white_rect_sum >= self.rect_sum[black_rect_idx as usize] {
*dest |= 0x20
};
black_rect_idx += offset;
if white_rect_sum >= self.rect_sum[black_rect_idx as usize] {
*dest |= 0x08
};
black_rect_idx += offset;
if white_rect_sum >= self.rect_sum[black_rect_idx as usize] {
*dest |= 0x01
};
black_rect_idx -= self.rect_width;
if white_rect_sum >= self.rect_sum[black_rect_idx as usize] {
*dest |= 0x02
};
black_rect_idx -= self.rect_width;
if white_rect_sum >= self.rect_sum[black_rect_idx as usize] {
*dest |= 0x04
};
black_rect_idx -= offset;
if white_rect_sum >= self.rect_sum[black_rect_idx as usize] {
*dest |= 0x10
};
}
}
}
}
}
| 36.157556 | 145 | 0.515073 |
8f2e5377861b1fb27b5efabd68243d9447688e77 | 44,292 |
use serde::Deserialize;
use std::iter;
use swc_common::{Spanned, SyntaxContext, DUMMY_SP};
use swc_ecma_ast::*;
use swc_ecma_transforms_base::helper;
use swc_ecma_utils::alias_ident_for;
use swc_ecma_utils::alias_if_required;
use swc_ecma_utils::has_rest_pat;
use swc_ecma_utils::is_literal;
use swc_ecma_utils::private_ident;
use swc_ecma_utils::prop_name_to_expr;
use swc_ecma_utils::quote_ident;
use swc_ecma_utils::undefined;
use swc_ecma_utils::ExprFactory;
use swc_ecma_utils::StmtLike;
use swc_ecma_visit::noop_visit_type;
use swc_ecma_visit::{noop_fold_type, Fold, FoldWith, Node, Visit, VisitWith};
/// `@babel/plugin-transform-destructuring`
///
/// # Example
/// ## In
/// ```js
/// let {x, y} = obj;
///
/// let [a, b, ...rest] = arr;
/// ```
/// ## Out
/// ```js
/// let _obj = obj,
/// x = _obj.x,
/// y = _obj.y;
///
/// let _arr = arr,
/// _arr2 = _toArray(_arr),
/// a = _arr2[0],
/// b = _arr2[1],
/// rest = _arr2.slice(2);
/// ```
pub fn destructuring(c: Config) -> impl Fold {
Destructuring { c }
}
struct Destructuring {
c: Config,
}
#[derive(Debug, Default, Clone, Copy, Deserialize)]
pub struct Config {
#[serde(default)]
pub loose: bool,
}
macro_rules! impl_for_for_stmt {
($name:ident, $T:tt) => {
fn $name(&mut self, mut for_stmt: $T) -> $T {
let (left, stmt) = match for_stmt.left {
VarDeclOrPat::VarDecl(var_decl) => {
let has_complex = var_decl.decls.iter().any(|d| match d.name {
Pat::Ident(_) => false,
_ => true,
});
if !has_complex {
return $T {
left: VarDeclOrPat::VarDecl(var_decl),
..for_stmt
};
}
let ref_ident = make_ref_ident_for_for_stmt();
let left = VarDeclOrPat::VarDecl(VarDecl {
decls: vec![VarDeclarator {
span: DUMMY_SP,
name: Pat::Ident(ref_ident.clone().into()),
init: None,
definite: false,
}],
..var_decl
});
// Unpack variables
let stmt = Stmt::Decl(Decl::Var(VarDecl {
span: var_decl.span(),
kind: VarDeclKind::Let,
// I(kdy1) guess var_decl.len() == 1
decls: var_decl
.decls
.into_iter()
.map(|decl| VarDeclarator {
init: Some(Box::new(Expr::Ident(ref_ident.clone()))),
..decl
})
.collect::<Vec<_>>()
.fold_with(self),
declare: false,
}));
(left, stmt)
}
VarDeclOrPat::Pat(pat) => match pat {
Pat::Ident(..) => {
return $T {
left: VarDeclOrPat::Pat(pat),
..for_stmt
};
}
_ => {
let left_ident = make_ref_ident_for_for_stmt();
let left = VarDeclOrPat::Pat(Pat::Ident(left_ident.clone().into()));
// Unpack variables
let stmt = AssignExpr {
span: DUMMY_SP,
left: PatOrExpr::Pat(Box::new(pat)),
op: op!("="),
right: Box::new(left_ident.into()),
}
.into_stmt();
(left, stmt)
}
},
};
for_stmt.left = left;
for_stmt.body = Box::new(Stmt::Block(match *for_stmt.body {
Stmt::Block(BlockStmt { span, stmts }) => BlockStmt {
span,
stmts: iter::once(stmt).chain(stmts).collect(),
},
body => BlockStmt {
span: DUMMY_SP,
stmts: vec![stmt, body],
},
}));
for_stmt
}
};
}
fn make_ref_ident_for_for_stmt() -> Ident {
private_ident!("ref")
}
impl AssignFolder {
fn fold_var_decl(&mut self, decls: &mut Vec<VarDeclarator>, decl: VarDeclarator) {
match decl.name {
Pat::Ident(..) => decls.push(decl),
Pat::Rest(..) => unreachable!(
"rest pattern should handled by array pattern handler: {:?}",
decl.name
),
Pat::Array(ArrayPat { elems, .. }) => {
assert!(
decl.init.is_some(),
"destructuring pattern binding requires initializer"
);
let init = decl.init.unwrap();
if is_literal(&init) {
match *init {
Expr::Array(arr)
if elems.len() == arr.elems.len() || has_rest_pat(&elems) =>
{
let mut arr_elems = Some(arr.elems.into_iter());
elems.into_iter().for_each(|p| match p {
Some(Pat::Rest(p)) => {
self.fold_var_decl(
decls,
VarDeclarator {
span: p.span(),
name: *p.arg,
init: Some(Box::new(Expr::Array(ArrayLit {
span: DUMMY_SP,
elems: arr_elems
.take()
.expect("two rest element?")
.collect(),
}))),
definite: false,
},
);
}
Some(p) => {
let e = arr_elems
.as_mut()
.expect("pattern after rest element?")
.next()
.unwrap();
self.fold_var_decl(
decls,
VarDeclarator {
span: p.span(),
init: e.map(|e| {
debug_assert_eq!(e.spread, None);
e.expr
}),
name: p,
definite: false,
},
)
}
None => {}
});
return;
}
_ => {}
}
}
// Make ref var if required
let ref_ident = make_ref_ident_for_array(
self.c,
if self.exporting {
&mut self.vars
} else {
decls
},
Some(init),
Some(if has_rest_pat(&elems) {
std::usize::MAX
} else {
elems.len()
}),
);
for (i, elem) in elems.into_iter().enumerate() {
let elem: Pat = match elem {
Some(elem) => elem,
None => continue,
};
let var_decl = match elem {
Pat::Rest(RestPat {
dot3_token, arg, ..
}) => VarDeclarator {
span: dot3_token,
name: *arg,
init: Some(Box::new(Expr::Call(CallExpr {
span: DUMMY_SP,
callee: ref_ident
.clone()
.make_member(quote_ident!("slice"))
.as_callee(),
args: vec![Lit::Num(Number {
value: i as f64,
span: dot3_token,
})
.as_arg()],
type_args: Default::default(),
}))),
definite: false,
},
_ => VarDeclarator {
span: elem.span(),
// This might be pattern.
// So we fold it again.
name: elem,
init: Some(Box::new(make_ref_idx_expr(&ref_ident, i))),
definite: false,
},
};
decls.extend(vec![var_decl].fold_with(self));
}
}
Pat::Object(ObjectPat { span, props, .. }) if props.is_empty() => {
let (ident, aliased) = alias_if_required(&decl.init.as_ref().unwrap(), "ref");
if aliased {
decls.push(VarDeclarator {
span: DUMMY_SP,
name: Pat::Ident(ident.clone().into()),
init: decl.init,
definite: false,
});
}
// We should convert
//
// var {} = null;
//
// to
//
// var _ref = null;
// _objectDestructuringEmpty(_ref);
//
decls.push(VarDeclarator {
span,
name: Pat::Ident(ident.clone().into()),
init: Some(Box::new(Expr::Cond(CondExpr {
span: DUMMY_SP,
test: Box::new(Expr::Bin(BinExpr {
span: DUMMY_SP,
left: Box::new(Expr::Ident(ident.clone())),
op: op!("!=="),
right: Box::new(Expr::Lit(Lit::Null(Null { span: DUMMY_SP }))),
})),
cons: Box::new(Expr::Ident(ident.clone())),
alt: Box::new(Expr::Call(CallExpr {
span: DUMMY_SP,
callee: helper!(throw, "throw"),
args: vec![
// new TypeError("Cannot destructure undefined")
NewExpr {
span: DUMMY_SP,
callee: Box::new(Expr::Ident(Ident::new(
"TypeError".into(),
DUMMY_SP,
))),
args: Some(vec![Lit::Str(Str {
span: DUMMY_SP,
value: "Cannot destructure undefined".into(),
has_escape: false,
kind: Default::default(),
})
.as_arg()]),
type_args: Default::default(),
}
.as_arg(),
],
type_args: Default::default(),
})),
}))),
definite: false,
})
}
Pat::Object(ObjectPat { props, .. }) => {
assert!(
decl.init.is_some(),
"destructuring pattern binding requires initializer"
);
let can_be_null = can_be_null(decl.init.as_ref().unwrap());
let ref_ident = make_ref_ident(self.c, decls, decl.init);
let ref_ident = if can_be_null {
let init = Box::new(Expr::Ident(ref_ident.clone()));
make_ref_ident(self.c, decls, Some(init))
} else {
ref_ident
};
for prop in props {
let prop_span = prop.span();
match prop {
ObjectPatProp::KeyValue(KeyValuePatProp { key, value }) => {
let computed = match key {
PropName::Computed(..) => true,
_ => false,
};
let var_decl = VarDeclarator {
span: prop_span,
name: *value,
init: Some(Box::new(make_ref_prop_expr(
&ref_ident,
Box::new(prop_name_to_expr(key)),
computed,
))),
definite: false,
};
decls.extend(vec![var_decl].fold_with(self));
}
ObjectPatProp::Assign(AssignPatProp { key, value, .. }) => {
let computed = false;
match value {
Some(value) => {
let ref_ident = make_ref_ident(
self.c,
decls,
Some(Box::new(make_ref_prop_expr(
&ref_ident,
Box::new(key.clone().into()),
computed,
))),
);
let var_decl = VarDeclarator {
span: prop_span,
name: Pat::Ident(key.clone().into()),
init: Some(Box::new(make_cond_expr(ref_ident, value))),
definite: false,
};
decls.extend(vec![var_decl].fold_with(self));
}
None => {
let var_decl = VarDeclarator {
span: prop_span,
name: Pat::Ident(key.clone().into()),
init: Some(Box::new(make_ref_prop_expr(
&ref_ident,
Box::new(key.clone().into()),
computed,
))),
definite: false,
};
decls.extend(vec![var_decl].fold_with(self));
}
}
}
ObjectPatProp::Rest(..) => unreachable!(
"Object rest pattern should be removed by es2018::object_rest_spread \
pass"
),
}
}
}
Pat::Assign(AssignPat {
span,
left,
right: def_value,
..
}) => {
assert!(
decl.init.is_some(),
"destructuring pattern binding requires initializer"
);
let init = decl.init;
let tmp_ident: Ident = (|| {
match init {
Some(ref e) => match &**e {
Expr::Ident(ref i) if i.span.ctxt() != SyntaxContext::empty() => {
return i.clone();
}
_ => {}
},
_ => {}
}
let tmp_ident = private_ident!(span, "tmp");
decls.push(VarDeclarator {
span: DUMMY_SP,
name: Pat::Ident(tmp_ident.clone().into()),
init,
definite: false,
});
tmp_ident
})();
let var_decl = VarDeclarator {
span,
name: *left,
// tmp === void 0 ? def_value : tmp
init: Some(Box::new(make_cond_expr(tmp_ident, def_value))),
definite: false,
};
decls.extend(vec![var_decl].fold_with(self))
}
_ => unimplemented!("Pattern {:?}", decl),
}
}
}
impl Fold for Destructuring {
noop_fold_type!();
impl_for_for_stmt!(fold_for_in_stmt, ForInStmt);
impl_for_for_stmt!(fold_for_of_stmt, ForOfStmt);
impl_fold_fn!();
fn fold_module_items(&mut self, n: Vec<ModuleItem>) -> Vec<ModuleItem> {
self.fold_stmt_like(n)
}
fn fold_stmts(&mut self, n: Vec<Stmt>) -> Vec<Stmt> {
self.fold_stmt_like(n)
}
}
impl Destructuring {
fn fold_fn_like(&mut self, ps: Vec<Param>, body: BlockStmt) -> (Vec<Param>, BlockStmt) {
let mut params = vec![];
let mut decls = vec![];
for param in ps {
let span = param.span();
match param.pat {
Pat::Ident(..) => params.push(param),
Pat::Array(..) | Pat::Object(..) | Pat::Assign(..) => {
let ref_ident = private_ident!(span, "ref");
params.push(Param {
span: DUMMY_SP,
decorators: Default::default(),
pat: Pat::Ident(ref_ident.clone().into()),
});
decls.push(VarDeclarator {
span,
name: param.pat,
init: Some(Box::new(Expr::Ident(ref_ident))),
definite: false,
})
}
_ => {}
}
}
let stmts = if decls.is_empty() {
body.stmts
} else {
iter::once(
Stmt::Decl(Decl::Var(VarDecl {
span: DUMMY_SP,
kind: VarDeclKind::Let,
decls,
declare: false,
}))
.fold_with(self),
)
.chain(body.stmts)
.collect()
};
(params, BlockStmt { stmts, ..body })
}
}
struct AssignFolder {
c: Config,
exporting: bool,
vars: Vec<VarDeclarator>,
/// Used like `.take().is_some()`.
ignore_return_value: Option<()>,
}
impl Fold for AssignFolder {
noop_fold_type!();
fn fold_export_decl(&mut self, decl: ExportDecl) -> ExportDecl {
let old = self.exporting;
self.exporting = true;
let decl = decl.fold_children_with(self);
self.exporting = old;
decl
}
fn fold_expr(&mut self, expr: Expr) -> Expr {
let ignore_return_value = self.ignore_return_value.take().is_some();
let expr = match expr {
// Handle iife
Expr::Fn(..) | Expr::Object(..) => expr.fold_with(&mut Destructuring { c: self.c }),
_ => expr.fold_children_with(self),
};
match expr {
Expr::Assign(AssignExpr {
span,
left,
op: op!("="),
right,
}) => match left {
PatOrExpr::Pat(pat) => match *pat {
Pat::Expr(expr) => Expr::Assign(AssignExpr {
span,
left: PatOrExpr::Expr(expr),
op: op!("="),
right,
}),
Pat::Ident(..) => Expr::Assign(AssignExpr {
span,
left: PatOrExpr::Pat(pat),
op: op!("="),
right,
}),
Pat::Array(ArrayPat { elems, .. }) => {
let mut exprs = Vec::with_capacity(elems.len() + 1);
if is_literal(&right) && ignore_return_value {
match *right {
Expr::Array(arr)
if elems.len() == arr.elems.len() || has_rest_pat(&elems) =>
{
let mut arr_elems = Some(arr.elems.into_iter());
elems.into_iter().for_each(|p| match p {
Some(Pat::Rest(p)) => {
exprs.push(Box::new(Expr::Assign(AssignExpr {
span: p.span(),
left: PatOrExpr::Pat(p.arg),
op: op!("="),
right: Box::new(Expr::Array(ArrayLit {
span: DUMMY_SP,
elems: arr_elems
.take()
.expect("two rest element?")
.collect(),
})),
})));
}
Some(p) => {
let e = arr_elems
.as_mut()
.expect("pattern after rest element?")
.next()
.and_then(|v| v);
let right = e
.map(|e| {
debug_assert_eq!(e.spread, None);
e.expr
})
.unwrap_or_else(|| undefined(p.span()));
exprs.push(Box::new(Expr::Assign(AssignExpr {
span: p.span(),
left: PatOrExpr::Pat(Box::new(p)),
op: op!("="),
right,
})));
}
None => {}
});
return SeqExpr { span, exprs }.into();
}
_ => {}
}
}
// initialized by first element of sequence expression
let ref_ident = make_ref_ident_for_array(
self.c,
&mut self.vars,
None,
Some(if has_rest_pat(&elems) {
std::usize::MAX
} else {
elems.len()
}),
);
exprs.push(Box::new(Expr::Assign(AssignExpr {
span: DUMMY_SP,
op: op!("="),
left: PatOrExpr::Pat(Box::new(Pat::Ident(ref_ident.clone().into()))),
right,
})));
for (i, elem) in elems.into_iter().enumerate() {
let elem = match elem {
Some(elem) => elem,
None => continue,
};
let elem_span = elem.span();
match elem {
Pat::Assign(AssignPat {
span, left, right, ..
}) => {
// initialized by sequence expression.
let assign_ref_ident =
make_ref_ident(self.c, &mut self.vars, None);
exprs.push(Box::new(Expr::Assign(AssignExpr {
span: DUMMY_SP,
left: PatOrExpr::Pat(Box::new(Pat::Ident(
assign_ref_ident.clone().into(),
))),
op: op!("="),
right: Box::new(
ref_ident.clone().computed_member(i as f64),
),
})));
exprs.push(Box::new(
Expr::Assign(AssignExpr {
span,
left: PatOrExpr::Pat(left),
op: op!("="),
right: Box::new(make_cond_expr(
assign_ref_ident,
right,
)),
})
.fold_with(self),
));
}
Pat::Rest(RestPat { arg, .. }) => exprs.push(Box::new(
Expr::Assign(AssignExpr {
span: elem_span,
op: op!("="),
left: PatOrExpr::Pat(arg),
right: Box::new(Expr::Call(CallExpr {
span: DUMMY_SP,
callee: ref_ident
.clone()
.make_member(quote_ident!("slice"))
.as_callee(),
args: vec![(i as f64).as_arg()],
type_args: Default::default(),
})),
})
.fold_with(self),
)),
_ => exprs.push(Box::new(
Expr::Assign(AssignExpr {
span: elem_span,
op: op!("="),
left: PatOrExpr::Pat(Box::new(elem)),
right: Box::new(make_ref_idx_expr(&ref_ident, i)),
})
.fold_with(self),
)),
}
}
// last one should be `ref`
exprs.push(Box::new(Expr::Ident(ref_ident)));
Expr::Seq(SeqExpr {
span: DUMMY_SP,
exprs,
})
}
Pat::Object(ObjectPat { span, props, .. }) => {
let ref_ident = make_ref_ident(self.c, &mut self.vars, None);
let mut exprs = vec![];
exprs.push(Box::new(Expr::Assign(AssignExpr {
span,
left: PatOrExpr::Pat(Box::new(Pat::Ident(ref_ident.clone().into()))),
op: op!("="),
right,
})));
for prop in props {
let span = prop.span();
match prop {
ObjectPatProp::KeyValue(KeyValuePatProp { key, value }) => {
let computed = match key {
PropName::Computed(..) => true,
_ => false,
};
exprs.push(Box::new(Expr::Assign(AssignExpr {
span,
left: PatOrExpr::Pat(value),
op: op!("="),
right: Box::new(make_ref_prop_expr(
&ref_ident,
Box::new(prop_name_to_expr(key)),
computed,
)),
})));
}
ObjectPatProp::Assign(AssignPatProp { key, value, .. }) => {
let computed = false;
match value {
Some(value) => {
let prop_ident =
make_ref_ident(self.c, &mut self.vars, None);
exprs.push(Box::new(Expr::Assign(AssignExpr {
span,
left: PatOrExpr::Pat(Box::new(Pat::Ident(
prop_ident.clone().into(),
))),
op: op!("="),
right: Box::new(make_ref_prop_expr(
&ref_ident,
Box::new(key.clone().into()),
computed,
)),
})));
exprs.push(Box::new(Expr::Assign(AssignExpr {
span,
left: PatOrExpr::Pat(Box::new(Pat::Ident(
key.clone().into(),
))),
op: op!("="),
right: Box::new(make_cond_expr(prop_ident, value)),
})));
}
None => {
exprs.push(Box::new(Expr::Assign(AssignExpr {
span,
left: PatOrExpr::Pat(Box::new(Pat::Ident(
key.clone().into(),
))),
op: op!("="),
right: Box::new(make_ref_prop_expr(
&ref_ident,
Box::new(key.clone().into()),
computed,
)),
})));
}
}
}
ObjectPatProp::Rest(_) => unreachable!(
"object rest pattern should be removed by \
es2018::object_rest_spread pass"
),
}
}
// Last one should be object itself.
exprs.push(Box::new(Expr::Ident(ref_ident)));
Expr::Seq(SeqExpr {
span: DUMMY_SP,
exprs,
})
}
Pat::Assign(pat) => unimplemented!("assignment pattern {:?}", pat),
Pat::Rest(pat) => unimplemented!("rest pattern {:?}", pat),
Pat::Invalid(..) => unreachable!(),
},
_ => Expr::Assign(AssignExpr {
span,
left,
op: op!("="),
right,
}),
},
_ => expr,
}
}
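    // Rough shape of the output produced above for `[a, b = 1, ...rest] = value`
    // (identifier names are illustrative; the real ones are generated by
    // `make_ref_ident_for_array` / `make_ref_ident`):
    //
    //     (_ref = value,
    //      a = _ref[0],
    //      _b = _ref[1], b = _b === void 0 ? 1 : _b,
    //      rest = _ref.slice(2),
    //      _ref)
    //
    // The `var _ref, _b;` declarations are collected in `self.vars` and hoisted
    // by `Destructuring::fold_stmt_like`.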
fn fold_stmt(&mut self, s: Stmt) -> Stmt {
match s {
Stmt::Expr(e) => {
self.ignore_return_value = Some(());
let e = e.fold_with(self);
assert_eq!(self.ignore_return_value, None);
Stmt::Expr(e)
}
_ => s.fold_children_with(self),
}
}
fn fold_var_declarators(&mut self, declarators: Vec<VarDeclarator>) -> Vec<VarDeclarator> {
let declarators = declarators.fold_children_with(self);
let is_complex = declarators.iter().any(|d| match d.name {
Pat::Ident(..) => false,
_ => true,
});
if !is_complex {
return declarators;
}
let mut decls = Vec::with_capacity(declarators.len());
for decl in declarators {
self.fold_var_decl(&mut decls, decl)
}
decls
}
}
impl Destructuring {
fn fold_stmt_like<T>(&mut self, stmts: Vec<T>) -> Vec<T>
where
Vec<T>: FoldWith<Self> + VisitWith<DestructuringVisitor>,
T: StmtLike + VisitWith<DestructuringVisitor> + FoldWith<AssignFolder>,
{
// fast path
if !has_destructuring(&stmts) {
return stmts;
}
let stmts = stmts.fold_children_with(self);
let mut buf = Vec::with_capacity(stmts.len());
for stmt in stmts {
let mut folder = AssignFolder {
c: self.c,
exporting: false,
vars: vec![],
ignore_return_value: None,
};
match stmt.try_into_stmt() {
Err(item) => {
let item = item.fold_with(&mut folder);
// Add variable declaration
// e.g. var ref
if !folder.vars.is_empty() {
buf.push(T::from_stmt(Stmt::Decl(Decl::Var(VarDecl {
span: DUMMY_SP,
kind: VarDeclKind::Var,
decls: folder.vars,
declare: false,
}))));
}
buf.push(item)
}
Ok(stmt) => {
let stmt = stmt.fold_with(&mut folder);
// Add variable declaration
// e.g. var ref
if !folder.vars.is_empty() {
buf.push(T::from_stmt(Stmt::Decl(Decl::Var(VarDecl {
span: DUMMY_SP,
kind: VarDeclKind::Var,
decls: folder.vars,
declare: false,
}))));
}
buf.push(T::from_stmt(stmt));
}
}
}
buf
}
}
fn make_ref_idx_expr(ref_ident: &Ident, i: usize) -> Expr {
ref_ident.clone().computed_member(i as f64)
}
fn make_ref_ident(c: Config, decls: &mut Vec<VarDeclarator>, init: Option<Box<Expr>>) -> Ident {
make_ref_ident_for_array(c, decls, init, None)
}
fn make_ref_ident_for_array(
c: Config,
decls: &mut Vec<VarDeclarator>,
mut init: Option<Box<Expr>>,
elem_cnt: Option<usize>,
) -> Ident {
if elem_cnt.is_none() {
if let Some(e) = init {
match *e {
Expr::Ident(i) => return i,
_ => init = Some(e),
}
}
}
let span = init.span();
let (ref_ident, aliased) = if c.loose {
if let Some(ref init) = init {
alias_if_required(&init, "ref")
} else {
(private_ident!(span, "ref"), true)
}
} else {
if let Some(ref init) = init {
(alias_ident_for(&init, "ref"), true)
} else {
(private_ident!(span, "ref"), true)
}
};
if aliased {
decls.push(VarDeclarator {
span,
name: Pat::Ident(ref_ident.clone().into()),
init: init.map(|v| {
if c.loose
|| match *v {
Expr::Array(..) => true,
_ => false,
}
{
v
} else {
match elem_cnt {
None => v,
Some(std::usize::MAX) => Box::new(
CallExpr {
span: DUMMY_SP,
callee: helper!(to_array, "toArray"),
args: vec![v.as_arg()],
type_args: Default::default(),
}
.into(),
),
Some(value) => Box::new(
CallExpr {
span: DUMMY_SP,
callee: helper!(sliced_to_array, "slicedToArray"),
args: vec![
v.as_arg(),
Lit::Num(Number {
span: DUMMY_SP,
value: value as _,
})
.as_arg(),
],
type_args: Default::default(),
}
.into(),
),
}
}
}),
definite: false,
});
}
ref_ident
}
fn make_ref_prop_expr(ref_ident: &Ident, prop: Box<Expr>, mut computed: bool) -> Expr {
computed |= match *prop {
Expr::Lit(Lit::Num(..)) | Expr::Lit(Lit::Str(..)) => true,
_ => false,
};
Expr::Member(MemberExpr {
span: DUMMY_SP,
obj: ExprOrSuper::Expr(Box::new(ref_ident.clone().into())),
computed,
prop,
})
}
/// Creates `tmp === void 0 ? def_value : tmp`
fn make_cond_expr(tmp: Ident, def_value: Box<Expr>) -> Expr {
Expr::Cond(CondExpr {
span: DUMMY_SP,
test: Box::new(Expr::Bin(BinExpr {
span: DUMMY_SP,
left: Box::new(Expr::Ident(tmp.clone())),
op: op!("==="),
right: Box::new(Expr::Unary(UnaryExpr {
span: DUMMY_SP,
op: op!("void"),
arg: Box::new(Expr::Lit(Lit::Num(Number {
span: DUMMY_SP,
value: 0.0,
}))),
})),
})),
cons: def_value,
alt: Box::new(Expr::Ident(tmp)),
})
}
fn can_be_null(e: &Expr) -> bool {
match *e {
Expr::Lit(Lit::Null(..))
| Expr::This(..)
| Expr::Ident(..)
| Expr::PrivateName(..)
| Expr::Member(..)
| Expr::Call(..)
| Expr::New(..)
| Expr::Yield(..)
| Expr::Await(..)
| Expr::MetaProp(..) => true,
// This does not include null
Expr::Lit(..) => false,
Expr::Array(..)
| Expr::Arrow(..)
| Expr::Object(..)
| Expr::Fn(..)
| Expr::Class(..)
| Expr::Tpl(..) => false,
Expr::TaggedTpl(..) => true,
Expr::Paren(ParenExpr { ref expr, .. }) => can_be_null(expr),
Expr::Seq(SeqExpr { ref exprs, .. }) => {
exprs.last().map(|e| can_be_null(e)).unwrap_or(true)
}
Expr::Assign(AssignExpr { ref right, .. }) => can_be_null(right),
Expr::Cond(CondExpr {
ref cons, ref alt, ..
}) => can_be_null(cons) || can_be_null(alt),
// TODO(kdy1): I'm not sure about this.
Expr::Unary(..) | Expr::Update(..) | Expr::Bin(..) => true,
Expr::JSXMember(..)
| Expr::JSXNamespacedName(..)
| Expr::JSXEmpty(..)
| Expr::JSXElement(..)
| Expr::JSXFragment(..) => unreachable!("destructuring jsx"),
// Trust user
Expr::TsNonNull(..) => false,
Expr::TsAs(TsAsExpr { ref expr, .. })
| Expr::TsTypeAssertion(TsTypeAssertion { ref expr, .. })
| Expr::TsTypeCast(TsTypeCastExpr { ref expr, .. })
| Expr::TsConstAssertion(TsConstAssertion { ref expr, .. }) => can_be_null(expr),
Expr::OptChain(ref e) => can_be_null(&e.expr),
Expr::Invalid(..) => unreachable!(),
}
}
fn has_destructuring<N>(node: &N) -> bool
where
N: VisitWith<DestructuringVisitor>,
{
let mut v = DestructuringVisitor { found: false };
node.visit_with(&Invalid { span: DUMMY_SP } as _, &mut v);
v.found
}
struct DestructuringVisitor {
found: bool,
}
impl Visit for DestructuringVisitor {
noop_visit_type!();
fn visit_pat(&mut self, node: &Pat, _: &dyn Node) {
node.visit_children_with(self);
match *node {
Pat::Ident(..) => {}
_ => self.found = true,
}
}
}
| 39.370667 | 99 | 0.325205 |
0ee1f450133a000d78b4bc78a541ceab6ce07f36
| 707 |
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use fidl_fuchsia_location_position::{Position, PositionExtras};
use serde::Serialize;
#[derive(Serialize)]
#[serde(remote = "Position")]
struct PositionDef {
pub latitude: f64,
pub longitude: f64,
#[serde(with = "PositionExtrasDef")]
pub extras: PositionExtras,
}
#[derive(Serialize)]
#[serde(remote = "PositionExtras")]
struct PositionExtrasDef {
pub accuracy_meters: Option<f64>,
pub altitude_meters: Option<f64>,
}
#[derive(Serialize)]
pub struct PositionSerializer(#[serde(with = "PositionDef")] pub Position);
| 27.192308 | 75 | 0.72843 |
9c9e7126ee3877861d7460015dd6257330a69a46
| 1,795 |
extern crate termion;
mod entities;
use entities::player::{Player};
use std::io::{Write, Stdout, stdout};
use std::time::{Duration};
use std::thread::{sleep};
use termion::{AsyncReader, async_stdin};
use termion::input::{TermRead};
use termion::raw::{IntoRawMode, RawTerminal};
use termion::event::{Key};
use termion::cursor::{Goto, Hide};
use termion::clear::{All as ClearAll};
use termion::style::{Reset};
struct Game {
stdin: AsyncReader,
stdout: RawTerminal<Stdout>,
player: Player,
}
impl Game {
pub fn new() -> Game {
let stdin = async_stdin();
let stdout = stdout().into_raw_mode().unwrap();
let (x, y) = termion::terminal_size().unwrap();
let x_pos: u16 = x / 2;
let y_pos: u16 = y - (y / 8);
let player = Player::new(x_pos, y_pos);
Game {
stdin,
stdout,
player,
}
}
fn setup(&mut self) {
write!(
self.stdout,
"{}{}{}",
ClearAll,
Goto(self.player.x_pos(), self.player.y_pos()),
Hide
).unwrap();
self.stdout.flush().unwrap();
}
fn reset(&mut self) {
write!(
self.stdout,
"{}{}{}",
ClearAll,
Reset,
Goto(1, 1),
).unwrap();
}
fn main_loop(&mut self) {
let stdin = &mut self.stdin;
loop {
            // throttle the loop (~100 iterations/sec) to avoid busy-waiting
sleep(Duration::from_millis(10));
let key = stdin.keys().next();
match key {
Some(Ok(Key::Char('q'))) => break,
Some(Ok(Key::Left)) => self.player.move_left(),
Some(Ok(Key::Right)) => self.player.move_right(),
None => (),
_ => ()
};
self.player.draw(&mut self.stdout);
}
}
}
fn main() {
let mut game = Game::new();
game.setup();
game.main_loop();
game.reset();
}
| 19.944444 | 61 | 0.542061 |
16c0a385abcbf2c15b191f8c53fa3053b338d593
| 937 |
use super::{node::NodeKind, Error, Result};
/// Type def for an event
pub(super) type Event = Option<EventKind>;
/// Describes the possible events that can occur
#[derive(Debug)]
pub(super) enum EventKind {
Node(NodeKind),
Failure(Error),
Done,
}
impl EventKind {
/// Converts an event into the equivalent Option/Result nesting
pub(super) fn transpose(self) -> Option<Result<NodeKind>> {
match self {
Self::Node(node) => Some(Ok(node)),
Self::Failure(err) if err.is_repeat() => None,
Self::Failure(err) => Some(Err(err)),
Self::Done => None,
}
}
}
impl From<NodeKind> for EventKind {
fn from(node: NodeKind) -> Self {
Self::Node(node)
}
}
impl From<Error> for EventKind {
fn from(err: Error) -> Self {
Self::Failure(err)
}
}
impl From<()> for EventKind {
fn from(_: ()) -> Self {
Self::Done
}
}
| 21.790698 | 67 | 0.578442 |
ace84e853899deca45af18b03d626457d1e6923e
| 31,137 |
// Copyright (c) The Dijets Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
counters,
error::Error,
logging::{LogEntry, LogEvent, LogSchema},
shared_components::SyncState,
};
use dijets_logger::prelude::*;
use dijets_types::{
account_state::AccountState,
contract_event::ContractEvent,
ledger_info::LedgerInfoWithSignatures,
move_resource::MoveStorage,
on_chain_config,
on_chain_config::{config_address, OnChainConfigPayload, ON_CHAIN_CONFIG_REGISTRY},
transaction::TransactionListWithProof,
};
use executor_types::{ChunkExecutor, ExecutedTrees};
use itertools::Itertools;
use std::{collections::HashSet, convert::TryFrom, sync::Arc};
use storage_interface::DbReader;
use subscription_service::ReconfigSubscription;
/// Proxies interactions with execution and storage for state synchronization
pub trait ExecutorProxyTrait: Send {
/// Sync the local state with the latest in storage.
fn get_local_storage_state(&self) -> Result<SyncState, Error>;
/// Execute and commit a batch of transactions
fn execute_chunk(
&mut self,
txn_list_with_proof: TransactionListWithProof,
verified_target_li: LedgerInfoWithSignatures,
intermediate_end_of_epoch_li: Option<LedgerInfoWithSignatures>,
) -> Result<(), Error>;
/// Gets chunk of transactions given the known version, target version and the max limit.
fn get_chunk(
&self,
known_version: u64,
limit: u64,
target_version: u64,
) -> Result<TransactionListWithProof, Error>;
/// Get the epoch changing ledger info for the given epoch so that we can move to next epoch.
fn get_epoch_change_ledger_info(&self, epoch: u64) -> Result<LedgerInfoWithSignatures, Error>;
/// Get ledger info at an epoch boundary version.
fn get_epoch_ending_ledger_info(&self, version: u64)
-> Result<LedgerInfoWithSignatures, Error>;
/// Returns the ledger's timestamp for the given version in microseconds
fn get_version_timestamp(&self, version: u64) -> Result<u64, Error>;
/// publishes on-chain config updates to subscribed components
fn publish_on_chain_config_updates(&mut self, events: Vec<ContractEvent>) -> Result<(), Error>;
}
pub(crate) struct ExecutorProxy {
storage: Arc<dyn DbReader>,
executor: Box<dyn ChunkExecutor>,
reconfig_subscriptions: Vec<ReconfigSubscription>,
on_chain_configs: OnChainConfigPayload,
}
impl ExecutorProxy {
pub(crate) fn new(
storage: Arc<dyn DbReader>,
executor: Box<dyn ChunkExecutor>,
mut reconfig_subscriptions: Vec<ReconfigSubscription>,
) -> Self {
let on_chain_configs = Self::fetch_all_configs(&*storage)
.expect("[state sync] Failed initial read of on-chain configs");
for subscription in reconfig_subscriptions.iter_mut() {
subscription
.publish(on_chain_configs.clone())
.expect("[state sync] Failed to publish initial on-chain config");
}
Self {
storage,
executor,
reconfig_subscriptions,
on_chain_configs,
}
}
fn fetch_all_configs(storage: &dyn DbReader) -> Result<OnChainConfigPayload, Error> {
let access_paths = ON_CHAIN_CONFIG_REGISTRY
.iter()
.map(|config_id| config_id.access_path())
.collect();
let configs = storage
.batch_fetch_resources(access_paths)
.map_err(|error| {
Error::UnexpectedError(format!("Failed batch fetch of resources: {}", error))
})?;
let synced_version = storage.fetch_synced_version().map_err(|error| {
Error::UnexpectedError(format!("Failed to fetch storage synced version: {}", error))
})?;
let account_state_blob = storage
.get_account_state_with_proof_by_version(config_address(), synced_version)
.map_err(|error| {
Error::UnexpectedError(format!(
"Failed to fetch account state with proof {}",
error
))
})?
.0;
let epoch = account_state_blob
.map(|blob| {
AccountState::try_from(&blob).and_then(|state| {
Ok(state
.get_configuration_resource()?
.ok_or_else(|| {
Error::UnexpectedError("Configuration resource does not exist".into())
})?
.epoch())
})
})
.ok_or_else(|| Error::UnexpectedError("Missing account state blob".into()))?
.map_err(|error| {
Error::UnexpectedError(format!("Failed to fetch configuration resource: {}", error))
})?;
Ok(OnChainConfigPayload::new(
epoch,
Arc::new(
ON_CHAIN_CONFIG_REGISTRY
.iter()
.cloned()
.zip_eq(configs)
.collect(),
),
))
}
}
impl ExecutorProxyTrait for ExecutorProxy {
fn get_local_storage_state(&self) -> Result<SyncState, Error> {
let storage_info = self.storage.get_startup_info().map_err(|error| {
Error::UnexpectedError(format!(
"Failed to get startup info from storage: {}",
error
))
})?;
let storage_info = storage_info
.ok_or_else(|| Error::UnexpectedError("Missing startup info from storage".into()))?;
let current_epoch_state = storage_info.get_epoch_state().clone();
let synced_trees = if let Some(synced_tree_state) = storage_info.synced_tree_state {
ExecutedTrees::from(synced_tree_state)
} else {
ExecutedTrees::from(storage_info.committed_tree_state)
};
Ok(SyncState::new(
storage_info.latest_ledger_info,
synced_trees,
current_epoch_state,
))
}
fn execute_chunk(
&mut self,
txn_list_with_proof: TransactionListWithProof,
verified_target_li: LedgerInfoWithSignatures,
intermediate_end_of_epoch_li: Option<LedgerInfoWithSignatures>,
) -> Result<(), Error> {
// track chunk execution time
let timer = counters::EXECUTE_CHUNK_DURATION.start_timer();
let events = self
.executor
.execute_and_commit_chunk(
txn_list_with_proof,
verified_target_li,
intermediate_end_of_epoch_li,
)
.map_err(|error| {
Error::UnexpectedError(format!("Execute and commit chunk failed: {}", error))
})?;
timer.stop_and_record();
let reconfig_events = extract_reconfig_events(events);
if let Err(e) = self.publish_on_chain_config_updates(reconfig_events) {
error!(
LogSchema::event_log(LogEntry::Reconfig, LogEvent::Fail).error(&e),
"Failed to publish reconfig updates in execute_chunk"
);
counters::RECONFIG_PUBLISH_COUNT
.with_label_values(&[counters::FAIL_LABEL])
.inc();
}
Ok(())
}
fn get_chunk(
&self,
known_version: u64,
limit: u64,
target_version: u64,
) -> Result<TransactionListWithProof, Error> {
let starting_version = known_version
.checked_add(1)
.ok_or_else(|| Error::IntegerOverflow("Starting version has overflown!".into()))?;
self.storage
.get_transactions(starting_version, limit, target_version, false)
.map_err(|error| {
Error::UnexpectedError(format!("Failed to get transactions from storage {}", error))
})
}
fn get_epoch_change_ledger_info(&self, epoch: u64) -> Result<LedgerInfoWithSignatures, Error> {
let next_epoch = epoch
.checked_add(1)
.ok_or_else(|| Error::IntegerOverflow("Next epoch has overflown!".into()))?;
let mut epoch_ending_ledger_infos = self
.storage
.get_epoch_ending_ledger_infos(epoch, next_epoch)
.map_err(|error| Error::UnexpectedError(error.to_string()))?;
epoch_ending_ledger_infos
.ledger_info_with_sigs
.pop()
.ok_or_else(|| {
Error::UnexpectedError(format!(
"Missing epoch change ledger info for epoch: {:?}",
epoch
))
})
}
fn get_epoch_ending_ledger_info(
&self,
version: u64,
) -> Result<LedgerInfoWithSignatures, Error> {
self.storage
.get_epoch_ending_ledger_info(version)
.map_err(|error| Error::UnexpectedError(error.to_string()))
}
fn get_version_timestamp(&self, version: u64) -> Result<u64, Error> {
self.storage
.get_block_timestamp(version)
.map_err(|error| Error::UnexpectedError(error.to_string()))
}
fn publish_on_chain_config_updates(&mut self, events: Vec<ContractEvent>) -> Result<(), Error> {
if events.is_empty() {
return Ok(());
}
info!(LogSchema::new(LogEntry::Reconfig)
.count(events.len())
.reconfig_events(events.clone()));
let event_keys = events
.iter()
.map(|event| *event.key())
.collect::<HashSet<_>>();
// calculate deltas
let new_configs = Self::fetch_all_configs(&*self.storage)?;
let changed_configs = new_configs
.configs()
.iter()
.filter(|(id, cfg)| {
&self.on_chain_configs.configs().get(id).unwrap_or_else(|| {
panic!("Missing on-chain config value in local copy: {}", id)
}) != cfg
})
.map(|(id, _)| *id)
.collect::<HashSet<_>>();
// notify subscribers
let mut publish_success = true;
for subscription in self.reconfig_subscriptions.iter_mut() {
// publish updates if *any* of the subscribed configs changed
// or any of the subscribed events were emitted
let subscribed_items = subscription.subscribed_items();
if !changed_configs.is_disjoint(&subscribed_items.configs)
|| !event_keys.is_disjoint(&subscribed_items.events)
{
if let Err(e) = subscription.publish(new_configs.clone()) {
publish_success = false;
error!(
LogSchema::event_log(LogEntry::Reconfig, LogEvent::PublishError)
.subscription_name(subscription.name.clone())
.error(&Error::UnexpectedError(e.to_string())),
"Failed to publish reconfig notification to subscription {}",
subscription.name
);
} else {
info!(
LogSchema::event_log(LogEntry::Reconfig, LogEvent::Success)
.subscription_name(subscription.name.clone()),
"Successfully published reconfig notification to subscription {}",
subscription.name
);
}
}
}
self.on_chain_configs = new_configs;
if publish_success {
counters::RECONFIG_PUBLISH_COUNT
.with_label_values(&[counters::SUCCESS_LABEL])
.inc();
Ok(())
} else {
Err(Error::UnexpectedError(
"Failed to publish at least one subscription!".into(),
))
}
}
}
fn extract_reconfig_events(events: Vec<ContractEvent>) -> Vec<ContractEvent> {
let new_epoch_event_key = on_chain_config::new_epoch_event_key();
events
.into_iter()
.filter(|event| *event.key() == new_epoch_event_key)
.collect()
}
#[cfg(test)]
mod tests {
use super::*;
use channel::dijets_channel::Receiver;
use dijets_crypto::{ed25519::*, PrivateKey, Uniform};
use dijets_transaction_builder::stdlib::{
encode_peer_to_peer_with_metadata_script,
encode_set_validator_config_and_reconfigure_script, encode_update_dijets_version_script,
};
use dijets_types::{
account_address::AccountAddress,
account_config::{dijets_root_address, xus_tag},
block_metadata::BlockMetadata,
contract_event::ContractEvent,
ledger_info::LedgerInfoWithSignatures,
on_chain_config::{
DijetsVersion, OnChainConfig, OnChainConfigPayload, VMConfig, ValidatorSet,
},
transaction::{Transaction, WriteSetPayload},
};
use dijets_vm::DijetsVM;
use dijetsdb::DijetsDB;
use executor::Executor;
use executor_test_helpers::{
bootstrap_genesis, gen_block_id, gen_ledger_info_with_sigs, get_test_signed_transaction,
};
use executor_types::BlockExecutor;
use futures::{future::FutureExt, stream::StreamExt};
use storage_interface::DbReaderWriter;
use subscription_service::ReconfigSubscription;
use vm_genesis::TestValidator;
// TODO(joshlind): add unit tests for general executor proxy behaviour!
// TODO(joshlind): add unit tests for subscription events.. seems like these are missing?
#[test]
fn test_pub_sub_different_subscription() {
let (subscription, mut reconfig_receiver) =
ReconfigSubscription::subscribe_all("", vec![VMConfig::CONFIG_ID], vec![]);
let (validators, mut block_executor, mut executor_proxy) =
bootstrap_genesis_and_set_subscription(subscription, &mut reconfig_receiver);
// Create a dummy prologue transaction that will bump the timer, and update the validator set
let validator_account = validators[0].data.address;
let dummy_txn = create_dummy_transaction(1, validator_account);
let reconfig_txn = create_new_update_dijets_version_transaction(1);
// Execute and commit the block
let block = vec![dummy_txn, reconfig_txn];
let (reconfig_events, _) = execute_and_commit_block(&mut block_executor, block, 1);
// Publish the on chain config updates
executor_proxy
.publish_on_chain_config_updates(reconfig_events)
.unwrap();
// Verify no reconfig notification is sent (we only subscribed to VMConfig)
assert!(reconfig_receiver
.select_next_some()
.now_or_never()
.is_none());
}
#[test]
fn test_pub_sub_drop_receiver() {
let (subscription, mut reconfig_receiver) =
ReconfigSubscription::subscribe_all("", vec![DijetsVersion::CONFIG_ID], vec![]);
let (validators, mut block_executor, mut executor_proxy) =
bootstrap_genesis_and_set_subscription(subscription, &mut reconfig_receiver);
// Create a dummy prologue transaction that will bump the timer, and update the Dijets version
let validator_account = validators[0].data.address;
let dummy_txn = create_dummy_transaction(1, validator_account);
let reconfig_txn = create_new_update_dijets_version_transaction(1);
// Execute and commit the reconfig block
let block = vec![dummy_txn, reconfig_txn];
let (reconfig_events, _) = execute_and_commit_block(&mut block_executor, block, 1);
// Drop the reconfig receiver
drop(reconfig_receiver);
// Verify publishing on-chain config updates fails due to dropped receiver
assert!(executor_proxy
.publish_on_chain_config_updates(reconfig_events)
.is_err());
}
#[test]
fn test_pub_sub_multiple_subscriptions() {
let (subscription, mut reconfig_receiver) = ReconfigSubscription::subscribe_all(
"",
vec![ValidatorSet::CONFIG_ID, DijetsVersion::CONFIG_ID],
vec![],
);
let (validators, mut block_executor, mut executor_proxy) =
bootstrap_genesis_and_set_subscription(subscription, &mut reconfig_receiver);
// Create a dummy prologue transaction that will bump the timer, and update the Dijets version
let validator_account = validators[0].data.address;
let dummy_txn = create_dummy_transaction(1, validator_account);
let reconfig_txn = create_new_update_dijets_version_transaction(1);
// Give the validator some money so it can send a rotation tx and rotate the validator's consensus key.
let money_txn = create_transfer_to_validator_transaction(validator_account, 2);
let rotation_txn = create_consensus_key_rotation_transaction(&validators[0], 0);
// Execute and commit the reconfig block
let block = vec![dummy_txn, reconfig_txn, money_txn, rotation_txn];
let (reconfig_events, _) = execute_and_commit_block(&mut block_executor, block, 1);
// Publish the on chain config updates
executor_proxy
.publish_on_chain_config_updates(reconfig_events)
.unwrap();
// Verify reconfig notification is sent
assert!(reconfig_receiver
.select_next_some()
.now_or_never()
.is_some());
}
#[test]
fn test_pub_sub_no_reconfig_events() {
let (subscription, mut reconfig_receiver) =
ReconfigSubscription::subscribe_all("", vec![DijetsVersion::CONFIG_ID], vec![]);
let (_, _, mut executor_proxy) =
bootstrap_genesis_and_set_subscription(subscription, &mut reconfig_receiver);
// Publish no on chain config updates
executor_proxy
.publish_on_chain_config_updates(vec![])
.unwrap();
// Verify no reconfig notification is sent
assert!(reconfig_receiver
.select_next_some()
.now_or_never()
.is_none());
}
#[test]
fn test_pub_sub_no_subscriptions() {
let (subscription, mut reconfig_receiver) =
ReconfigSubscription::subscribe_all("", vec![], vec![]);
let (validators, mut block_executor, mut executor_proxy) =
bootstrap_genesis_and_set_subscription(subscription, &mut reconfig_receiver);
// Create a dummy prologue transaction that will bump the timer, and update the Dijets version
let validator_account = validators[0].data.address;
let dummy_txn = create_dummy_transaction(1, validator_account);
let reconfig_txn = create_new_update_dijets_version_transaction(1);
// Execute and commit the reconfig block
let block = vec![dummy_txn, reconfig_txn];
let (reconfig_events, _) = execute_and_commit_block(&mut block_executor, block, 1);
// Publish the on chain config updates
executor_proxy
.publish_on_chain_config_updates(reconfig_events)
.unwrap();
// Verify no reconfig notification is sent
assert!(reconfig_receiver
.select_next_some()
.now_or_never()
.is_none());
}
#[test]
fn test_pub_sub_dijets_version() {
let (subscription, mut reconfig_receiver) =
ReconfigSubscription::subscribe_all("", vec![DijetsVersion::CONFIG_ID], vec![]);
let (validators, mut block_executor, mut executor_proxy) =
bootstrap_genesis_and_set_subscription(subscription, &mut reconfig_receiver);
// Create a dummy prologue transaction that will bump the timer, and update the Dijets version
let validator_account = validators[0].data.address;
let dummy_txn = create_dummy_transaction(1, validator_account);
let allowlist_txn = create_new_update_dijets_version_transaction(1);
// Execute and commit the reconfig block
let block = vec![dummy_txn, allowlist_txn];
let (reconfig_events, _) = execute_and_commit_block(&mut block_executor, block, 1);
// Publish the on chain config updates
executor_proxy
.publish_on_chain_config_updates(reconfig_events)
.unwrap();
// Verify the correct reconfig notification is sent
let payload = reconfig_receiver.select_next_some().now_or_never().unwrap();
let received_config = payload.get::<DijetsVersion>().unwrap();
assert_eq!(received_config, DijetsVersion { major: 7 });
}
#[test]
fn test_pub_sub_with_executor_proxy() {
let (subscription, mut reconfig_receiver) = ReconfigSubscription::subscribe_all(
"",
vec![ValidatorSet::CONFIG_ID, DijetsVersion::CONFIG_ID],
vec![],
);
let (validators, mut block_executor, mut executor_proxy) =
bootstrap_genesis_and_set_subscription(subscription, &mut reconfig_receiver);
// Create a dummy prologue transaction that will bump the timer and update the Dijets version
let validator_account = validators[0].data.address;
let dummy_txn_1 = create_dummy_transaction(1, validator_account);
let reconfig_txn = create_new_update_dijets_version_transaction(1);
// Execute and commit the reconfig block
let block = vec![dummy_txn_1.clone(), reconfig_txn.clone()];
let (_, ledger_info_epoch_1) = execute_and_commit_block(&mut block_executor, block, 1);
// Give the validator some money so it can send a rotation tx, create another dummy prologue
// to bump the timer and rotate the validator's consensus key.
let money_txn = create_transfer_to_validator_transaction(validator_account, 2);
let dummy_txn_2 = create_dummy_transaction(2, validator_account);
let rotation_txn = create_consensus_key_rotation_transaction(&validators[0], 0);
// Execute and commit the reconfig block
let block = vec![money_txn.clone(), dummy_txn_2.clone(), rotation_txn.clone()];
let (_, ledger_info_epoch_2) = execute_and_commit_block(&mut block_executor, block, 2);
// Grab the first two executed transactions and verify responses
let txns = executor_proxy.get_chunk(0, 2, 2).unwrap();
assert_eq!(txns.transactions, vec![dummy_txn_1, reconfig_txn]);
assert!(executor_proxy
.execute_chunk(txns, ledger_info_epoch_1.clone(), None)
.is_ok());
assert_eq!(
ledger_info_epoch_1,
executor_proxy.get_epoch_change_ledger_info(1).unwrap()
);
assert_eq!(
ledger_info_epoch_1,
executor_proxy.get_epoch_ending_ledger_info(2).unwrap()
);
// Grab the next two executed transactions (forced by limit) and verify responses
let txns = executor_proxy.get_chunk(2, 2, 5).unwrap();
assert_eq!(txns.transactions, vec![money_txn, dummy_txn_2]);
executor_proxy.get_epoch_ending_ledger_info(4).unwrap_err();
// Grab the last transaction and verify responses
let txns = executor_proxy.get_chunk(4, 1, 5).unwrap();
assert_eq!(txns.transactions, vec![rotation_txn]);
assert!(executor_proxy
.execute_chunk(txns, ledger_info_epoch_2.clone(), None)
.is_ok());
assert_eq!(
ledger_info_epoch_2,
executor_proxy.get_epoch_change_ledger_info(2).unwrap()
);
assert_eq!(
ledger_info_epoch_2,
executor_proxy.get_epoch_ending_ledger_info(5).unwrap()
);
}
#[test]
fn test_pub_sub_with_executor_sync_state() {
let (subscription, mut reconfig_receiver) = ReconfigSubscription::subscribe_all(
"",
vec![ValidatorSet::CONFIG_ID, DijetsVersion::CONFIG_ID],
vec![],
);
let (validators, mut block_executor, executor_proxy) =
bootstrap_genesis_and_set_subscription(subscription, &mut reconfig_receiver);
// Create a dummy prologue transaction that will bump the timer and update the Dijets version
let validator_account = validators[0].data.address;
let dummy_txn = create_dummy_transaction(1, validator_account);
let reconfig_txn = create_new_update_dijets_version_transaction(1);
// Execute and commit the reconfig block
let block = vec![dummy_txn, reconfig_txn];
let _ = execute_and_commit_block(&mut block_executor, block, 1);
// Verify executor proxy sync state
let sync_state = executor_proxy.get_local_storage_state().unwrap();
assert_eq!(sync_state.trusted_epoch(), 2); // 1 reconfiguration has occurred, trusted = next
assert_eq!(sync_state.committed_version(), 2); // 2 transactions have committed
assert_eq!(sync_state.synced_version(), 2); // 2 transactions have synced
// Give the validator some money so it can send a rotation tx, create another dummy prologue
// to bump the timer and rotate the validator's consensus key.
let money_txn = create_transfer_to_validator_transaction(validator_account, 2);
let dummy_txn = create_dummy_transaction(2, validator_account);
let rotation_txn = create_consensus_key_rotation_transaction(&validators[0], 0);
// Execute and commit the reconfig block
let block = vec![money_txn, dummy_txn, rotation_txn];
let _ = execute_and_commit_block(&mut block_executor, block, 2);
// Verify executor proxy sync state
let sync_state = executor_proxy.get_local_storage_state().unwrap();
assert_eq!(sync_state.trusted_epoch(), 3); // 2 reconfigurations have occurred, trusted = next
assert_eq!(sync_state.committed_version(), 5); // 5 transactions have committed
assert_eq!(sync_state.synced_version(), 5); // 5 transactions have synced
}
/// Executes a genesis transaction, creates the executor proxy and sets the given reconfig
/// subscription.
fn bootstrap_genesis_and_set_subscription(
subscription: ReconfigSubscription,
reconfig_receiver: &mut Receiver<(), OnChainConfigPayload>,
) -> (Vec<TestValidator>, Box<Executor<DijetsVM>>, ExecutorProxy) {
// Generate a genesis change set
let (genesis, validators) = vm_genesis::test_genesis_change_set_and_validators(Some(1));
// Create test dijets database
let db_path = dijets_temppath::TempPath::new();
db_path.create_as_dir().unwrap();
let (db, db_rw) = DbReaderWriter::wrap(DijetsDB::new_for_test(db_path.path()));
        // Bootstrap the genesis transaction
let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis));
bootstrap_genesis::<DijetsVM>(&db_rw, &genesis_txn).unwrap();
// Create executor proxy with given subscription
let block_executor = Box::new(Executor::<DijetsVM>::new(db_rw.clone()));
let chunk_executor = Box::new(Executor::<DijetsVM>::new(db_rw));
let executor_proxy = ExecutorProxy::new(db, chunk_executor, vec![subscription]);
// Verify initial reconfiguration notification is sent
assert!(
reconfig_receiver
.select_next_some()
.now_or_never()
.is_some(),
"Expected an initial reconfig notification on executor proxy creation!",
);
(validators, block_executor, executor_proxy)
}
/// Creates a transaction that rotates the consensus key of the given validator account.
fn create_consensus_key_rotation_transaction(
validator: &TestValidator,
sequence_number: u64,
) -> Transaction {
let operator_key = validator.key.clone();
let operator_public_key = operator_key.public_key();
let operator_account = validator.data.operator_address;
let new_consensus_key = Ed25519PrivateKey::generate_for_testing().public_key();
get_test_signed_transaction(
operator_account,
sequence_number,
operator_key,
operator_public_key,
Some(encode_set_validator_config_and_reconfigure_script(
validator.data.address,
new_consensus_key.to_bytes().to_vec(),
Vec::new(),
Vec::new(),
)),
)
}
/// Creates a dummy transaction (useful for bumping the timer).
fn create_dummy_transaction(index: u8, validator_account: AccountAddress) -> Transaction {
Transaction::BlockMetadata(BlockMetadata::new(
gen_block_id(index),
index as u64,
(index as u64 + 1) * 100000010,
vec![],
validator_account,
))
}
/// Creates a transaction that creates a reconfiguration event by changing the Dijets version
fn create_new_update_dijets_version_transaction(sequence_number: u64) -> Transaction {
let genesis_key = vm_genesis::GENESIS_KEYPAIR.0.clone();
get_test_signed_transaction(
dijets_root_address(),
sequence_number,
genesis_key.clone(),
genesis_key.public_key(),
Some(encode_update_dijets_version_script(
0, 7, // version
)),
)
}
/// Creates a transaction that sends funds to the specified validator account.
fn create_transfer_to_validator_transaction(
validator_account: AccountAddress,
sequence_number: u64,
) -> Transaction {
let genesis_key = vm_genesis::GENESIS_KEYPAIR.0.clone();
get_test_signed_transaction(
dijets_root_address(),
sequence_number,
genesis_key.clone(),
genesis_key.public_key(),
Some(encode_peer_to_peer_with_metadata_script(
xus_tag(),
validator_account,
1_000_000,
vec![],
vec![],
)),
)
}
/// Executes and commits a given block that will cause a reconfiguration event.
fn execute_and_commit_block(
block_executor: &mut Box<Executor<DijetsVM>>,
block: Vec<Transaction>,
block_id: u8,
) -> (Vec<ContractEvent>, LedgerInfoWithSignatures) {
let block_hash = gen_block_id(block_id);
// Execute block
let output = block_executor
.execute_block((block_hash, block), block_executor.committed_block_id())
.expect("Failed to execute block!");
assert!(
output.has_reconfiguration(),
"Block execution is missing a reconfiguration!"
);
// Commit block
let ledger_info_with_sigs =
gen_ledger_info_with_sigs(block_id.into(), &output, block_hash, vec![]);
block_executor
.commit_blocks(vec![block_hash], ledger_info_with_sigs.clone())
.unwrap();
(output.reconfig_events().to_vec(), ledger_info_with_sigs)
}
}
| 40.862205 | 111 | 0.633523 |
4be5aed202baad9ac8580bcf2b887a44aa97f26d
| 340 |
use dyn_fmt::AsStrFormatExt;
use setup::{assert_css, parse};
#[path = "../setup/mod.rs"]
mod setup;
const TEMPLATE: &str = r#"
.name {{
direction: {};
}}"#;
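// With dyn_fmt, `TEMPLATE.format(&["ltr"])` fills the `{}` placeholder and
// unescapes `{{`/`}}`, producing roughly:
//
//     .name {
//         direction: ltr;
//     }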
#[test]
pub fn keyword() {
for value in ["ltr", "rtl"].iter() {
let css = &TEMPLATE.format(&[&value]);
let (stylesheet, _) = parse(css);
assert_css(&stylesheet, css);
}
}
| 17 | 40 | 0.602941 |
5d43c1f7eeabc1185cadcbdd34003d4fa0138cef
| 25,092 |
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use lazy_static::lazy_static;
use prometheus::*;
use prometheus_static_metric::*;
make_auto_flush_static_metric! {
pub label_enum PerfContextType {
write_wal_time,
write_delay_time,
write_scheduling_flushes_compactions_time,
db_condition_wait_nanos,
write_memtable_time,
pre_and_post_process,
write_thread_wait,
db_mutex_lock_nanos,
}
pub label_enum ProposalType {
all,
local_read,
read_index,
unsafe_read_index,
normal,
transfer_leader,
conf_change,
batch,
}
pub label_enum AdminCmdType {
conf_change,
add_peer,
remove_peer,
add_learner,
batch_split : "batch-split",
prepare_merge,
commit_merge,
rollback_merge,
compact,
}
pub label_enum AdminCmdStatus {
reject_unsafe,
all,
success,
}
pub label_enum RaftReadyType {
message,
commit,
append,
snapshot,
pending_region,
has_ready_region,
}
pub label_enum MessageCounterType {
append,
append_resp,
prevote,
prevote_resp,
vote,
vote_resp,
snapshot,
heartbeat,
heartbeat_resp,
transfer_leader,
timeout_now,
read_index,
read_index_resp,
}
pub label_enum RaftDroppedMessage {
mismatch_store_id,
mismatch_region_epoch,
stale_msg,
region_overlap,
region_no_peer,
region_tombstone_peer,
region_nonexistent,
applying_snap,
disk_full,
}
pub label_enum SnapValidationType {
stale,
decode,
epoch,
}
pub label_enum RegionHashType {
verify,
compute,
}
pub label_enum RegionHashResult {
miss,
matched,
all,
failed,
}
pub label_enum CfNames {
default,
lock,
write,
raft,
ver_default,
}
pub label_enum RaftEntryType {
hit,
miss
}
pub label_enum RaftInvalidProposal {
mismatch_store_id,
region_not_found,
not_leader,
mismatch_peer_id,
stale_command,
epoch_not_match,
read_index_no_leader,
region_not_initialized,
is_applying_snapshot,
}
pub label_enum RaftEventDurationType {
compact_check,
pd_store_heartbeat,
snap_gc,
compact_lock_cf,
consistency_check,
cleanup_import_sst,
raft_engine_purge,
}
pub label_enum CompactionGuardAction {
init,
init_failure,
partition,
skip_partition,
}
pub label_enum SendStatus {
accept,
drop,
}
pub label_enum RaftLogGcSkippedReason {
reserve_log,
compact_idx_too_small,
threshold_limit,
}
pub struct RaftEventDuration : LocalHistogram {
"type" => RaftEventDurationType
}
pub struct RaftInvalidProposalCount : LocalIntCounter {
"type" => RaftInvalidProposal
}
pub struct RaftEntryFetches : LocalIntCounter {
"type" => RaftEntryType
}
pub struct SnapCf : LocalHistogram {
"type" => CfNames,
}
pub struct SnapCfSize : LocalHistogram {
"type" => CfNames,
}
pub struct RegionHashCounter: LocalIntCounter {
"type" => RegionHashType,
"result" => RegionHashResult,
}
pub struct ProposalVec: LocalIntCounter {
"type" => ProposalType,
}
pub struct AdminCmdVec : LocalIntCounter {
"type" => AdminCmdType,
"status" => AdminCmdStatus,
}
pub struct RaftReadyVec : LocalIntCounter {
"type" => RaftReadyType,
}
pub struct MessageCounterVec : LocalIntCounter {
"type" => MessageCounterType,
"status" => SendStatus,
}
pub struct RaftDropedVec : LocalIntCounter {
"type" => RaftDroppedMessage,
}
pub struct SnapValidVec : LocalIntCounter {
"type" => SnapValidationType
}
pub struct PerfContextTimeDuration : LocalHistogram {
"type" => PerfContextType
}
pub struct CompactionGuardActionVec: LocalIntCounter {
"cf" => CfNames,
"type" => CompactionGuardAction,
}
pub struct RaftLogGcSkippedVec: LocalIntCounter {
"reason" => RaftLogGcSkippedReason,
}
}
make_static_metric! {
pub struct HibernatedPeerStateGauge: IntGauge {
"state" => {
awaken,
hibernated,
},
}
}
lazy_static! {
pub static ref STORE_TIME_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_duration_secs",
"Bucketed histogram of store time duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref APPLY_TIME_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_apply_duration_secs",
"Bucketed histogram of apply time duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref STORE_WRITE_TASK_WAIT_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_write_task_wait_duration_secs",
"Bucketed histogram of store write task wait time duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref STORE_WRITE_HANDLE_MSG_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_write_handle_msg_duration_secs",
"Bucketed histogram of handle store write msg duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref STORE_WRITE_TRIGGER_SIZE_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_write_trigger_wb_bytes",
"Bucketed histogram of store write task size of raft writebatch.",
exponential_buckets(8.0, 2.0, 24).unwrap()
).unwrap();
pub static ref STORE_WRITE_KVDB_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_write_kvdb_duration_seconds",
"Bucketed histogram of store write kv db duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref STORE_WRITE_RAFTDB_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_append_log_duration_seconds",
"Bucketed histogram of peer appending log duration.",
exponential_buckets(0.0005, 2.0, 20).unwrap()
).unwrap();
pub static ref STORE_WRITE_SEND_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_write_send_duration_seconds",
"Bucketed histogram of sending msg duration after writing db.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref STORE_WRITE_CALLBACK_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_write_callback_duration_seconds",
"Bucketed histogram of sending callback to store thread duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref STORE_WRITE_LOOP_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_write_loop_duration_seconds",
"Bucketed histogram of store write loop duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref STORE_WRITE_MSG_BLOCK_WAIT_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_write_msg_block_wait_duration_seconds",
"Bucketed histogram of write msg block wait duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
/// Waterfall Metrics
pub static ref STORE_WF_BATCH_WAIT_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_wf_batch_wait_duration_seconds",
"Bucketed histogram of proposals' wait batch duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref STORE_WF_SEND_TO_QUEUE_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_wf_send_to_queue_duration_seconds",
"Bucketed histogram of proposals' send to write queue duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref STORE_WF_BEFORE_WRITE_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_wf_before_write_duration_seconds",
"Bucketed histogram of proposals' before write duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref STORE_WF_WRITE_KVDB_END_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_wf_write_kvdb_end_duration_seconds",
"Bucketed histogram of proposals' write kv db end duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref STORE_WF_WRITE_END_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_wf_write_end_duration_seconds",
"Bucketed histogram of proposals' write db end duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref STORE_WF_PERSIST_LOG_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_wf_persist_duration_seconds",
"Bucketed histogram of proposals' persist duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref STORE_WF_COMMIT_LOG_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_wf_commit_log_duration_seconds",
"Bucketed histogram of proposals' commit and persist duration.",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref STORE_WF_COMMIT_NOT_PERSIST_LOG_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_store_wf_commit_not_persist_log_duration_seconds",
"Bucketed histogram of proposals' commit but not persist duration",
exponential_buckets(0.00001, 2.0, 26).unwrap()
).unwrap();
pub static ref PEER_PROPOSAL_COUNTER_VEC: IntCounterVec =
register_int_counter_vec!(
"tikv_raftstore_proposal_total",
"Total number of proposal made.",
&["type"]
).unwrap();
pub static ref PEER_PROPOSAL_COUNTER: ProposalVec =
auto_flush_from!(PEER_PROPOSAL_COUNTER_VEC, ProposalVec);
pub static ref PEER_ADMIN_CMD_COUNTER_VEC: IntCounterVec =
register_int_counter_vec!(
"tikv_raftstore_admin_cmd_total",
"Total number of admin cmd processed.",
&["type", "status"]
).unwrap();
pub static ref PEER_ADMIN_CMD_COUNTER: AdminCmdVec =
auto_flush_from!(PEER_ADMIN_CMD_COUNTER_VEC, AdminCmdVec);
pub static ref CHECK_LEADER_DURATION_HISTOGRAM: Histogram =
register_histogram!(
"tikv_resolved_ts_check_leader_duration_seconds",
"Bucketed histogram of handling check leader request duration",
exponential_buckets(0.005, 2.0, 20).unwrap()
).unwrap();
pub static ref PEER_COMMIT_LOG_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_commit_log_duration_seconds",
"Bucketed histogram of peer commits logs duration.",
exponential_buckets(0.0005, 2.0, 20).unwrap()
).unwrap();
pub static ref STORE_APPLY_LOG_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_apply_log_duration_seconds",
"Bucketed histogram of peer applying log duration.",
exponential_buckets(0.0005, 2.0, 20).unwrap()
).unwrap();
pub static ref APPLY_TASK_WAIT_TIME_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_apply_wait_time_duration_secs",
"Bucketed histogram of apply task wait time duration.",
exponential_buckets(0.0005, 2.0, 20).unwrap()
).unwrap();
pub static ref STORE_RAFT_READY_COUNTER_VEC: IntCounterVec =
register_int_counter_vec!(
"tikv_raftstore_raft_ready_handled_total",
"Total number of raft ready handled.",
&["type"]
).unwrap();
pub static ref STORE_RAFT_READY_COUNTER: RaftReadyVec =
auto_flush_from!(STORE_RAFT_READY_COUNTER_VEC, RaftReadyVec);
pub static ref STORE_RAFT_SENT_MESSAGE_COUNTER_VEC: IntCounterVec =
register_int_counter_vec!(
"tikv_raftstore_raft_sent_message_total",
"Total number of raft ready sent messages.",
&["type", "status"]
).unwrap();
pub static ref STORE_RAFT_SENT_MESSAGE_COUNTER: MessageCounterVec =
auto_flush_from!(STORE_RAFT_SENT_MESSAGE_COUNTER_VEC, MessageCounterVec);
pub static ref STORE_RAFT_DROPPED_MESSAGE_COUNTER_VEC: IntCounterVec =
register_int_counter_vec!(
"tikv_raftstore_raft_dropped_message_total",
"Total number of raft dropped messages.",
&["type"]
).unwrap();
pub static ref STORE_RAFT_DROPPED_MESSAGE_COUNTER: RaftDropedVec =
auto_flush_from!(STORE_RAFT_DROPPED_MESSAGE_COUNTER_VEC, RaftDropedVec);
pub static ref STORE_SNAPSHOT_TRAFFIC_GAUGE_VEC: IntGaugeVec =
register_int_gauge_vec!(
"tikv_raftstore_snapshot_traffic_total",
"Total number of raftstore snapshot traffic.",
&["type"]
).unwrap();
pub static ref STORE_SNAPSHOT_VALIDATION_FAILURE_COUNTER_VEC: IntCounterVec =
register_int_counter_vec!(
"tikv_raftstore_snapshot_validation_failure_total",
"Total number of raftstore snapshot validation failure.",
&["type"]
).unwrap();
pub static ref STORE_SNAPSHOT_VALIDATION_FAILURE_COUNTER: SnapValidVec =
auto_flush_from!(STORE_SNAPSHOT_VALIDATION_FAILURE_COUNTER_VEC, SnapValidVec);
pub static ref PEER_RAFT_PROCESS_DURATION: HistogramVec =
register_histogram_vec!(
"tikv_raftstore_raft_process_duration_secs",
"Bucketed histogram of peer processing raft duration.",
&["type"],
exponential_buckets(0.0005, 2.0, 20).unwrap()
).unwrap();
pub static ref PEER_PROPOSE_LOG_SIZE_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_propose_log_size",
"Bucketed histogram of peer proposing log size.",
exponential_buckets(8.0, 2.0, 22).unwrap()
).unwrap();
pub static ref REGION_HASH_COUNTER_VEC: IntCounterVec =
register_int_counter_vec!(
"tikv_raftstore_hash_total",
"Total number of hash has been computed.",
&["type", "result"]
).unwrap();
pub static ref REGION_HASH_COUNTER: RegionHashCounter =
auto_flush_from!(REGION_HASH_COUNTER_VEC, RegionHashCounter);
pub static ref REGION_MAX_LOG_LAG: Histogram =
register_histogram!(
"tikv_raftstore_log_lag",
"Bucketed histogram of log lag in a region.",
vec![2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0,
512.0, 1024.0, 5120.0, 10240.0]
).unwrap();
pub static ref REQUEST_WAIT_TIME_HISTOGRAM: Histogram =
register_histogram!(
"tikv_raftstore_request_wait_time_duration_secs",
"Bucketed histogram of request wait time duration.",
exponential_buckets(0.0005, 2.0, 20).unwrap()
).unwrap();
pub static ref PEER_GC_RAFT_LOG_COUNTER: IntCounter =
register_int_counter!(
"tikv_raftstore_gc_raft_log_total",
"Total number of GC raft log."
).unwrap();
pub static ref UPDATE_REGION_SIZE_BY_COMPACTION_COUNTER: IntCounter =
register_int_counter!(
"update_region_size_count_by_compaction",
"Total number of update region size caused by compaction."
).unwrap();
pub static ref COMPACTION_RELATED_REGION_COUNT: HistogramVec =
register_histogram_vec!(
"compaction_related_region_count",
"Associated number of regions for each compaction job.",
&["output_level"],
exponential_buckets(1.0, 2.0, 20).unwrap()
).unwrap();
pub static ref COMPACTION_DECLINED_BYTES: HistogramVec =
register_histogram_vec!(
"compaction_declined_bytes",
"Total bytes declined for each compaction job.",
&["output_level"],
exponential_buckets(1024.0, 2.0, 30).unwrap()
).unwrap();
pub static ref SNAPSHOT_CF_KV_COUNT_VEC: HistogramVec =
register_histogram_vec!(
"tikv_snapshot_cf_kv_count",
"Total number of kv in each cf file of snapshot.",
&["type"],
exponential_buckets(100.0, 2.0, 20).unwrap()
).unwrap();
pub static ref SNAPSHOT_CF_KV_COUNT: SnapCf =
auto_flush_from!(SNAPSHOT_CF_KV_COUNT_VEC, SnapCf);
pub static ref SNAPSHOT_CF_SIZE_VEC: HistogramVec =
register_histogram_vec!(
"tikv_snapshot_cf_size",
"Total size of each cf file of snapshot.",
&["type"],
exponential_buckets(1024.0, 2.0, 31).unwrap()
).unwrap();
pub static ref SNAPSHOT_CF_SIZE: SnapCfSize =
auto_flush_from!(SNAPSHOT_CF_SIZE_VEC, SnapCfSize);
pub static ref SNAPSHOT_BUILD_TIME_HISTOGRAM: Histogram =
register_histogram!(
"tikv_snapshot_build_time_duration_secs",
"Bucketed histogram of snapshot build time duration.",
exponential_buckets(0.05, 2.0, 20).unwrap()
).unwrap();
pub static ref SNAPSHOT_KV_COUNT_HISTOGRAM: Histogram =
register_histogram!(
"tikv_snapshot_kv_count",
"Total number of kv in snapshot.",
exponential_buckets(100.0, 2.0, 20).unwrap() //100,100*2^1,...100M
).unwrap();
pub static ref SNAPSHOT_SIZE_HISTOGRAM: Histogram =
register_histogram!(
"tikv_snapshot_size",
"Size of snapshot.",
exponential_buckets(1024.0, 2.0, 22).unwrap() // 1024,1024*2^1,..,4G
).unwrap();
pub static ref RAFT_ENTRY_FETCHES_VEC: IntCounterVec =
register_int_counter_vec!(
"tikv_raftstore_entry_fetches",
"Total number of raft entry fetches.",
&["type"]
).unwrap();
pub static ref RAFT_ENTRY_FETCHES: RaftEntryFetches =
auto_flush_from!(RAFT_ENTRY_FETCHES_VEC, RaftEntryFetches);
pub static ref LEADER_MISSING: IntGauge =
register_int_gauge!(
"tikv_raftstore_leader_missing",
"Total number of leader missed region."
).unwrap();
pub static ref INGEST_SST_DURATION_SECONDS: Histogram =
register_histogram!(
"tikv_snapshot_ingest_sst_duration_seconds",
"Bucketed histogram of rocksdb ingestion durations.",
exponential_buckets(0.005, 2.0, 20).unwrap()
).unwrap();
pub static ref RAFT_INVALID_PROPOSAL_COUNTER_VEC: IntCounterVec =
register_int_counter_vec!(
"tikv_raftstore_raft_invalid_proposal_total",
"Total number of raft invalid proposal.",
&["type"]
).unwrap();
pub static ref RAFT_INVALID_PROPOSAL_COUNTER: RaftInvalidProposalCount =
auto_flush_from!(RAFT_INVALID_PROPOSAL_COUNTER_VEC, RaftInvalidProposalCount);
pub static ref RAFT_EVENT_DURATION_VEC: HistogramVec =
register_histogram_vec!(
"tikv_raftstore_event_duration",
"Duration of raft store events.",
&["type"],
exponential_buckets(0.001, 1.59, 20).unwrap() // max 10s
).unwrap();
pub static ref RAFT_EVENT_DURATION: RaftEventDuration =
auto_flush_from!(RAFT_EVENT_DURATION_VEC, RaftEventDuration);
pub static ref RAFT_READ_INDEX_PENDING_DURATION: Histogram =
register_histogram!(
"tikv_raftstore_read_index_pending_duration",
"Duration of pending read index.",
exponential_buckets(0.001, 2.0, 20).unwrap() // max 1000s
).unwrap();
pub static ref RAFT_READ_INDEX_PENDING_COUNT: IntGauge =
register_int_gauge!(
"tikv_raftstore_read_index_pending",
"Pending read index count."
).unwrap();
pub static ref APPLY_PERF_CONTEXT_TIME_HISTOGRAM: HistogramVec =
register_histogram_vec!(
"tikv_raftstore_apply_perf_context_time_duration_secs",
"Bucketed histogram of request wait time duration.",
&["type"],
exponential_buckets(0.0005, 2.0, 20).unwrap()
).unwrap();
pub static ref STORE_PERF_CONTEXT_TIME_HISTOGRAM: HistogramVec =
register_histogram_vec!(
"tikv_raftstore_store_perf_context_time_duration_secs",
"Bucketed histogram of request wait time duration.",
&["type"],
exponential_buckets(0.0005, 2.0, 20).unwrap()
).unwrap();
pub static ref APPLY_PERF_CONTEXT_TIME_HISTOGRAM_STATIC: PerfContextTimeDuration=
auto_flush_from!(APPLY_PERF_CONTEXT_TIME_HISTOGRAM, PerfContextTimeDuration);
pub static ref STORE_PERF_CONTEXT_TIME_HISTOGRAM_STATIC: PerfContextTimeDuration=
auto_flush_from!(STORE_PERF_CONTEXT_TIME_HISTOGRAM, PerfContextTimeDuration);
pub static ref READ_QPS_TOPN: GaugeVec =
register_gauge_vec!(
"tikv_read_qps_topn",
"Collect topN of read qps.",
&["order"]
).unwrap();
pub static ref LOAD_BASE_SPLIT_EVENT: IntCounterVec =
register_int_counter_vec!(
"tikv_load_base_split_event",
"Load base split event.",
&["type"]
).unwrap();
pub static ref RAFT_ENTRIES_CACHES_GAUGE: IntGauge = register_int_gauge!(
"tikv_raft_entries_caches",
"Total memory size of raft entries caches."
).unwrap();
pub static ref RAFT_ENTRIES_EVICT_BYTES: IntCounter = register_int_counter!(
"tikv_raft_entries_evict_bytes",
"Cache evict bytes."
).unwrap();
pub static ref COMPACTION_GUARD_ACTION_COUNTER_VEC: IntCounterVec =
register_int_counter_vec!(
"tikv_raftstore_compaction_guard_action_total",
"Total number of compaction guard actions.",
&["cf", "type"]
).unwrap();
pub static ref COMPACTION_GUARD_ACTION_COUNTER: CompactionGuardActionVec =
auto_flush_from!(COMPACTION_GUARD_ACTION_COUNTER_VEC, CompactionGuardActionVec);
pub static ref RAFT_PEER_PENDING_DURATION: Histogram =
register_histogram!(
"tikv_raftstore_peer_pending_duration_seconds",
"Bucketed histogram of region peer pending duration.",
exponential_buckets(0.1, 1.5, 30).unwrap() // 0.1s ~ 5.3 hours
).unwrap();
pub static ref HIBERNATED_PEER_STATE_GAUGE: HibernatedPeerStateGauge = register_static_int_gauge_vec!(
HibernatedPeerStateGauge,
"tikv_raftstore_hibernated_peer_state",
"Number of peers in hibernated state.",
&["state"],
).unwrap();
pub static ref STORE_IO_RESCHEDULE_PEER_TOTAL_GAUGE: IntGauge = register_int_gauge!(
"tikv_raftstore_io_reschedule_region_total",
"Total number of io rescheduling peers"
).unwrap();
pub static ref STORE_IO_RESCHEDULE_PENDING_TASKS_TOTAL_GAUGE: IntGauge = register_int_gauge!(
"tikv_raftstore_io_reschedule_pending_tasks_total",
"Total number of pending write tasks from io rescheduling peers"
).unwrap();
pub static ref STORE_INSPECT_DURTION_HISTOGRAM: HistogramVec =
register_histogram_vec!(
"tikv_raftstore_inspect_duration_seconds",
"Bucketed histogram of inspect duration.",
&["type"],
exponential_buckets(0.0005, 2.0, 20).unwrap()
).unwrap();
pub static ref STORE_SLOW_SCORE_GAUGE: Gauge =
register_gauge!("tikv_raftstore_slow_score", "Slow score of the store.").unwrap();
pub static ref RAFT_LOG_GC_SKIPPED_VEC: IntCounterVec = register_int_counter_vec!(
"tikv_raftstore_raft_log_gc_skipped",
"Total number of skipped raft log gc.",
&["reason"]
)
.unwrap();
pub static ref RAFT_LOG_GC_SKIPPED: RaftLogGcSkippedVec =
auto_flush_from!(RAFT_LOG_GC_SKIPPED_VEC, RaftLogGcSkippedVec);
}
| 36.47093 | 106 | 0.652638 |
67a9b10b504ca18652cb5c31825a51b5e06bdc29
| 3,851 |
// cominterfaces.rs - MIT License
// MIT License
// Copyright (c) 2018 Tyler Laing (ZerothLaw)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//.Net Framework 4.7.2 Reference Source - mscorlib { system.cominterfaces.cs }
//RIDL!{#[uuid()]}
//"([\w\d]{8})-([\w\d]{4})-([\w\d]{4})-([\w\d]{2})([\w\d]{2})-([\w\d]{2})([\w\d]{2})([\w\d]{2})([\w\d]{2})([\w\d]{2})([\w\d]{2})"
//0x$1, 0x$2, 0x$3, 0x$4, 0x$5, 0x$6, 0x$7, 0x$8, 0x$9, 0x$10, 0x$11
use winapi::shared::guiddef::REFIID;
use winapi::shared::minwindef::{WORD,UINT};
use winapi::shared::winerror::HRESULT;
use winapi::um::unknwnbase::{IUnknown, IUnknownVtbl};
use crate::system::intptr::IntPtr;
RIDL!{#[uuid(0x03973551, 0x57A1, 0x3900, 0xA2, 0xB5, 0x90, 0x83, 0xE3, 0xFF, 0x29, 0x43)]
interface _Activator(_ActivatorVtbl): IUnknown(IUnknownVtbl){
fn GetTypeInfoCount(
pcTInfo: *mut UINT,
) -> HRESULT,
fn GetTypeInfo(
iTInfo: UINT,
lcid: UINT,
ppTInfo: IntPtr,
) -> HRESULT,
fn GetIDsOfNames(
riid: REFIID,
rgszNames: IntPtr,
cNames: UINT,
lcid: UINT,
rgDispId: IntPtr,
) -> HRESULT,
fn Invoke(
dispIdMember: UINT,
riid: REFIID,
wFlags: WORD,
pDispParams: IntPtr,
pVarResult: IntPtr,
pExcepInfo: IntPtr,
puArgError: IntPtr,
) -> HRESULT,
}}
RIDL!{#[uuid(0x917B14D0, 0x2D9E, 0x38B8, 0x92, 0xA9, 0x38, 0x1A, 0xCF, 0x52, 0xF7, 0xC0)]
interface _Attribute(_AttributeVtbl): IUnknown(IUnknownVtbl){
fn GetTypeInfoCount(
pcTInfo: *mut UINT,
) -> HRESULT,
fn GetTypeInfo(
iTInfo: UINT,
lcid: UINT,
ppTInfo: IntPtr,
) -> HRESULT,
fn GetIDsOfNames(
riid: REFIID,
rgszNames: IntPtr,
cNames: UINT,
lcid: UINT,
rgDispId: IntPtr,
) -> HRESULT,
fn Invoke(
dispIdMember: UINT,
riid: REFIID,
wFlags: WORD,
pDispParams: IntPtr,
pVarResult: IntPtr,
pExcepInfo: IntPtr,
puArgError: IntPtr,
) -> HRESULT,
}}
RIDL!{#[uuid(0xC281C7F1, 0x4AA9, 0x3517, 0x96, 0x1A, 0x46, 0x3C, 0xFE, 0xD5, 0x7E, 0x75)]
interface _Thread(_ThreadVtbl): IUnknown(IUnknownVtbl){
fn GetTypeInfoCount(
pcTInfo: *mut UINT,
) -> HRESULT,
fn GetTypeInfo(
iTInfo: UINT,
lcid: UINT,
ppTInfo: IntPtr,
) -> HRESULT,
fn GetIDsOfNames(
riid: REFIID,
rgszNames: IntPtr,
cNames: UINT,
lcid: UINT,
rgDispId: IntPtr,
) -> HRESULT,
fn Invoke(
dispIdMember: UINT,
riid: REFIID,
wFlags: WORD,
pDispParams: IntPtr,
pVarResult: IntPtr,
pExcepInfo: IntPtr,
puArgError: IntPtr,
) -> HRESULT,
}}
| 32.091667 | 129 | 0.616983 |
f99a3942e39ec90eb166ca92ae4c9c9715a0d91e
| 15,054 |
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
impl super::ISR {
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
}
#[doc = "Possible values of the field `TEIF7`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TEIF7R {
#[doc = "No transfer error"]
NOERROR,
#[doc = "A transfer error has occured"]
ERROR,
}
impl TEIF7R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
TEIF7R::NOERROR => false,
TEIF7R::ERROR => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> TEIF7R {
match value {
false => TEIF7R::NOERROR,
true => TEIF7R::ERROR,
}
}
#[doc = "Checks if the value of the field is `NOERROR`"]
#[inline]
pub fn is_no_error(&self) -> bool {
*self == TEIF7R::NOERROR
}
#[doc = "Checks if the value of the field is `ERROR`"]
#[inline]
pub fn is_error(&self) -> bool {
*self == TEIF7R::ERROR
}
}
#[doc = "Possible values of the field `HTIF7`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum HTIF7R {
#[doc = "No half transfer event"]
NOTHALT,
#[doc = "A half transfer event has occured"]
HALF,
}
impl HTIF7R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
HTIF7R::NOTHALT => false,
HTIF7R::HALF => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> HTIF7R {
match value {
false => HTIF7R::NOTHALT,
true => HTIF7R::HALF,
}
}
#[doc = "Checks if the value of the field is `NOTHALT`"]
#[inline]
pub fn is_not_halt(&self) -> bool {
*self == HTIF7R::NOTHALT
}
#[doc = "Checks if the value of the field is `HALF`"]
#[inline]
pub fn is_half(&self) -> bool {
*self == HTIF7R::HALF
}
}
#[doc = "Possible values of the field `TCIF7`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TCIF7R {
#[doc = "No transfer complete event"]
NOTCOMPLETE,
#[doc = "A transfer complete event has occured"]
COMPLETE,
}
impl TCIF7R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
TCIF7R::NOTCOMPLETE => false,
TCIF7R::COMPLETE => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> TCIF7R {
match value {
false => TCIF7R::NOTCOMPLETE,
true => TCIF7R::COMPLETE,
}
}
#[doc = "Checks if the value of the field is `NOTCOMPLETE`"]
#[inline]
pub fn is_not_complete(&self) -> bool {
*self == TCIF7R::NOTCOMPLETE
}
#[doc = "Checks if the value of the field is `COMPLETE`"]
#[inline]
pub fn is_complete(&self) -> bool {
*self == TCIF7R::COMPLETE
}
}
#[doc = "Possible values of the field `GIF7`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum GIF7R {
#[doc = "No transfer error, half event, complete event"]
NOEVENT,
#[doc = "A transfer error, half event or complete event has occured"]
EVENT,
}
impl GIF7R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
GIF7R::NOEVENT => false,
GIF7R::EVENT => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> GIF7R {
match value {
false => GIF7R::NOEVENT,
true => GIF7R::EVENT,
}
}
#[doc = "Checks if the value of the field is `NOEVENT`"]
#[inline]
pub fn is_no_event(&self) -> bool {
*self == GIF7R::NOEVENT
}
#[doc = "Checks if the value of the field is `EVENT`"]
#[inline]
pub fn is_event(&self) -> bool {
*self == GIF7R::EVENT
}
}
#[doc = "Possible values of the field `TEIF6`"]
pub type TEIF6R = TEIF7R;
#[doc = "Possible values of the field `HTIF6`"]
pub type HTIF6R = HTIF7R;
#[doc = "Possible values of the field `TCIF6`"]
pub type TCIF6R = TCIF7R;
#[doc = "Possible values of the field `GIF6`"]
pub type GIF6R = GIF7R;
#[doc = "Possible values of the field `TEIF5`"]
pub type TEIF5R = TEIF7R;
#[doc = "Possible values of the field `HTIF5`"]
pub type HTIF5R = HTIF7R;
#[doc = "Possible values of the field `TCIF5`"]
pub type TCIF5R = TCIF7R;
#[doc = "Possible values of the field `GIF5`"]
pub type GIF5R = GIF7R;
#[doc = "Possible values of the field `TEIF4`"]
pub type TEIF4R = TEIF7R;
#[doc = "Possible values of the field `HTIF4`"]
pub type HTIF4R = HTIF7R;
#[doc = "Possible values of the field `TCIF4`"]
pub type TCIF4R = TCIF7R;
#[doc = "Possible values of the field `GIF4`"]
pub type GIF4R = GIF7R;
#[doc = "Possible values of the field `TEIF3`"]
pub type TEIF3R = TEIF7R;
#[doc = "Possible values of the field `HTIF3`"]
pub type HTIF3R = HTIF7R;
#[doc = "Possible values of the field `TCIF3`"]
pub type TCIF3R = TCIF7R;
#[doc = "Possible values of the field `GIF3`"]
pub type GIF3R = GIF7R;
#[doc = "Possible values of the field `TEIF2`"]
pub type TEIF2R = TEIF7R;
#[doc = "Possible values of the field `HTIF2`"]
pub type HTIF2R = HTIF7R;
#[doc = "Possible values of the field `TCIF2`"]
pub type TCIF2R = TCIF7R;
#[doc = "Possible values of the field `GIF2`"]
pub type GIF2R = GIF7R;
#[doc = "Possible values of the field `TEIF1`"]
pub type TEIF1R = TEIF7R;
#[doc = "Possible values of the field `HTIF1`"]
pub type HTIF1R = HTIF7R;
#[doc = "Possible values of the field `TCIF1`"]
pub type TCIF1R = TCIF7R;
#[doc = "Possible values of the field `GIF1`"]
pub type GIF1R = GIF7R;
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 27 - Channel x transfer error flag (x = 1 ..7)"]
#[inline]
pub fn teif7(&self) -> TEIF7R {
TEIF7R::_from({
const MASK: bool = true;
const OFFSET: u8 = 27;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 26 - Channel x half transfer flag (x = 1 ..7)"]
#[inline]
pub fn htif7(&self) -> HTIF7R {
HTIF7R::_from({
const MASK: bool = true;
const OFFSET: u8 = 26;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 25 - Channel x transfer complete flag (x = 1 ..7)"]
#[inline]
pub fn tcif7(&self) -> TCIF7R {
TCIF7R::_from({
const MASK: bool = true;
const OFFSET: u8 = 25;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 24 - Channel x global interrupt flag (x = 1 ..7)"]
#[inline]
pub fn gif7(&self) -> GIF7R {
GIF7R::_from({
const MASK: bool = true;
const OFFSET: u8 = 24;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 23 - Channel x transfer error flag (x = 1 ..7)"]
#[inline]
pub fn teif6(&self) -> TEIF6R {
TEIF6R::_from({
const MASK: bool = true;
const OFFSET: u8 = 23;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 22 - Channel x half transfer flag (x = 1 ..7)"]
#[inline]
pub fn htif6(&self) -> HTIF6R {
HTIF6R::_from({
const MASK: bool = true;
const OFFSET: u8 = 22;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 21 - Channel x transfer complete flag (x = 1 ..7)"]
#[inline]
pub fn tcif6(&self) -> TCIF6R {
TCIF6R::_from({
const MASK: bool = true;
const OFFSET: u8 = 21;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 20 - Channel x global interrupt flag (x = 1 ..7)"]
#[inline]
pub fn gif6(&self) -> GIF6R {
GIF6R::_from({
const MASK: bool = true;
const OFFSET: u8 = 20;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 19 - Channel x transfer error flag (x = 1 ..7)"]
#[inline]
pub fn teif5(&self) -> TEIF5R {
TEIF5R::_from({
const MASK: bool = true;
const OFFSET: u8 = 19;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 18 - Channel x half transfer flag (x = 1 ..7)"]
#[inline]
pub fn htif5(&self) -> HTIF5R {
HTIF5R::_from({
const MASK: bool = true;
const OFFSET: u8 = 18;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 17 - Channel x transfer complete flag (x = 1 ..7)"]
#[inline]
pub fn tcif5(&self) -> TCIF5R {
TCIF5R::_from({
const MASK: bool = true;
const OFFSET: u8 = 17;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 16 - Channel x global interrupt flag (x = 1 ..7)"]
#[inline]
pub fn gif5(&self) -> GIF5R {
GIF5R::_from({
const MASK: bool = true;
const OFFSET: u8 = 16;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 15 - Channel x transfer error flag (x = 1 ..7)"]
#[inline]
pub fn teif4(&self) -> TEIF4R {
TEIF4R::_from({
const MASK: bool = true;
const OFFSET: u8 = 15;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 14 - Channel x half transfer flag (x = 1 ..7)"]
#[inline]
pub fn htif4(&self) -> HTIF4R {
HTIF4R::_from({
const MASK: bool = true;
const OFFSET: u8 = 14;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 13 - Channel x transfer complete flag (x = 1 ..7)"]
#[inline]
pub fn tcif4(&self) -> TCIF4R {
TCIF4R::_from({
const MASK: bool = true;
const OFFSET: u8 = 13;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 12 - Channel x global interrupt flag (x = 1 ..7)"]
#[inline]
pub fn gif4(&self) -> GIF4R {
GIF4R::_from({
const MASK: bool = true;
const OFFSET: u8 = 12;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 11 - Channel x transfer error flag (x = 1 ..7)"]
#[inline]
pub fn teif3(&self) -> TEIF3R {
TEIF3R::_from({
const MASK: bool = true;
const OFFSET: u8 = 11;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 10 - Channel x half transfer flag (x = 1 ..7)"]
#[inline]
pub fn htif3(&self) -> HTIF3R {
HTIF3R::_from({
const MASK: bool = true;
const OFFSET: u8 = 10;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 9 - Channel x transfer complete flag (x = 1 ..7)"]
#[inline]
pub fn tcif3(&self) -> TCIF3R {
TCIF3R::_from({
const MASK: bool = true;
const OFFSET: u8 = 9;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 8 - Channel x global interrupt flag (x = 1 ..7)"]
#[inline]
pub fn gif3(&self) -> GIF3R {
GIF3R::_from({
const MASK: bool = true;
const OFFSET: u8 = 8;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 7 - Channel x transfer error flag (x = 1 ..7)"]
#[inline]
pub fn teif2(&self) -> TEIF2R {
TEIF2R::_from({
const MASK: bool = true;
const OFFSET: u8 = 7;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 6 - Channel x half transfer flag (x = 1 ..7)"]
#[inline]
pub fn htif2(&self) -> HTIF2R {
HTIF2R::_from({
const MASK: bool = true;
const OFFSET: u8 = 6;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 5 - Channel x transfer complete flag (x = 1 ..7)"]
#[inline]
pub fn tcif2(&self) -> TCIF2R {
TCIF2R::_from({
const MASK: bool = true;
const OFFSET: u8 = 5;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 4 - Channel x global interrupt flag (x = 1 ..7)"]
#[inline]
pub fn gif2(&self) -> GIF2R {
GIF2R::_from({
const MASK: bool = true;
const OFFSET: u8 = 4;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 3 - Channel x transfer error flag (x = 1 ..7)"]
#[inline]
pub fn teif1(&self) -> TEIF1R {
TEIF1R::_from({
const MASK: bool = true;
const OFFSET: u8 = 3;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 2 - Channel x half transfer flag (x = 1 ..7)"]
#[inline]
pub fn htif1(&self) -> HTIF1R {
HTIF1R::_from({
const MASK: bool = true;
const OFFSET: u8 = 2;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 1 - Channel x transfer complete flag (x = 1 ..7)"]
#[inline]
pub fn tcif1(&self) -> TCIF1R {
TCIF1R::_from({
const MASK: bool = true;
const OFFSET: u8 = 1;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 0 - Channel x global interrupt flag (x = 1 ..7)"]
#[inline]
pub fn gif1(&self) -> GIF1R {
GIF1R::_from({
const MASK: bool = true;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
}
| 29.575639 | 73 | 0.505978 |
de3f770586b113a9a5cf1236cef4c8c5d9362d6e
| 107 |
use cpp_build;
fn main() {
println!("cargo:rustc-link-lib=ngraph");
cpp_build::build("src/lib.rs");
}
| 15.285714 | 42 | 0.654206 |
62eda023eaa49115e5fa48946deeee651b541b5f
| 6,229 |
//! GFF reader and iterators.
mod lines;
mod records;
pub use self::{lines::Lines, records::Records};
use std::io::{self, BufRead};
const LINE_FEED: char = '\n';
const CARRIAGE_RETURN: char = '\r';
/// A GFF reader.
pub struct Reader<R> {
inner: R,
}
impl<R> Reader<R>
where
R: BufRead,
{
/// Creates a GFF reader.
///
/// # Examples
///
/// ```
/// use noodles_gff as gff;
/// let data = b"##gff-version 3\n";
/// let mut reader = gff::Reader::new(&data[..]);
/// ```
pub fn new(inner: R) -> Self {
Self { inner }
}
/// Returns a reference to the underlying reader.
///
/// # Examples
///
/// ```
/// use noodles_gff as gff;
///
/// let data = b"##gff-version 3\n";
/// let reader = gff::Reader::new(&data[..]);
///
/// let _ = reader.get_ref();
/// ```
pub fn get_ref(&self) -> &R {
&self.inner
}
/// Unwraps and returns the underlying reader.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use noodles_gff as gff;
///
/// let data = b"##gff-version 3
/// #format: gff3
/// ";
/// let mut reader = gff::Reader::new(&data[..]);
/// reader.read_line(&mut String::new())?;
///
/// assert_eq!(reader.into_inner(), b"#format: gff3\n");
/// # Ok::<_, io::Error>(())
/// ```
pub fn into_inner(self) -> R {
self.inner
}
/// Reads a raw GFF line.
///
/// This reads from the underlying stream until a newline is reached and appends it to the
/// given buffer, sans the final newline character. The buffer can subsequently be parsed as a
/// [`crate::Line`].
///
/// It is more ergonomic to read records using an iterator (see [`Self::lines`]), but using
/// this method allows control of the line buffer and whether the raw line should be parsed.
///
/// If successful, the number of bytes read is returned. If the number of bytes read is 0, the
/// stream reached EOF.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use noodles_gff as gff;
///
/// let data = b"##gff-version 3
/// sq0\tNOODLES\tgene\t8\t13\t.\t+\t.\tgene_id=ndls0;gene_name=gene0
/// ";
/// let mut reader = gff::Reader::new(&data[..]);
///
/// let mut buf = String::new();
/// reader.read_line(&mut buf)?;
/// assert_eq!(buf, "##gff-version 3");
/// # Ok::<_, io::Error>(())
/// ```
pub fn read_line(&mut self, buf: &mut String) -> io::Result<usize> {
read_line(&mut self.inner, buf)
}
/// Returns an iterator over lines starting from the current stream position.
///
/// When using this, the caller is responsible to stop reading at either EOF or when the
/// `FASTA` directive is read, whichever comes first.
///
/// Unlike [`Self::read_line`], each line is parsed as a [`crate::Line`].
///
/// # Examples
///
/// ```
/// # use std::io;
/// use noodles_gff as gff;
///
/// let data = b"##gff-version 3
/// sq0\tNOODLES\tgene\t8\t13\t.\t+\t.\tgene_id=ndls0;gene_name=gene0
/// ";
/// let mut reader = gff::Reader::new(&data[..]);
/// let mut lines = reader.lines();
///
/// let line = lines.next().transpose()?;
/// assert!(matches!(line, Some(gff::Line::Directive(_))));
///
/// let line = lines.next().transpose()?;
/// assert!(matches!(line, Some(gff::Line::Record(_))));
///
/// assert!(lines.next().is_none());
/// # Ok::<_, io::Error>(())
/// ```
pub fn lines(&mut self) -> Lines<'_, R> {
Lines::new(self)
}
/// Returns an iterator over records starting from the current stream position.
///
/// This filters lines for only records. It stops at either EOF or when the `FASTA` directive
/// is read, whichever comes first.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use noodles_gff as gff;
///
/// let data = b"##gff-version 3
/// sq0\tNOODLES\tgene\t8\t13\t.\t+\t.\tgene_id=ndls0;gene_name=gene0
/// ";
/// let mut reader = gff::Reader::new(&data[..]);
/// let mut records = reader.records();
///
/// assert!(records.next().transpose()?.is_some());
/// assert!(records.next().is_none());
/// # Ok::<_, io::Error>(())
/// ```
pub fn records(&mut self) -> Records<'_, R> {
Records::new(self.lines())
}
}
fn read_line<R>(reader: &mut R, buf: &mut String) -> io::Result<usize>
where
R: BufRead,
{
match reader.read_line(buf) {
Ok(0) => Ok(0),
Ok(n) => {
if buf.ends_with(LINE_FEED) {
buf.pop();
if buf.ends_with(CARRIAGE_RETURN) {
buf.pop();
}
}
Ok(n)
}
Err(e) => Err(e),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_records() -> io::Result<()> {
let data = b"\
##gff-version 3
sq0\tNOODLES\tgene\t8\t13\t.\t+\t.\tgene_id=ndls0;gene_name=gene0
";
let mut reader = Reader::new(&data[..]);
let mut n = 0;
for result in reader.records() {
let _ = result?;
n += 1;
}
assert_eq!(n, 1);
Ok(())
}
#[test]
fn test_records_with_fasta_directive() -> io::Result<()> {
let data = b"\
##gff-version 3
sq0\tNOODLES\tgene\t8\t13\t.\t+\t.\tgene_id=ndls0;gene_name=gene0
##FASTA
>sq0
ACGT
";
let mut reader = Reader::new(&data[..]);
let mut n = 0;
for result in reader.records() {
let _ = result?;
n += 1;
}
assert_eq!(n, 1);
Ok(())
}
#[test]
fn test_read_line() -> io::Result<()> {
fn t(buf: &mut String, mut reader: &[u8], expected: &str) -> io::Result<()> {
buf.clear();
read_line(&mut reader, buf)?;
assert_eq!(buf, expected);
Ok(())
}
let mut buf = String::new();
t(&mut buf, b"noodles\n", "noodles")?;
t(&mut buf, b"noodles\r\n", "noodles")?;
t(&mut buf, b"noodles", "noodles")?;
Ok(())
}
}
| 25.116935 | 98 | 0.50586 |
29276eeedf35d32361d9e364a17656935b4df493
| 1,687 |
//! Support for functional tests.
use std::sync::Mutex;
use lazy_static::lazy_static;
#[cfg(not(unix))]
use winreg::{
enums::{HKEY_CURRENT_USER, KEY_READ, KEY_WRITE},
RegKey, RegValue,
};
#[cfg(not(unix))]
pub fn get_path() -> std::io::Result<Option<RegValue>> {
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
match environment.get_raw_value("PATH") {
Ok(val) => Ok(Some(val)),
Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => Ok(None),
Err(e) => Err(e),
}
}
#[cfg(not(unix))]
fn restore_path(p: Option<RegValue>) {
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
if let Some(p) = p.as_ref() {
environment.set_raw_value("PATH", &p).unwrap();
} else {
let _ = environment.delete_value("PATH");
}
}
/// Support testing of code that mutates global path state
pub fn with_saved_path(f: &dyn Fn()) {
// Lock protects concurrent mutation of registry
lazy_static! {
static ref LOCK: Mutex<()> = Mutex::new(());
}
let _g = LOCK.lock();
// On windows these tests mess with the user's PATH. Save
// and restore them here to keep from trashing things.
let saved_path = get_path().expect("Error getting PATH: Better abort to avoid trashing it.");
let _g = scopeguard::guard(saved_path, restore_path);
f();
}
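// A minimal usage sketch (the module and test below are hypothetical, named
// here only for illustration): any closure that mutates the user's PATH can be
// wrapped in `with_saved_path`; the scopeguard above restores the saved value
// after the closure returns (or panics), and the lock serializes callers.
#[cfg(test)]
mod with_saved_path_example {
    #[test]
    fn path_is_restored_around_the_closure() {
        // An empty closure still exercises the lock/save/restore path; real
        // tests would edit PATH inside the closure.
        super::with_saved_path(&|| {});
    }
}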
#[cfg(unix)]
pub fn get_path() -> std::io::Result<Option<()>> {
Ok(None)
}
#[cfg(unix)]
fn restore_path(_: Option<()>) {}
| 27.655738 | 97 | 0.628334 |
085fa1073a99eab9ad9b8acc18f39562e5d27a42
| 137 |
Place the parentheses so that the equalities are correct.
@repeat(6)@
@center@ @answ@
@vspace@
@/repeat@
| 6.85 | 46 | 0.49635 |
5b3dd982e0f407438a27c2a8df249778fba9977f
| 612 |
use crate::{common::preview_features::PreviewFeature, configuration::StringFromEnvVar};
use serde::Serialize;
use std::collections::HashMap;
#[derive(Debug, Serialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Generator {
pub name: String,
pub provider: StringFromEnvVar,
pub output: Option<StringFromEnvVar>,
pub config: HashMap<String, String>,
#[serde(default)]
pub binary_targets: Vec<StringFromEnvVar>,
#[serde(default)]
pub preview_features: Vec<PreviewFeature>,
#[serde(skip_serializing_if = "Option::is_none")]
pub documentation: Option<String>,
}
| 27.818182 | 87 | 0.720588 |
cce5c091f30eb081398b1efe59bc1180e1202d54
| 21,435 |
use area8051::{Addr, Mem};
use crate::Ec;
#[cfg(feature = "debug_xram")]
macro_rules! debug {
($($arg:tt)*) => (eprint!($($arg)*));
}
#[cfg(not(feature = "debug_xram"))]
macro_rules! debug {
($($arg:tt)*) => (());
}
pub fn xram(ec: &Ec, address: u16, new_opt: Option<u8>) -> u8 {
let mut mcu = ec.mcu.lock().unwrap();
let mut spi = ec.spi.lock().unwrap();
let mut xmem = ec.xmem.lock().unwrap();
debug!("\n[xram 0x{:04X}", address);
let mut old = mcu.load(Addr::XRam(address));
match address {
// Scratch SRAM
0x0000 ..= 0x0FFF => {
debug!(" (SRAM)");
},
0x8000 ..= 0x97FF if ec.id == 0x5570 => {
debug!(" (SRAM)");
//TODO: SRAM is double mapped from 0x8000 - 0x8FFF
},
// SMFI
0x1000 ..= 0x10FF => {
let base = 0x1000;
let offset = address - base;
debug!(" (SMFI 0x{:02X}", offset);
let mut scar_dma = |scar| {
let (reg, base, size) = ec.scar()[scar];
let l = mcu.xram[reg];
let m = mcu.xram[reg + 1];
let h = mcu.xram[reg + 2];
let value = {
(l as usize) |
(m as usize) << 8 |
((h as usize) & 0b11) << 16
};
debug!(" [SCAR{} DMA 0x{:04X} = 0x{:04X}]", scar, base, value);
for i in 0..size {
mcu.xram[base + i] = mcu.pmem[value + i];
}
};
match offset {
0x00 => debug!(" FBCFG"),
0x01 => debug!(" FPCFG"),
0x07 => debug!(" UNKNOWN"),
0x20 => debug!(" SMECCS"),
0x32 => debug!(" FLHCTRL2R"),
0x33 => debug!(" CACHDISR"),
0x36 => debug!(" HCTRL2R"),
0x3B => debug!(" ECINDAR0"),
0x3C => debug!(" ECINDAR1"),
0x3D => debug!(" ECINDAR2"),
0x3E => debug!(" ECINDAR3"),
0x3F => {
debug!(" ECINDDR");
let a0 = mcu.load(Addr::XRam(base + 0x3B));
let a1 = mcu.load(Addr::XRam(base + 0x3C));
let a2 = mcu.load(Addr::XRam(base + 0x3D));
let a3 = mcu.load(Addr::XRam(base + 0x3E));
let a = {
(a0 as usize) |
(a1 as usize) << 8 |
(a2 as usize) << 16 |
(a3 as usize) << 24
};
debug!(" [flash address 0x{:08X}", a);
let (flash, flash_name): (&mut [u8], &str) = match (a3 >> 6) & 0b11 {
0b00 | 0b11 => {
(&mut xmem, "external")
},
0b01 => {
(&mut mcu.pmem, "internal")
},
unknown => {
panic!("unknown ECIND flash chip 0b{:02b}", unknown);
}
};
debug!(" ({})]", flash_name);
if a3 & 0xF == 0xF {
match a1 {
0xFD => {
// Enable chip, send or receive
debug!(" [follow enable]");
if let Some(new) = new_opt {
spi.input.push_back(new);
} else {
spi.step(flash, flash_name);
old = spi.output.pop_front().expect("tried to read missing flash follow output");
}
},
0xFE => {
// Disable chip
debug!(" [follow disable]");
spi.step(flash, flash_name);
},
_ => {
panic!("Unknown follow address 0x{:02X}", a1);
}
}
} else {
let i = a & 0xFFFFFF;
old = flash[i];
if let Some(new) = new_opt {
flash[i] = new;
}
}
},
0x40 => debug!(" SCAR0L"),
0x41 => debug!(" SCAR0M"),
0x42 => {
debug!(" SCAR0H");
if let Some(new) = new_opt {
if old & 0x80 != 0 && new & 0x80 == 0 {
scar_dma(0);
}
}
},
0x43 if ec.id == 0x8587 => debug!(" SCAR1L"),
0x44 if ec.id == 0x8587 => debug!(" SCAR1M"),
0x45 if ec.id == 0x8587 => {
debug!(" SCAR1H");
if let Some(new) = new_opt {
if old & 0x80 != 0 && new & 0x80 == 0 {
scar_dma(1);
}
}
},
0x46 if ec.id == 0x8587 => debug!(" SCAR2L"),
0x47 if ec.id == 0x8587 => debug!(" SCAR2M"),
0x48 if ec.id == 0x8587 => {
debug!(" SCAR2H");
if let Some(new) = new_opt {
if old & 0x80 != 0 && new & 0x80 == 0 {
scar_dma(2);
}
}
},
0x49 if ec.id == 0x8587 => debug!(" SCAR3L"),
0x4A if ec.id == 0x8587 => debug!(" SCAR3M"),
0x4B if ec.id == 0x8587 => {
debug!(" SCAR3H");
if let Some(new) = new_opt {
if old & 0x80 != 0 && new & 0x80 == 0 {
scar_dma(3);
}
}
},
0x4C if ec.id == 0x8587 => debug!(" SCAR4L"),
0x4D if ec.id == 0x8587 => debug!(" SCAR4M"),
0x4E if ec.id == 0x8587 => {
debug!(" SCAR4H");
if let Some(new) = new_opt {
if old & 0x80 != 0 && new & 0x80 == 0 {
scar_dma(4);
}
}
},
0x58 => debug!(" HINSTC1"),
0x63 => debug!(" FLHCTRL3R"),
_ => panic!("xram unimplemented SMFI register 0x{:02X}", offset)
}
debug!(")");
},
// INTC
0x1100 ..= 0x11FF => {
let base = 0x1100;
let offset = address - base;
debug!(" (INTC 0x{:02X}", offset);
match offset {
0x01 => debug!(" ISR1"),
0x05 => debug!(" IER1"),
0x07 => debug!(" IER3"),
0x10 => debug!(" IVECT"),
0x51 => debug!(" IER19"),
_ => panic!("xram unimplemented INTC register 0x{:02X}", offset)
}
debug!(")");
},
// KBC
0x1300 ..= 0x13FF => {
let base = 0x1300;
let offset = address - base;
debug!(" (KBC 0x{:02X}", offset);
match offset {
0x00 => debug!(" KBHICR"),
0x02 => debug!(" KBIRQR"),
0x04 => debug!(" KBHISR"),
0x06 => {
debug!(" KBHIKDOR");
//TODO: Enforce write-only
// Set output buffer full flag
mcu.xram[0x1304] |= 1 << 0;
},
0x08 => {
debug!(" KBHIMDOR");
//TODO: Enforce write-only
// Set output buffer full flag
mcu.xram[0x1304] |= 1 << 0;
},
0x0A => {
debug!(" KBHIDIR");
//TODO: Enforce read-only
// Clear input buffer full flag
mcu.xram[0x1304] &= !(1 << 1);
}
_ => panic!("xram unimplemented KBC register 0x{:02X}", offset)
}
debug!(")");
},
// PMC
0x1500 ..= 0x15FF => {
let base = 0x1500;
let offset = address - base;
debug!(" (PMC 0x{:02X}", offset);
match offset {
0x00 => debug!(" PM1STS"),
0x01 => {
debug!(" PM1DO");
//TODO: Enforce write-only
// Set output buffer full flag
mcu.xram[0x1500] |= 1 << 0;
},
0x04 => {
debug!(" PM1DI");
//TODO: Enforce read-only
// Clear input buffer full flag
mcu.xram[0x1500] &= !(1 << 1);
}
0x06 => debug!(" PM1CTL"),
0x16 => debug!(" PM2CTL"),
0x30 => debug!(" PM4STS"),
_ => panic!("xram unimplemented PMC register 0x{:02X}", offset)
}
debug!(")");
},
// GPIO
0x1600 ..= 0x16FF => {
let base = 0x1600;
let offset = address - base;
debug!(" (GPIO 0x{:02X}", offset);
match offset {
0x00 => debug!(" GCR"),
0x01 => debug!(" GPDRA"),
0x02 => debug!(" GPDRB"),
0x03 => debug!(" GPDRC"),
0x04 => debug!(" GPDRD"),
0x05 => debug!(" GPDRE"),
0x06 => debug!(" GPDRF"),
0x07 => debug!(" GPDRG"),
0x08 => debug!(" GPDRH"),
0x09 => debug!(" GPDRI"),
0x0A => debug!(" GPDRJ"),
0x0D => debug!(" GPDRM"),
0x61 => debug!(" GPDRA"),
0x62 => debug!(" GPDRB"),
0x63 => debug!(" GPDRC"),
0x64 => debug!(" GPDRD"),
0x65 => debug!(" GPDRE"),
0x66 => debug!(" GPDRF"),
0x67 => debug!(" GPDRG"),
0x68 => debug!(" GPDRH"),
0x69 => debug!(" GPDRI"),
0x6A => debug!(" GPDRJ"),
0x6D => debug!(" GPDRM"),
0x71 => debug!(" GPOTA"),
0x72 => debug!(" GPOTB"),
0x73 => debug!(" GPOTC"),
0x74 => debug!(" GPOTD"),
0x75 => debug!(" GPOTE"),
0x76 => debug!(" GPOTF"),
0x77 => debug!(" GPOTG"),
0x78 => debug!(" GPOTH"),
0x79 => debug!(" GPOTI"),
0x7A => debug!(" GPOTJ"),
0x7D => debug!(" GPOTM"),
0x10 ..= 0x17 => debug!(" GPCRA{}", offset - 0x10),
0x18 ..= 0x1F => debug!(" GPCRB{}", offset - 0x18),
0x20 ..= 0x27 => debug!(" GPCRC{}", offset - 0x20),
0x28 ..= 0x2F => debug!(" GPCRD{}", offset - 0x28),
0x30 ..= 0x37 => debug!(" GPCRE{}", offset - 0x30),
0x38 ..= 0x3F => debug!(" GPCRF{}", offset - 0x38),
0x40 ..= 0x47 => debug!(" GPCRG{}", offset - 0x40),
0x48 ..= 0x4F => debug!(" GPCRH{}", offset - 0x48),
0x50 ..= 0x57 => debug!(" GPCRI{}", offset - 0x50),
0x58 ..= 0x5F => debug!(" GPCRJ{}", offset - 0x58),
0xA0 ..= 0xA7 => debug!(" GPCRM{}", offset - 0xA0),
0xF0 ..= 0xFE => debug!(" GCR{}", offset - 0xF0 + 1),
0xE0 ..= 0xE2 => debug!(" GCR{}", offset - 0xE0 + 16),
0xE4 ..= 0xE8 if ec.id == 0x5570 => debug!(" GCR{}", offset - 0xE4 + 19),
_ => panic!("xram unimplemented GPIO register 0x{:02X}", offset)
}
debug!(")");
},
// PS/2
0x1700 ..= 0x17FF => {
let base = 0x1700;
let offset = address - base;
debug!(" (PS/2 0x{:02X}", offset);
match offset {
0x00 => debug!(" PSCTL1"),
0x01 => debug!(" PSCTL2"),
0x02 => debug!(" PSCTL3"),
0x04 => debug!(" PSINT1"),
0x05 => debug!(" PSINT2"),
0x06 => debug!(" PSINT3"),
0x0A => debug!(" PSSTS3"),
_ => panic!("xram unimplemented PS/2 register 0x{:02X}", offset)
}
debug!(")");
},
// PWM
0x1800 ..= 0x18FF => {
let base = 0x1800;
let offset = address - base;
debug!(" (PWM 0x{:02X}", offset);
match offset {
0x00 => debug!(" C0CPRS"),
0x01 => debug!(" CTR0"),
0x02 ..= 0x09 => debug!(" DCR{}", offset - 0x02),
0x0B => debug!(" PCFSR"),
0x0C => debug!(" PCSSGL"),
0x0D => debug!(" PCSSGH"),
0x0F => debug!(" PCSGR"),
0x23 => debug!(" ZTIER"),
0x27 => debug!(" C4CPRS"),
0x2B => debug!(" C6CPRS"),
0x2C => debug!(" C6MCPRS"),
0x2D => debug!(" C7CPRS"),
0x2E => debug!(" C7MCPRS"),
0x40 => debug!(" CLK6MSEL"),
0x43 => debug!(" CTR3"),
0x48 => debug!(" TSWCTLR"),
_ => panic!("xram unimplemented PWM register 0x{:02X}", offset)
}
debug!(")");
},
// ADC
0x1900 ..= 0x19FF => {
let base = 0x1900;
let offset = address - base;
debug!(" (ADC 0x{:02X}", offset);
match offset {
0x00 => debug!(" ADCSTS"),
0x01 => debug!(" ADCCFG"),
0x04 => debug!(" VCH0CTL"),
0x05 => debug!(" KDCTL"),
0x06 => debug!(" VCH1CTL"),
0x09 => debug!(" VCH2CTL"),
0x0C => debug!(" VCH3CTL"),
0x18 => debug!(" VCH0DATL"),
0x19 => debug!(" VCH0DATM"),
0x38 => debug!(" VCH4CTL"),
0x3B => debug!(" VCH5CTL"),
0x3E => debug!(" VCH6CTL"),
_ => panic!("xram unimplemented ADC register 0x{:02X}", offset)
}
debug!(")");
},
// DAC
0x1A00 ..= 0x1AFF => {
let base = 0x1A00;
let offset = address - base;
debug!(" (DAC 0x{:02X}", offset);
match offset {
0x00 => debug!(" UNKNOWN"),
0x01 => debug!(" DACPDREG"),
0x04 => debug!(" DACDAT2"),
_ => panic!("xram unimplemented DAC register 0x{:02X}", offset)
}
debug!(")");
},
// SMBus
0x1C00 ..= 0x1CFF => {
let base = 0x1C00;
let offset = address - base;
debug!(" (SMBUS 0x{:02X}", offset);
match offset {
0x00 => debug!(" HOSTAA"),
0x01 => debug!(" HOCTLA"),
0x02 => debug!(" HOCMDA"),
0x03 => debug!(" TRASLAA"),
0x04 => debug!(" D0REGA"),
0x05 => debug!(" D1REGA"),
0x06 => debug!(" HOBDBA"),
0x10 => debug!(" HOCTL2A"),
0x11 => debug!(" HOSTAB"),
0x12 => debug!(" HOCTLB"),
0x21 => debug!(" HOCTL2B"),
0x22 => debug!(" 4P7USL"),
0x23 => debug!(" 4P0USL"),
0x24 => debug!(" 300NS"),
0x25 => debug!(" 250NS"),
0x26 => debug!(" 25MS"),
0x27 => debug!(" 45P3USL"),
0x28 => debug!(" 45P3USH"),
0x29 => debug!(" HOSTAC"),
0x2A => debug!(" HOCTLC"),
0x32 => debug!(" HOCTL2C"),
0x35 => debug!(" HOSTAD"),
0x36 => debug!(" HOCTLD"),
0x3E => debug!(" HOCTL2D"),
0x41 => debug!(" SCLKTSB"),
0xA0 if ec.id == 0x5570 => debug!(" HOSTAE"),
0xA1 if ec.id == 0x5570 => debug!(" HOCTLE"),
0xA3 if ec.id == 0x5570 => debug!(" TRASLAE"),
0xA7 if ec.id == 0x5570 => debug!(" HOBDBE"),
0xAA if ec.id == 0x5570 => debug!(" HOCTL2E"),
0xB0 if ec.id == 0x5570 => debug!(" HOSTAF"),
0xB1 if ec.id == 0x5570 => debug!(" HOCTLF"),
0xBA if ec.id == 0x5570 => debug!(" HOCTL2F"),
_ => panic!("xram unimplemented SMBUS register 0x{:02X}", offset)
}
debug!(")");
},
// KB Scan
0x1D00 ..= 0x1DFF => {
let base = 0x1D00;
let offset = address - base;
debug!(" (KBSCAN 0x{:02X}", offset);
match offset {
0x00 => debug!(" KSOL"),
0x01 => {
debug!(" KSOH1");
if let Some(new) = new_opt {
if new & 1 == 0 {
let byte = mcu.xram[0x1D00];
print!("{}", byte as char);
}
}
},
0x02 => debug!(" KSOCTRL"),
0x03 => debug!(" KSOH2"),
0x04 => debug!(" KSI"),
0x05 => debug!(" KSICTRLR"),
0x06 => debug!(" KSIGCTRL"),
0x07 => debug!(" KSIGOEN"),
0x08 => debug!(" KSIGDAT"),
0x09 => debug!(" KSIGDMRR"),
0x0A => debug!(" KSOHGCTRL"),
0x0B => debug!(" KSOHGOEN"),
0x0C => debug!(" KSOHGDMRR"),
0x0D => debug!(" KSOLGCTRL"),
0x0E => debug!(" KSOLGOEN"),
0x0F => debug!(" KSOLGDMRR"),
_ => panic!("xram unimplemented KBSCAN register 0x{:02X}", offset)
}
debug!(")");
},
// EC power management
0x1E00 ..= 0x1EFF => {
let base = 0x1E00;
let offset = address - base;
debug!(" (ECPM 0x{:02X}", offset);
match offset {
0x02 => debug!(" CGCTRL2"),
0x05 => debug!(" CGCTRL3"),
0x09 => debug!(" CGCTRL4"),
_ => panic!("xram unimplemented ECPM register 0x{:02X}", offset)
}
debug!(")");
},
// General Control
0x2000 ..= 0x20FF => {
let base = 0x2000;
let offset = address - base;
debug!(" (GCTRL 0x{:02X}", offset);
match offset {
0x00 => debug!(" ECHIPID1"),
0x01 => debug!(" ECHIPID2"),
0x02 => debug!(" ECHIPVER"),
0x06 => debug!(" RSTS"),
0x0A => debug!(" BADRSEL"),
0x0B => debug!(" WNCKR"),
0x0D => debug!(" SPCTRL1"),
_ => panic!("xram unimplemented GCTRL register 0x{:02X}", offset)
}
debug!(")");
},
// BRAM
0x2200 ..= 0x22FF => {
let base = 0x2200;
let offset = address - base;
debug!(" (BRAM 0x{:02X})", offset);
},
// PECI
0x3000 ..= 0x30FF => {
let base = 0x3000;
let offset = address - base;
debug!(" (PECI 0x{:02X}", offset);
match offset {
0x08 => debug!(" HOCTL2R"),
0x0E => debug!(" PADCTLR"),
_ => panic!("xram unimplemented PECI register 0x{:02X}", offset)
}
debug!(")");
},
// eSPI
0x3100 ..= 0x32FF if ec.id == 0x5570 => {
let base = 0x3100;
let offset = address - base;
debug!(" (eSPI 0x{:02X}", offset);
match offset {
// Peripheral
0x04 => debug!(" General Capabilities and Configurations 3"),
0x05 => debug!(" General Capabilities and Configurations 2"),
0x06 => debug!(" General Capabilities and Configurations 1"),
0x07 => debug!(" General Capabilities and Configurations 0"),
0xA1 => debug!(" ESGCTRL1"),
0xA2 => debug!(" ESGCTRL2"),
// Virtual wire
0x190 => debug!(" VWCTRL0"),
_ => panic!("xram unimplemented eSPI register 0x{:02X}", offset)
}
debug!(")");
},
0x8000 ..= 0x97FF if ec.id == 0x5570 => {
let base = 0x8000;
let offset = address - base;
debug!(" (SRAM 0x{:02X})", offset);
}
_ => panic!("xram unimplemented register 0x{:04X}", address),
}
debug!(" load 0x{:02X}", old);
if let Some(new) = new_opt {
debug!(" store 0x{:02X}", new);
mcu.store(Addr::XRam(address), new);
}
debug!("]");
old
}
| 38.345259 | 117 | 0.360299 |
e2110383eca069f22c509db5ac546c35d24df4e4
| 1,096 |
use crate::utils::shared_lru_cache::SharedLruCache;
use zksync_storage::chain::{
block::records::BlockDetails,
operations_ext::records::{PriorityOpReceiptResponse, TxReceiptResponse},
};
use zksync_types::ExecutedOperations;
/// Caches used by REST API server.
#[derive(Debug, Clone)]
pub struct Caches {
pub transaction_receipts: SharedLruCache<Vec<u8>, TxReceiptResponse>,
pub priority_op_receipts: SharedLruCache<u32, PriorityOpReceiptResponse>,
pub block_executed_ops: SharedLruCache<u32, Vec<ExecutedOperations>>,
pub blocks_info: SharedLruCache<u32, BlockDetails>,
pub blocks_by_height_or_hash: SharedLruCache<String, BlockDetails>,
}
impl Caches {
pub fn new(caches_size: usize) -> Self {
Self {
transaction_receipts: SharedLruCache::new(caches_size),
priority_op_receipts: SharedLruCache::new(caches_size),
block_executed_ops: SharedLruCache::new(caches_size),
blocks_info: SharedLruCache::new(caches_size),
blocks_by_height_or_hash: SharedLruCache::new(caches_size),
}
}
}
| 37.793103 | 77 | 0.730839 |
b92fff42d7077aed73f5767f1341d156645560c2
| 2,652 |
//! A very simple shader example.
use gfx::{self, *};
use ggez;
use glam;
use ggez::event;
use ggez::graphics::{self, DrawMode};
use ggez::timer;
use ggez::{Context, GameResult};
use std::env;
use std::path;
// Define the input struct for our shader.
gfx_defines! {
constant Dim {
rate: f32 = "u_Rate",
}
}
struct MainState {
dim: Dim,
shader: graphics::Shader<Dim>,
}
impl MainState {
fn new(ctx: &mut Context) -> GameResult<MainState> {
let dim = Dim { rate: 0.5 };
let shader = graphics::Shader::new(
ctx,
"/basic_150.glslv",
"/dimmer_150.glslf",
dim,
"Dim",
None,
)?;
Ok(MainState { dim, shader })
}
}
impl event::EventHandler for MainState {
fn update(&mut self, ctx: &mut Context) -> GameResult {
self.dim.rate = 0.5 + (((timer::ticks(ctx) as f32) / 100.0).cos() / 2.0);
Ok(())
}
fn draw(&mut self, ctx: &mut Context) -> GameResult {
graphics::clear(ctx, [0.1, 0.2, 0.3, 1.0].into());
let circle = graphics::Mesh::new_circle(
ctx,
DrawMode::fill(),
glam::Vec2::new(100.0, 300.0),
100.0,
2.0,
graphics::WHITE,
)?;
graphics::draw(ctx, &circle, (glam::Vec2::new(0.0, 0.0),))?;
{
let _lock = graphics::use_shader(ctx, &self.shader);
self.shader.send(ctx, self.dim)?;
let circle = graphics::Mesh::new_circle(
ctx,
DrawMode::fill(),
glam::Vec2::new(400.0, 300.0),
100.0,
2.0,
graphics::WHITE,
)?;
graphics::draw(ctx, &circle, (glam::Vec2::new(0.0, 0.0),))?;
}
let circle = graphics::Mesh::new_circle(
ctx,
DrawMode::fill(),
glam::Vec2::new(700.0, 300.0),
100.0,
2.0,
graphics::WHITE,
)?;
graphics::draw(ctx, &circle, (glam::Vec2::new(0.0, 0.0),))?;
graphics::present(ctx)?;
Ok(())
}
}
pub fn main() -> GameResult {
let resource_dir = if let Ok(manifest_dir) = env::var("CARGO_MANIFEST_DIR") {
let mut path = path::PathBuf::from(manifest_dir);
path.push("resources");
path
} else {
path::PathBuf::from("./resources")
};
let cb = ggez::ContextBuilder::new("shader", "ggez").add_resource_path(resource_dir);
let (mut ctx, event_loop) = cb.build()?;
let state = MainState::new(&mut ctx)?;
event::run(ctx, event_loop, state)
}
| 25.5 | 89 | 0.502262 |
62672e54aeac09f049200daa39b2a1f04a15b303
| 137 |
error_chain! {
types {
Error, ErrorKind, ResultExt, Result;
}
foreign_links {
Io(::std::io::Error);
}
}
| 13.7 | 44 | 0.510949 |
33bd18db9103db15e6f6e680a1c3bab1f631173a
| 7,908 |
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// from gst-gir-files (https://gitlab.freedesktop.org/gstreamer/gir-files-rs.git)
// DO NOT EDIT
use crate::Asset;
use crate::ClipAsset;
use crate::MetaContainer;
#[cfg(any(feature = "v1_18", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_18")))]
use crate::SourceClipAsset;
use crate::UriSourceAsset;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
#[cfg(any(feature = "v1_18", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_18")))]
use glib::StaticType;
use glib::ToValue;
use std::boxed::Box as Box_;
use std::mem::transmute;
use std::ptr;
#[cfg(any(feature = "v1_18", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_18")))]
glib::wrapper! {
#[doc(alias = "GESUriClipAsset")]
pub struct UriClipAsset(Object<ffi::GESUriClipAsset, ffi::GESUriClipAssetClass>) @extends SourceClipAsset, ClipAsset, Asset, @implements MetaContainer;
match fn {
type_ => || ffi::ges_uri_clip_asset_get_type(),
}
}
#[cfg(not(any(feature = "v1_18", feature = "dox")))]
glib::wrapper! {
#[doc(alias = "GESUriClipAsset")]
pub struct UriClipAsset(Object<ffi::GESUriClipAsset, ffi::GESUriClipAssetClass>) @extends ClipAsset, Asset, @implements MetaContainer;
match fn {
type_ => || ffi::ges_uri_clip_asset_get_type(),
}
}
impl UriClipAsset {
pub const NONE: Option<&'static UriClipAsset> = None;
//#[cfg(any(feature = "v1_16", feature = "dox"))]
//#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_16")))]
//#[doc(alias = "ges_uri_clip_asset_finish")]
//pub fn finish(res: /*Ignored*/&gio::AsyncResult) -> Result<UriClipAsset, glib::Error> {
// unsafe { TODO: call ffi:ges_uri_clip_asset_finish() }
//}
//#[doc(alias = "ges_uri_clip_asset_new")]
//pub fn new<P: FnOnce(Result<(), glib::Error>) + 'static>(uri: &str, cancellable: Option<&impl IsA<gio::Cancellable>>, callback: P) {
// unsafe { TODO: call ffi:ges_uri_clip_asset_new() }
//}
#[doc(alias = "ges_uri_clip_asset_request_sync")]
pub fn request_sync(uri: &str) -> Result<UriClipAsset, glib::Error> {
assert_initialized_main_thread!();
unsafe {
let mut error = ptr::null_mut();
let ret = ffi::ges_uri_clip_asset_request_sync(uri.to_glib_none().0, &mut error);
if error.is_null() {
Ok(from_glib_full(ret))
} else {
Err(from_glib_full(error))
}
}
}
}
pub trait UriClipAssetExt: 'static {
#[doc(alias = "ges_uri_clip_asset_get_duration")]
#[doc(alias = "get_duration")]
fn duration(&self) -> Option<gst::ClockTime>;
#[doc(alias = "ges_uri_clip_asset_get_info")]
#[doc(alias = "get_info")]
fn info(&self) -> Option<gst_pbutils::DiscovererInfo>;
#[cfg(any(feature = "v1_18", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_18")))]
#[doc(alias = "ges_uri_clip_asset_get_max_duration")]
#[doc(alias = "get_max_duration")]
fn max_duration(&self) -> Option<gst::ClockTime>;
#[doc(alias = "ges_uri_clip_asset_get_stream_assets")]
#[doc(alias = "get_stream_assets")]
fn stream_assets(&self) -> Vec<UriSourceAsset>;
#[cfg(any(feature = "v1_18", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_18")))]
#[doc(alias = "ges_uri_clip_asset_is_image")]
fn is_image(&self) -> bool;
fn set_duration(&self, duration: u64);
#[cfg(any(feature = "v1_18", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_18")))]
#[doc(alias = "is-nested-timeline")]
fn is_nested_timeline(&self) -> bool;
#[doc(alias = "duration")]
fn connect_duration_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[cfg(any(feature = "v1_18", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_18")))]
#[doc(alias = "is-nested-timeline")]
fn connect_is_nested_timeline_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<UriClipAsset>> UriClipAssetExt for O {
fn duration(&self) -> Option<gst::ClockTime> {
unsafe {
from_glib(ffi::ges_uri_clip_asset_get_duration(
self.as_ref().to_glib_none().0,
))
}
}
fn info(&self) -> Option<gst_pbutils::DiscovererInfo> {
unsafe {
from_glib_none(ffi::ges_uri_clip_asset_get_info(const_override(
self.as_ref().to_glib_none().0,
)))
}
}
#[cfg(any(feature = "v1_18", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_18")))]
fn max_duration(&self) -> Option<gst::ClockTime> {
unsafe {
from_glib(ffi::ges_uri_clip_asset_get_max_duration(
self.as_ref().to_glib_none().0,
))
}
}
fn stream_assets(&self) -> Vec<UriSourceAsset> {
unsafe {
FromGlibPtrContainer::from_glib_none(ffi::ges_uri_clip_asset_get_stream_assets(
self.as_ref().to_glib_none().0,
))
}
}
#[cfg(any(feature = "v1_18", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_18")))]
fn is_image(&self) -> bool {
unsafe {
from_glib(ffi::ges_uri_clip_asset_is_image(
self.as_ref().to_glib_none().0,
))
}
}
fn set_duration(&self, duration: u64) {
glib::ObjectExt::set_property(self.as_ref(), "duration", &duration)
}
#[cfg(any(feature = "v1_18", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_18")))]
fn is_nested_timeline(&self) -> bool {
glib::ObjectExt::property(self.as_ref(), "is-nested-timeline")
}
fn connect_duration_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_duration_trampoline<
P: IsA<UriClipAsset>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::GESUriClipAsset,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(UriClipAsset::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::duration\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_duration_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
#[cfg(any(feature = "v1_18", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_18")))]
fn connect_is_nested_timeline_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_is_nested_timeline_trampoline<
P: IsA<UriClipAsset>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::GESUriClipAsset,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(UriClipAsset::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::is-nested-timeline\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_is_nested_timeline_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
}
| 35.303571 | 155 | 0.581057 |
1454253fdb6e742bdb2d5059499d2af53a662263
| 3,972 |
%
%This file replicates the estimation of the cash in advance model described in
%Frank Schorfheide (2000): "Loss function-based evaluation of DSGE models",
%Journal of Applied Econometrics, 15(6), 645-670.
%
%The data are in file "fsdat_simul.m", and have been artificially generated.
%They are therefore different from the original dataset used by Schorfheide.
%
%The equations are taken from J. Nason and T. Cogley (1994): "Testing the
%implications of long-run neutrality for monetary business cycle models",
%Journal of Applied Econometrics, 9, S37-S70.
%Note that there is an initial minus sign missing in equation (A1), p. S63.
%
%This implementation was written by Michel Juillard. Please note that the
%following copyright notice only applies to this Dynare implementation of the
%model.
%
endogenous m P c e W R k d n l gy_obs gp_obs y dA
exogenous e_a e_m
parameters alp bet gam mst rho psi del sig_a sig_m
observables gp_obs gy_obs
model
dA = exp(gam+sig_a*e_a);
log(m) = (1-rho)*log(mst) + rho*log(m(-1))+sig_m*e_m;
-P/(c(+1)*P(+1)*m)+bet*P(+1)*(alp*exp(-alp*(gam+log(e(+1))))*k^(alp-1)*n(+1)^(1-alp)+(1-del)*exp(-(gam+log(e(+1)))))/(c(+2)*P(+2)*m(+1))=0;
W = l/n;
-(psi/(1-psi))*(c*P/(1-n))+l/n = 0;
R = P*(1-alp)*exp(-alp*(gam+sig_a*e_a))*k(-1)^alp*n^(-alp)/W;
1/(c*P)-bet*P*(1-alp)*exp(-alp*(gam+sig_a*e_a))*k(-1)^alp*n^(1-alp)/(m*l*c(+1)*P(+1)) = 0;
c+k = exp(-alp*(gam+sig_a*e_a))*k(-1)^alp*n^(1-alp)+(1-del)*exp(-(gam+sig_a*e_a))*k(-1);
P*c = m;
m-1+d = l;
e = exp(sig_a*e_a);
y = k(-1)^alp*n^(1-alp)*exp(-alp*(gam+sig_a*e_a));
% We have to take into account the loglinear option of dynare
% (which will take the log of the steady state during estimation)
% In rise, we have to explicitly put the variables in log terms
exp(gy_obs) = dA*y/y(-1); % gy_obs = dA*y/y(-1);
exp(gp_obs) = (P/P(-1))*m(-1)/dA; % gp_obs = (P/P(-1))*m(-1)/dA;
%steady_state_model; % if this does not work, rise will use it as initial guess
% k = 6;
% m = mst;
% P = 2.25;
% c = 0.45;
% e = 1;
% W = 4;
% R = 1.02;
% d = 0.85;
% n = 0.19;
% l = 0.86;
% y = 0.6;
% gy_obs = gam; % gy_obs = exp(gam);
% gp_obs = -gam; % gp_obs = exp(-gam);
% dA = exp(gam);
%end;
%xx_ssmdef_1 ... xx_ssmdef_9 are known words to rise
steady_state_model;
dA = exp(gam);
xx_ssmdef_1 = 1/dA;
m = mst;
xx_ssmdef_2 = ( (1-xx_ssmdef_1*bet*(1-del)) / (alp*xx_ssmdef_1^alp*bet) )^(1/(alp-1));
xx_ssmdef_3 = ( ((xx_ssmdef_2*xx_ssmdef_1)^alp - (1-xx_ssmdef_1*(1-del))*xx_ssmdef_2)/mst )^(-1);
xx_ssmdef_4 = psi*mst^2/( (1-alp)*(1-psi)*bet*xx_ssmdef_1^alp*xx_ssmdef_2^alp );
n = xx_ssmdef_3/(xx_ssmdef_4+xx_ssmdef_3);
P = xx_ssmdef_3 + xx_ssmdef_4;
k = xx_ssmdef_2*n;
l = psi*mst*n/( (1-psi)*(1-n) );
c = mst/P;
d = l - mst + 1;
y = k^alp*n^(1-alp)*xx_ssmdef_1^alp;
R = mst/bet;
W = l/n;
%% ist = y-c;
% q = 1 - d;
e = 1;
gp_obs = log(m/dA);% accommodating dynare's loglinear option
gy_obs = log(dA); % accommodating dynare's loglinear option
parameterization; % point at which dynare starts estimation % dynare calibration
alp , 0.356000, 0.3235, 0.3891, beta_pdf(.90) ;% 0.356, 0.02; 0.330
bet , 0.993000, 0.9896, 0.9958, beta_pdf(.90) ;% 0.993, 0.002; 0.990
gam , 0.008500, 0.0036, 0.0134, normal_pdf(.90) ;% 0.0085, 0.003; 0.003
mst , 1.000200, 0.9887, 1.0117, normal_pdf(.90) ;% 1.0002, 0.007; 1.011
rho , 0.129000, 0.0001, 0.6851, beta_pdf(.90) ;% 0.129, 0.223; 0.700
psi , 0.650000, 0.5658, 0.7304, beta_pdf(.90) ;% 0.65, 0.05; 0.787
del , 0.010000, 0.0034, 0.0194, beta_pdf(.90) ;% 0.01, 0.005; 0.020
sig_a, 0.035449, 0.0075, 0.0998, inv_gamma_pdf(.90);% 0.035449, inf; 0.014
sig_m, 0.008862, 0.0019, 0.0249, inv_gamma_pdf(.90);% 0.008862, inf; 0.005
% Look into folder Moments2Bounds to see how to go from moments to bounds of the distribution
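% A sketch of that mapping for the beta priors above (assuming the two bounds
% are the 90% equal-tail interval implied by the dynare mean/std columns):
% given mean m and std s, solve
%   a = m*( m*(1-m)/s^2 - 1 ),   b = (1-m)*( m*(1-m)/s^2 - 1 )
% and take the 5th and 95th percentiles of beta(a,b). For alp (m=0.356, s=0.02)
% this gives a close to 204 and b close to 369, whose percentiles fall near the
% (0.3235, 0.3891) bounds listed above.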
| 34.241379 | 140 | 0.611782 |
4b0fb19019b90e575bcdbc590da668497d20d621
| 11,315 |
use std::sync::Arc;
use data_types::{
router::{
WriteSink as WriteSinkConfig, WriteSinkSet as WriteSinkSetConfig,
WriteSinkVariant as WriteSinkVariantConfig,
},
server_id::ServerId,
write_buffer::WriteBufferConnection,
};
use dml::DmlOperation;
use snafu::{OptionExt, ResultExt, Snafu};
use crate::{
connection_pool::{ConnectionError, ConnectionPool},
resolver::Resolver,
};
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("No remote for server ID {}", server_id))]
NoRemote { server_id: ServerId },
#[snafu(display("Cannot connect: {}", source))]
ConnectionFailure { source: ConnectionError },
#[snafu(display("Cannot write: {}", source))]
WriteFailure {
source: Box<dyn std::error::Error + Send + Sync>,
},
}
#[derive(Debug)]
struct VariantGrpcRemote {
db_name: String,
server_id: ServerId,
resolver: Arc<Resolver>,
connection_pool: Arc<ConnectionPool>,
}
impl VariantGrpcRemote {
fn new(
db_name: String,
server_id: ServerId,
resolver: Arc<Resolver>,
connection_pool: Arc<ConnectionPool>,
) -> Self {
Self {
db_name,
server_id,
resolver,
connection_pool,
}
}
async fn write(&self, write: &DmlOperation) -> Result<(), Error> {
let connection_string = self
.resolver
.resolve_remote(self.server_id)
.context(NoRemote {
server_id: self.server_id,
})?;
let client = self
.connection_pool
.grpc_client(&connection_string)
.await
.context(ConnectionFailure)?;
client
.write(&self.db_name, write)
.await
.context(WriteFailure)
}
}
#[derive(Debug)]
struct VariantWriteBuffer {
db_name: String,
write_buffer_cfg: WriteBufferConnection,
connection_pool: Arc<ConnectionPool>,
}
impl VariantWriteBuffer {
fn new(
db_name: String,
write_buffer_cfg: WriteBufferConnection,
connection_pool: Arc<ConnectionPool>,
) -> Self {
Self {
db_name,
write_buffer_cfg,
connection_pool,
}
}
async fn write(&self, operation: &DmlOperation) -> Result<(), Error> {
let write_buffer = self
.connection_pool
.write_buffer_producer(&self.db_name, &self.write_buffer_cfg)
.await
.context(ConnectionFailure)?;
// TODO(marco): use multiple sequencers
write_buffer
.store_operation(0, operation)
.await
.context(WriteFailure)?;
Ok(())
}
}
#[derive(Debug)]
enum WriteSinkVariant {
/// Send write to a remote server via gRPC
GrpcRemote(VariantGrpcRemote),
/// Send write to a write buffer (which may be backed by kafka, local disk, etc)
WriteBuffer(VariantWriteBuffer),
}
/// Write sink abstraction.
#[derive(Debug)]
pub struct WriteSink {
ignore_errors: bool,
variant: WriteSinkVariant,
}
impl WriteSink {
pub fn new(
db_name: &str,
config: WriteSinkConfig,
resolver: Arc<Resolver>,
connection_pool: Arc<ConnectionPool>,
) -> Self {
let variant = match config.sink {
WriteSinkVariantConfig::GrpcRemote(server_id) => WriteSinkVariant::GrpcRemote(
VariantGrpcRemote::new(db_name.to_string(), server_id, resolver, connection_pool),
),
WriteSinkVariantConfig::WriteBuffer(write_buffer_cfg) => WriteSinkVariant::WriteBuffer(
VariantWriteBuffer::new(db_name.to_string(), write_buffer_cfg, connection_pool),
),
};
Self {
ignore_errors: config.ignore_errors,
variant,
}
}
pub async fn write(&self, write: &DmlOperation) -> Result<(), Error> {
let res = match &self.variant {
WriteSinkVariant::GrpcRemote(v) => v.write(write).await,
WriteSinkVariant::WriteBuffer(v) => v.write(write).await,
};
match res {
Ok(()) => Ok(()),
Err(_) if self.ignore_errors => Ok(()),
e => e,
}
}
}
/// A set of [`WriteSink`]s.
#[derive(Debug)]
pub struct WriteSinkSet {
sinks: Vec<WriteSink>,
}
impl WriteSinkSet {
/// Create new set from config.
pub fn new(
db_name: &str,
config: WriteSinkSetConfig,
resolver: Arc<Resolver>,
connection_pool: Arc<ConnectionPool>,
) -> Self {
Self {
sinks: config
.sinks
.into_iter()
.map(|sink_config| {
WriteSink::new(
db_name,
sink_config,
Arc::clone(&resolver),
Arc::clone(&connection_pool),
)
})
.collect(),
}
}
/// Write to sinks. Fails on first error.
pub async fn write(&self, operation: &DmlOperation) -> Result<(), Error> {
for sink in &self.sinks {
sink.write(operation).await?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use dml::DmlWrite;
use mutable_batch_lp::lines_to_batches;
use time::SystemProvider;
use write_buffer::config::WriteBufferConfigFactory;
use crate::grpc_client::MockClient;
use super::*;
#[tokio::test]
async fn test_write_sink_error_handling() {
let server_id = ServerId::try_from(1).unwrap();
let resolver = Arc::new(Resolver::new(None));
resolver.update_remote(server_id, String::from("1.2.3.4"));
let time_provider = Arc::new(SystemProvider::new());
let metric_registry = Arc::new(metric::Registry::new());
let wb_factory = Arc::new(WriteBufferConfigFactory::new(
time_provider,
metric_registry,
));
wb_factory.register_always_fail_mock(String::from("failing_wb"));
let connection_pool = Arc::new(ConnectionPool::new(true, wb_factory).await);
let client_grpc = connection_pool.grpc_client("1.2.3.4").await.unwrap();
let client_grpc = client_grpc.as_any().downcast_ref::<MockClient>().unwrap();
client_grpc.poison();
let write = DmlOperation::Write(DmlWrite::new(
lines_to_batches("foo x=1 1", 0).unwrap(),
Default::default(),
));
// gRPC, do NOT ignore errors
let config = WriteSinkConfig {
sink: WriteSinkVariantConfig::GrpcRemote(server_id),
ignore_errors: false,
};
let sink = WriteSink::new(
"my_db",
config,
Arc::clone(&resolver),
Arc::clone(&connection_pool),
);
sink.write(&write).await.unwrap_err();
// gRPC, ignore errors
let config = WriteSinkConfig {
sink: WriteSinkVariantConfig::GrpcRemote(server_id),
ignore_errors: true,
};
let sink = WriteSink::new(
"my_db",
config,
Arc::clone(&resolver),
Arc::clone(&connection_pool),
);
sink.write(&write).await.unwrap();
// write buffer, do NOT ignore errors
let write_buffer_cfg = WriteBufferConnection {
type_: String::from("mock"),
connection: String::from("failing_wb"),
..Default::default()
};
let config = WriteSinkConfig {
sink: WriteSinkVariantConfig::WriteBuffer(write_buffer_cfg.clone()),
ignore_errors: false,
};
let sink = WriteSink::new(
"my_db",
config,
Arc::clone(&resolver),
Arc::clone(&connection_pool),
);
sink.write(&write).await.unwrap_err();
// write buffer, ignore errors
let config = WriteSinkConfig {
sink: WriteSinkVariantConfig::WriteBuffer(write_buffer_cfg),
ignore_errors: true,
};
let sink = WriteSink::new(
"my_db",
config,
Arc::clone(&resolver),
Arc::clone(&connection_pool),
);
sink.write(&write).await.unwrap();
}
#[tokio::test]
async fn test_write_sink_set() {
let server_id_1 = ServerId::try_from(1).unwrap();
let server_id_2 = ServerId::try_from(2).unwrap();
let server_id_3 = ServerId::try_from(3).unwrap();
let resolver = Arc::new(Resolver::new(None));
resolver.update_remote(server_id_1, String::from("1"));
resolver.update_remote(server_id_2, String::from("2"));
resolver.update_remote(server_id_3, String::from("3"));
let connection_pool = Arc::new(ConnectionPool::new_testing().await);
let client_1 = connection_pool.grpc_client("1").await.unwrap();
let client_2 = connection_pool.grpc_client("2").await.unwrap();
let client_3 = connection_pool.grpc_client("3").await.unwrap();
let client_1 = client_1.as_any().downcast_ref::<MockClient>().unwrap();
let client_2 = client_2.as_any().downcast_ref::<MockClient>().unwrap();
let client_3 = client_3.as_any().downcast_ref::<MockClient>().unwrap();
let sink_set = WriteSinkSet::new(
"my_db",
WriteSinkSetConfig {
sinks: vec![
WriteSinkConfig {
sink: WriteSinkVariantConfig::GrpcRemote(server_id_1),
ignore_errors: false,
},
WriteSinkConfig {
sink: WriteSinkVariantConfig::GrpcRemote(server_id_2),
ignore_errors: false,
},
WriteSinkConfig {
sink: WriteSinkVariantConfig::GrpcRemote(server_id_3),
ignore_errors: false,
},
],
},
resolver,
connection_pool,
);
let write_1 = DmlOperation::Write(DmlWrite::new(
lines_to_batches("foo x=1 1", 0).unwrap(),
Default::default(),
));
sink_set.write(&write_1).await.unwrap();
let writes_1 = [(String::from("my_db"), write_1.clone())];
client_1.assert_writes(&writes_1);
client_2.assert_writes(&writes_1);
client_3.assert_writes(&writes_1);
client_2.poison();
let write_2 = DmlOperation::Write(DmlWrite::new(
lines_to_batches("foo x=2 2", 0).unwrap(),
Default::default(),
));
sink_set.write(&write_2).await.unwrap_err();
// The sink set stops on first non-ignored error. So
// - client 1 got the new data
// - client 2 failed, but still has the data from the first write
// - client 3 got skipped due to the failure, but still has the data from the first write
let writes_2 = [
(String::from("my_db"), write_1.clone()),
(String::from("my_db"), write_2.clone()),
];
client_1.assert_writes(&writes_2);
client_2.assert_writes(&writes_1);
client_3.assert_writes(&writes_1);
}
}
| 30.335121 | 99 | 0.564472 |
e507d896b0ed510b9eb421896802754ceb82436d
| 3,842 |
//! I2C/SPI interfaces
use crate::{private, Error, DEVICE_ADDRESS};
use embedded_hal::{
blocking::{i2c, spi},
digital::v2::OutputPin,
};
/// I2C interface
#[derive(Debug, Default)]
pub struct I2cInterface<I2C> {
pub(crate) i2c: I2C,
}
/// SPI interface
#[derive(Debug, Default)]
pub struct SpiInterface<SPI, CS> {
pub(crate) spi: SPI,
pub(crate) cs: CS,
}
/// Write data
pub trait WriteData: private::Sealed {
/// Error type
type Error;
/// Write to an u8 register
fn write_register(&mut self, register: u8, data: u8) -> Result<(), Self::Error>;
/// Write data. The first element corresponds to the starting address.
fn write_data(&mut self, payload: &mut [u8]) -> Result<(), Self::Error>;
}
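// Usage sketch (not part of the original trait): per the contract above, callers of
// `write_data` place the starting register address in byte 0 and the data after it.
// `iface` stands for any implementor of `WriteData`; the register address 0x20 and
// the data bytes are placeholders for illustration only.
//
//     let mut payload = [0x20, 0x01, 0x02, 0x03];
//     iface.write_data(&mut payload)?;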
impl<I2C, E> WriteData for I2cInterface<I2C>
where
I2C: i2c::Write<Error = E>,
{
type Error = Error<E, ()>;
fn write_register(&mut self, register: u8, data: u8) -> Result<(), Self::Error> {
let payload: [u8; 2] = [register, data];
self.i2c
.write(DEVICE_ADDRESS, &payload)
.map_err(Error::Comm)
}
fn write_data(&mut self, payload: &mut [u8]) -> Result<(), Self::Error> {
self.i2c
            .write(DEVICE_ADDRESS, payload)
.map_err(Error::Comm)
}
}
impl<SPI, CS, CommE, PinE> WriteData for SpiInterface<SPI, CS>
where
SPI: spi::Write<u8, Error = CommE>,
CS: OutputPin<Error = PinE>,
{
type Error = Error<CommE, PinE>;
fn write_register(&mut self, register: u8, data: u8) -> Result<(), Self::Error> {
self.cs.set_low().map_err(Error::Pin)?;
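        // Write transactions set the MSB (0x80) of the register address here and in
        // `write_data` below, while the read transactions further down leave it
        // clear; this is assumed to be the device's SPI read/write flag convention.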
let payload: [u8; 2] = [register + 0x80, data];
let result = self.spi.write(&payload).map_err(Error::Comm);
self.cs.set_high().map_err(Error::Pin)?;
result
}
fn write_data(&mut self, payload: &mut [u8]) -> Result<(), Self::Error> {
self.cs.set_low().map_err(Error::Pin)?;
payload[0] += 0x80;
        let result = self.spi.write(payload).map_err(Error::Comm);
self.cs.set_high().map_err(Error::Pin)?;
result
}
}
/// Read data
pub trait ReadData: private::Sealed {
/// Error type
type Error;
/// Read an u8 register
fn read_register(&mut self, register: u8) -> Result<u8, Self::Error>;
/// Read some data. The first element corresponds to the starting address.
fn read_data(&mut self, payload: &mut [u8]) -> Result<(), Self::Error>;
}
impl<I2C, E> ReadData for I2cInterface<I2C>
where
I2C: i2c::WriteRead<Error = E>,
{
type Error = Error<E, ()>;
fn read_register(&mut self, register: u8) -> Result<u8, Self::Error> {
let mut data = [0];
self.i2c
.write_read(DEVICE_ADDRESS, &[register], &mut data)
.map_err(Error::Comm)
.and(Ok(data[0]))
}
fn read_data(&mut self, payload: &mut [u8]) -> Result<(), Self::Error> {
let len = payload.len();
self.i2c
.write_read(DEVICE_ADDRESS, &[payload[0]], &mut payload[1..len])
.map_err(Error::Comm)
}
}
impl<SPI, CS, CommE, PinE> ReadData for SpiInterface<SPI, CS>
where
SPI: spi::Transfer<u8, Error = CommE>,
CS: OutputPin<Error = PinE>,
{
type Error = Error<CommE, PinE>;
fn read_register(&mut self, register: u8) -> Result<u8, Self::Error> {
self.cs.set_low().map_err(Error::Pin)?;
let mut data = [register, 0];
let result = self.spi.transfer(&mut data).map_err(Error::Comm);
self.cs.set_high().map_err(Error::Pin)?;
Ok(result?[1])
}
    fn read_data(&mut self, payload: &mut [u8]) -> Result<(), Self::Error> {
        self.cs.set_low().map_err(Error::Pin)?;
        let result = self.spi.transfer(payload).map_err(Error::Comm);
self.cs.set_high().map_err(Error::Pin)?;
result?;
Ok(())
}
}
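// Usage sketch (not part of the original file): per the `ReadData` contract, a burst
// read places the starting register address in byte 0 and receives the data in the
// remaining bytes. `iface` stands for any implementor of `ReadData`; the register
// address 0x28 and the 7-byte buffer are placeholders for illustration only.
//
//     let mut data = [0u8; 7];
//     data[0] = 0x28;
//     iface.read_data(&mut data)?;
//     let sample = &data[1..];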
| 29.553846 | 85 | 0.590838 |
ac1c0aa3fb655a07227da8d0aa5d678c633a721c
| 54,161 |
// Generated from definition io.k8s.api.core.v1.PersistentVolumeSpec
/// PersistentVolumeSpec is the specification of a persistent volume.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct PersistentVolumeSpec {
/// AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
pub access_modes: Option<Vec<String>>,
/// AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
pub aws_elastic_block_store: Option<crate::api::core::v1::AWSElasticBlockStoreVolumeSource>,
/// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
pub azure_disk: Option<crate::api::core::v1::AzureDiskVolumeSource>,
/// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
pub azure_file: Option<crate::api::core::v1::AzureFilePersistentVolumeSource>,
/// A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
pub capacity: Option<std::collections::BTreeMap<String, crate::apimachinery::pkg::api::resource::Quantity>>,
/// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
pub cephfs: Option<crate::api::core::v1::CephFSPersistentVolumeSource>,
/// Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
pub cinder: Option<crate::api::core::v1::CinderPersistentVolumeSource>,
/// ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
pub claim_ref: Option<crate::api::core::v1::ObjectReference>,
/// CSI represents storage that handled by an external CSI driver (Beta feature).
pub csi: Option<crate::api::core::v1::CSIPersistentVolumeSource>,
/// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
pub fc: Option<crate::api::core::v1::FCVolumeSource>,
/// FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.
pub flex_volume: Option<crate::api::core::v1::FlexPersistentVolumeSource>,
/// Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
pub flocker: Option<crate::api::core::v1::FlockerVolumeSource>,
/// GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
pub gce_persistent_disk: Option<crate::api::core::v1::GCEPersistentDiskVolumeSource>,
/// Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
pub glusterfs: Option<crate::api::core::v1::GlusterfsVolumeSource>,
/// HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
pub host_path: Option<crate::api::core::v1::HostPathVolumeSource>,
/// ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.
pub iscsi: Option<crate::api::core::v1::ISCSIPersistentVolumeSource>,
/// Local represents directly-attached storage with node affinity
pub local: Option<crate::api::core::v1::LocalVolumeSource>,
/// A list of mount options, e.g. \["ro", "soft"\]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
pub mount_options: Option<Vec<String>>,
/// NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
pub nfs: Option<crate::api::core::v1::NFSVolumeSource>,
/// NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.
pub node_affinity: Option<crate::api::core::v1::VolumeNodeAffinity>,
/// What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
pub persistent_volume_reclaim_policy: Option<String>,
/// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
pub photon_persistent_disk: Option<crate::api::core::v1::PhotonPersistentDiskVolumeSource>,
/// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
pub portworx_volume: Option<crate::api::core::v1::PortworxVolumeSource>,
/// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
pub quobyte: Option<crate::api::core::v1::QuobyteVolumeSource>,
/// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
pub rbd: Option<crate::api::core::v1::RBDPersistentVolumeSource>,
/// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
pub scale_io: Option<crate::api::core::v1::ScaleIOPersistentVolumeSource>,
/// Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.
pub storage_class_name: Option<String>,
/// StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
pub storageos: Option<crate::api::core::v1::StorageOSPersistentVolumeSource>,
/// volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.
pub volume_mode: Option<String>,
/// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
pub vsphere_volume: Option<crate::api::core::v1::VsphereVirtualDiskVolumeSource>,
}
impl<'de> crate::serde::Deserialize<'de> for PersistentVolumeSpec {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_access_modes,
Key_aws_elastic_block_store,
Key_azure_disk,
Key_azure_file,
Key_capacity,
Key_cephfs,
Key_cinder,
Key_claim_ref,
Key_csi,
Key_fc,
Key_flex_volume,
Key_flocker,
Key_gce_persistent_disk,
Key_glusterfs,
Key_host_path,
Key_iscsi,
Key_local,
Key_mount_options,
Key_nfs,
Key_node_affinity,
Key_persistent_volume_reclaim_policy,
Key_photon_persistent_disk,
Key_portworx_volume,
Key_quobyte,
Key_rbd,
Key_scale_io,
Key_storage_class_name,
Key_storageos,
Key_volume_mode,
Key_vsphere_volume,
Other,
}
impl<'de> crate::serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
Ok(match v {
"accessModes" => Field::Key_access_modes,
"awsElasticBlockStore" => Field::Key_aws_elastic_block_store,
"azureDisk" => Field::Key_azure_disk,
"azureFile" => Field::Key_azure_file,
"capacity" => Field::Key_capacity,
"cephfs" => Field::Key_cephfs,
"cinder" => Field::Key_cinder,
"claimRef" => Field::Key_claim_ref,
"csi" => Field::Key_csi,
"fc" => Field::Key_fc,
"flexVolume" => Field::Key_flex_volume,
"flocker" => Field::Key_flocker,
"gcePersistentDisk" => Field::Key_gce_persistent_disk,
"glusterfs" => Field::Key_glusterfs,
"hostPath" => Field::Key_host_path,
"iscsi" => Field::Key_iscsi,
"local" => Field::Key_local,
"mountOptions" => Field::Key_mount_options,
"nfs" => Field::Key_nfs,
"nodeAffinity" => Field::Key_node_affinity,
"persistentVolumeReclaimPolicy" => Field::Key_persistent_volume_reclaim_policy,
"photonPersistentDisk" => Field::Key_photon_persistent_disk,
"portworxVolume" => Field::Key_portworx_volume,
"quobyte" => Field::Key_quobyte,
"rbd" => Field::Key_rbd,
"scaleIO" => Field::Key_scale_io,
"storageClassName" => Field::Key_storage_class_name,
"storageos" => Field::Key_storageos,
"volumeMode" => Field::Key_volume_mode,
"vsphereVolume" => Field::Key_vsphere_volume,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = PersistentVolumeSpec;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("PersistentVolumeSpec")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
let mut value_access_modes: Option<Vec<String>> = None;
let mut value_aws_elastic_block_store: Option<crate::api::core::v1::AWSElasticBlockStoreVolumeSource> = None;
let mut value_azure_disk: Option<crate::api::core::v1::AzureDiskVolumeSource> = None;
let mut value_azure_file: Option<crate::api::core::v1::AzureFilePersistentVolumeSource> = None;
let mut value_capacity: Option<std::collections::BTreeMap<String, crate::apimachinery::pkg::api::resource::Quantity>> = None;
let mut value_cephfs: Option<crate::api::core::v1::CephFSPersistentVolumeSource> = None;
let mut value_cinder: Option<crate::api::core::v1::CinderPersistentVolumeSource> = None;
let mut value_claim_ref: Option<crate::api::core::v1::ObjectReference> = None;
let mut value_csi: Option<crate::api::core::v1::CSIPersistentVolumeSource> = None;
let mut value_fc: Option<crate::api::core::v1::FCVolumeSource> = None;
let mut value_flex_volume: Option<crate::api::core::v1::FlexPersistentVolumeSource> = None;
let mut value_flocker: Option<crate::api::core::v1::FlockerVolumeSource> = None;
let mut value_gce_persistent_disk: Option<crate::api::core::v1::GCEPersistentDiskVolumeSource> = None;
let mut value_glusterfs: Option<crate::api::core::v1::GlusterfsVolumeSource> = None;
let mut value_host_path: Option<crate::api::core::v1::HostPathVolumeSource> = None;
let mut value_iscsi: Option<crate::api::core::v1::ISCSIPersistentVolumeSource> = None;
let mut value_local: Option<crate::api::core::v1::LocalVolumeSource> = None;
let mut value_mount_options: Option<Vec<String>> = None;
let mut value_nfs: Option<crate::api::core::v1::NFSVolumeSource> = None;
let mut value_node_affinity: Option<crate::api::core::v1::VolumeNodeAffinity> = None;
let mut value_persistent_volume_reclaim_policy: Option<String> = None;
let mut value_photon_persistent_disk: Option<crate::api::core::v1::PhotonPersistentDiskVolumeSource> = None;
let mut value_portworx_volume: Option<crate::api::core::v1::PortworxVolumeSource> = None;
let mut value_quobyte: Option<crate::api::core::v1::QuobyteVolumeSource> = None;
let mut value_rbd: Option<crate::api::core::v1::RBDPersistentVolumeSource> = None;
let mut value_scale_io: Option<crate::api::core::v1::ScaleIOPersistentVolumeSource> = None;
let mut value_storage_class_name: Option<String> = None;
let mut value_storageos: Option<crate::api::core::v1::StorageOSPersistentVolumeSource> = None;
let mut value_volume_mode: Option<String> = None;
let mut value_vsphere_volume: Option<crate::api::core::v1::VsphereVirtualDiskVolumeSource> = None;
while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_access_modes => value_access_modes = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_aws_elastic_block_store => value_aws_elastic_block_store = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_azure_disk => value_azure_disk = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_azure_file => value_azure_file = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_capacity => value_capacity = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_cephfs => value_cephfs = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_cinder => value_cinder = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_claim_ref => value_claim_ref = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_csi => value_csi = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_fc => value_fc = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_flex_volume => value_flex_volume = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_flocker => value_flocker = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_gce_persistent_disk => value_gce_persistent_disk = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_glusterfs => value_glusterfs = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_host_path => value_host_path = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_iscsi => value_iscsi = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_local => value_local = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_mount_options => value_mount_options = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_nfs => value_nfs = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_node_affinity => value_node_affinity = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_persistent_volume_reclaim_policy => value_persistent_volume_reclaim_policy = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_photon_persistent_disk => value_photon_persistent_disk = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_portworx_volume => value_portworx_volume = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_quobyte => value_quobyte = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_rbd => value_rbd = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_scale_io => value_scale_io = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_storage_class_name => value_storage_class_name = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_storageos => value_storageos = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_volume_mode => value_volume_mode = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_vsphere_volume => value_vsphere_volume = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(PersistentVolumeSpec {
access_modes: value_access_modes,
aws_elastic_block_store: value_aws_elastic_block_store,
azure_disk: value_azure_disk,
azure_file: value_azure_file,
capacity: value_capacity,
cephfs: value_cephfs,
cinder: value_cinder,
claim_ref: value_claim_ref,
csi: value_csi,
fc: value_fc,
flex_volume: value_flex_volume,
flocker: value_flocker,
gce_persistent_disk: value_gce_persistent_disk,
glusterfs: value_glusterfs,
host_path: value_host_path,
iscsi: value_iscsi,
local: value_local,
mount_options: value_mount_options,
nfs: value_nfs,
node_affinity: value_node_affinity,
persistent_volume_reclaim_policy: value_persistent_volume_reclaim_policy,
photon_persistent_disk: value_photon_persistent_disk,
portworx_volume: value_portworx_volume,
quobyte: value_quobyte,
rbd: value_rbd,
scale_io: value_scale_io,
storage_class_name: value_storage_class_name,
storageos: value_storageos,
volume_mode: value_volume_mode,
vsphere_volume: value_vsphere_volume,
})
}
}
deserializer.deserialize_struct(
"PersistentVolumeSpec",
&[
"accessModes",
"awsElasticBlockStore",
"azureDisk",
"azureFile",
"capacity",
"cephfs",
"cinder",
"claimRef",
"csi",
"fc",
"flexVolume",
"flocker",
"gcePersistentDisk",
"glusterfs",
"hostPath",
"iscsi",
"local",
"mountOptions",
"nfs",
"nodeAffinity",
"persistentVolumeReclaimPolicy",
"photonPersistentDisk",
"portworxVolume",
"quobyte",
"rbd",
"scaleIO",
"storageClassName",
"storageos",
"volumeMode",
"vsphereVolume",
],
Visitor,
)
}
}
impl crate::serde::Serialize for PersistentVolumeSpec {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
let mut state = serializer.serialize_struct(
"PersistentVolumeSpec",
self.access_modes.as_ref().map_or(0, |_| 1) +
self.aws_elastic_block_store.as_ref().map_or(0, |_| 1) +
self.azure_disk.as_ref().map_or(0, |_| 1) +
self.azure_file.as_ref().map_or(0, |_| 1) +
self.capacity.as_ref().map_or(0, |_| 1) +
self.cephfs.as_ref().map_or(0, |_| 1) +
self.cinder.as_ref().map_or(0, |_| 1) +
self.claim_ref.as_ref().map_or(0, |_| 1) +
self.csi.as_ref().map_or(0, |_| 1) +
self.fc.as_ref().map_or(0, |_| 1) +
self.flex_volume.as_ref().map_or(0, |_| 1) +
self.flocker.as_ref().map_or(0, |_| 1) +
self.gce_persistent_disk.as_ref().map_or(0, |_| 1) +
self.glusterfs.as_ref().map_or(0, |_| 1) +
self.host_path.as_ref().map_or(0, |_| 1) +
self.iscsi.as_ref().map_or(0, |_| 1) +
self.local.as_ref().map_or(0, |_| 1) +
self.mount_options.as_ref().map_or(0, |_| 1) +
self.nfs.as_ref().map_or(0, |_| 1) +
self.node_affinity.as_ref().map_or(0, |_| 1) +
self.persistent_volume_reclaim_policy.as_ref().map_or(0, |_| 1) +
self.photon_persistent_disk.as_ref().map_or(0, |_| 1) +
self.portworx_volume.as_ref().map_or(0, |_| 1) +
self.quobyte.as_ref().map_or(0, |_| 1) +
self.rbd.as_ref().map_or(0, |_| 1) +
self.scale_io.as_ref().map_or(0, |_| 1) +
self.storage_class_name.as_ref().map_or(0, |_| 1) +
self.storageos.as_ref().map_or(0, |_| 1) +
self.volume_mode.as_ref().map_or(0, |_| 1) +
self.vsphere_volume.as_ref().map_or(0, |_| 1),
)?;
if let Some(value) = &self.access_modes {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "accessModes", value)?;
}
if let Some(value) = &self.aws_elastic_block_store {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "awsElasticBlockStore", value)?;
}
if let Some(value) = &self.azure_disk {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "azureDisk", value)?;
}
if let Some(value) = &self.azure_file {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "azureFile", value)?;
}
if let Some(value) = &self.capacity {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "capacity", value)?;
}
if let Some(value) = &self.cephfs {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "cephfs", value)?;
}
if let Some(value) = &self.cinder {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "cinder", value)?;
}
if let Some(value) = &self.claim_ref {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "claimRef", value)?;
}
if let Some(value) = &self.csi {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "csi", value)?;
}
if let Some(value) = &self.fc {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "fc", value)?;
}
if let Some(value) = &self.flex_volume {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "flexVolume", value)?;
}
if let Some(value) = &self.flocker {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "flocker", value)?;
}
if let Some(value) = &self.gce_persistent_disk {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "gcePersistentDisk", value)?;
}
if let Some(value) = &self.glusterfs {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "glusterfs", value)?;
}
if let Some(value) = &self.host_path {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "hostPath", value)?;
}
if let Some(value) = &self.iscsi {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "iscsi", value)?;
}
if let Some(value) = &self.local {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "local", value)?;
}
if let Some(value) = &self.mount_options {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "mountOptions", value)?;
}
if let Some(value) = &self.nfs {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "nfs", value)?;
}
if let Some(value) = &self.node_affinity {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "nodeAffinity", value)?;
}
if let Some(value) = &self.persistent_volume_reclaim_policy {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "persistentVolumeReclaimPolicy", value)?;
}
if let Some(value) = &self.photon_persistent_disk {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "photonPersistentDisk", value)?;
}
if let Some(value) = &self.portworx_volume {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "portworxVolume", value)?;
}
if let Some(value) = &self.quobyte {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "quobyte", value)?;
}
if let Some(value) = &self.rbd {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "rbd", value)?;
}
if let Some(value) = &self.scale_io {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "scaleIO", value)?;
}
if let Some(value) = &self.storage_class_name {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "storageClassName", value)?;
}
if let Some(value) = &self.storageos {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "storageos", value)?;
}
if let Some(value) = &self.volume_mode {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "volumeMode", value)?;
}
if let Some(value) = &self.vsphere_volume {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "vsphereVolume", value)?;
}
crate::serde::ser::SerializeStruct::end(state)
}
}
#[cfg(feature = "schemars")]
impl crate::schemars::JsonSchema for PersistentVolumeSpec {
fn schema_name() -> String {
"io.k8s.api.core.v1.PersistentVolumeSpec".to_owned()
}
fn json_schema(__gen: &mut crate::schemars::gen::SchemaGenerator) -> crate::schemars::schema::Schema {
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("PersistentVolumeSpec is the specification of a persistent volume.".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::Object))),
object: Some(Box::new(crate::schemars::schema::ObjectValidation {
properties: std::array::IntoIter::new([
(
"accessModes".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::Array))),
array: Some(Box::new(crate::schemars::schema::ArrayValidation {
items: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
})
))),
..Default::default()
})),
..Default::default()
}),
),
(
"awsElasticBlockStore".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::AWSElasticBlockStoreVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"azureDisk".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::AzureDiskVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"azureFile".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::AzureFilePersistentVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("AzureFile represents an Azure File Service mount on the host and bind mount to the pod.".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"capacity".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::Object))),
object: Some(Box::new(crate::schemars::schema::ObjectValidation {
additional_properties: Some(Box::new(__gen.subschema_for::<crate::apimachinery::pkg::api::resource::Quantity>())),
..Default::default()
})),
..Default::default()
}),
),
(
"cephfs".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::CephFSPersistentVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("CephFS represents a Ceph FS mount on the host that shares a pod's lifetime".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"cinder".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::CinderPersistentVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"claimRef".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::ObjectReference>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"csi".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::CSIPersistentVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("CSI represents storage that handled by an external CSI driver (Beta feature).".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"fc".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::FCVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"flexVolume".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::FlexPersistentVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"flocker".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::FlockerVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"gcePersistentDisk".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::GCEPersistentDiskVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"glusterfs".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::GlusterfsVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"hostPath".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::HostPathVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"iscsi".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::ISCSIPersistentVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"local".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::LocalVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Local represents directly-attached storage with node affinity".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"mountOptions".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::Array))),
array: Some(Box::new(crate::schemars::schema::ArrayValidation {
items: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
})
))),
..Default::default()
})),
..Default::default()
}),
),
(
"nfs".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::NFSVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"nodeAffinity".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::VolumeNodeAffinity>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"persistentVolumeReclaimPolicy".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
(
"photonPersistentDisk".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::PhotonPersistentDiskVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"portworxVolume".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::PortworxVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("PortworxVolume represents a portworx volume attached and mounted on kubelets host machine".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"quobyte".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::QuobyteVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Quobyte represents a Quobyte mount on the host that shares a pod's lifetime".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"rbd".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::RBDPersistentVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"scaleIO".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::ScaleIOPersistentVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"storageClassName".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
(
"storageos".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::StorageOSPersistentVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"volumeMode".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
(
"vsphereVolume".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::VsphereVirtualDiskVolumeSource>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
]).collect(),
..Default::default()
})),
..Default::default()
})
}
}
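// Usage sketch (not part of the generated file): because `PersistentVolumeSpec`
// implements `Deserialize`/`Serialize`, it round-trips through JSON with
// `serde_json` (assumed to be available to the caller), e.g.:
//
//     let spec: PersistentVolumeSpec = serde_json::from_str(r#"{
//         "accessModes": ["ReadWriteOnce"],
//         "storageClassName": "standard",
//         "capacity": {"storage": "10Gi"}
//     }"#)?;
//     assert_eq!(spec.storage_class_name.as_deref(), Some("standard"));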
| 64.477381 | 461 | 0.544746 |
e45a4f1dccb7018720f721140950e7d90736e931
| 16,462 |
//! Functions on arrays.
use core::cmp::Ordering;
use num_traits::{FromPrimitive, One, Zero};
use crate::{
alloc::{vec, Vec},
error::AuxErrorInfo,
fns::{extract_array, extract_fn, extract_primitive},
CallContext, ErrorKind, EvalResult, NativeFn, SpannedValue, Value,
};
/// Function generating an array by mapping its indexes.
///
/// # Type
///
/// (using [`arithmetic-typing`](https://docs.rs/arithmetic-typing/) notation)
///
/// ```text
/// (Num, (Num) -> 'T) -> ['T]
/// ```
///
/// # Examples
///
/// ```
/// # use arithmetic_parser::grammars::{F32Grammar, Parse, Untyped};
/// # use arithmetic_eval::{fns, Environment, Value, VariableMap};
/// # fn main() -> anyhow::Result<()> {
/// let program = r#"array(3, |i| 2 * i + 1) == (1, 3, 5)"#;
/// let program = Untyped::<F32Grammar>::parse_statements(program)?;
///
/// let module = Environment::new()
/// .insert_native_fn("array", fns::Array)
/// .compile_module("test_array", &program)?;
/// assert_eq!(module.run()?, Value::Bool(true));
/// # Ok(())
/// # }
/// ```
#[derive(Debug, Clone, Copy, Default)]
pub struct Array;
impl<T> NativeFn<T> for Array
where
T: Clone + Zero + One,
{
fn evaluate<'a>(
&self,
mut args: Vec<SpannedValue<'a, T>>,
ctx: &mut CallContext<'_, 'a, T>,
) -> EvalResult<'a, T> {
ctx.check_args_count(&args, 2)?;
let generation_fn = extract_fn(
ctx,
args.pop().unwrap(),
"`array` requires second arg to be a generation function",
)?;
let len = extract_primitive(
ctx,
args.pop().unwrap(),
"`array` requires first arg to be a number",
)?;
let mut index = T::zero();
let mut array = vec![];
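        // Generate indices 0, 1, 2, ... for as long as `index + 1 <= len` under the
        // configured arithmetic, i.e. roughly `floor(len)` elements for fractional
        // lengths (matching the doc example above and the tests below).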
loop {
let next_index = ctx
.arithmetic()
.add(index.clone(), T::one())
.map_err(|err| ctx.call_site_error(ErrorKind::Arithmetic(err)))?;
let cmp = ctx.arithmetic().partial_cmp(&next_index, &len);
if matches!(cmp, Some(Ordering::Less) | Some(Ordering::Equal)) {
let spanned = ctx.apply_call_span(Value::Prim(index));
array.push(generation_fn.evaluate(vec![spanned], ctx)?);
index = next_index;
} else {
break;
}
}
Ok(Value::Tuple(array))
}
}
/// Function returning array / object length.
///
/// # Type
///
/// (using [`arithmetic-typing`](https://docs.rs/arithmetic-typing/) notation)
///
/// ```text
/// ([T]) -> Num
/// ```
///
/// # Examples
///
/// ```
/// # use arithmetic_parser::grammars::{F32Grammar, Parse, Untyped};
/// # use arithmetic_eval::{fns, Environment, Value, VariableMap};
/// # fn main() -> anyhow::Result<()> {
/// let program = r#"().len() == 0 && (1, 2, 3).len() == 3"#;
/// let program = Untyped::<F32Grammar>::parse_statements(program)?;
///
/// let module = Environment::new()
/// .insert_native_fn("len", fns::Len)
/// .compile_module("test_len", &program)?;
/// assert_eq!(module.run()?, Value::Bool(true));
/// # Ok(())
/// # }
/// ```
#[derive(Debug, Clone, Copy, Default)]
pub struct Len;
impl<T: FromPrimitive> NativeFn<T> for Len {
fn evaluate<'a>(
&self,
mut args: Vec<SpannedValue<'a, T>>,
ctx: &mut CallContext<'_, 'a, T>,
) -> EvalResult<'a, T> {
ctx.check_args_count(&args, 1)?;
let arg = args.pop().unwrap();
let len = match arg.extra {
Value::Tuple(array) => array.len(),
Value::Object(object) => object.len(),
_ => {
let err = ErrorKind::native("`len` requires object or tuple arg");
return Err(ctx
.call_site_error(err)
.with_span(&arg, AuxErrorInfo::InvalidArg));
}
};
let len = T::from_usize(len).ok_or_else(|| {
let err = ErrorKind::native("Cannot convert length to number");
ctx.call_site_error(err)
})?;
Ok(Value::Prim(len))
}
}
/// Map function that evaluates the provided function on each item of the tuple.
///
/// # Type
///
/// (using [`arithmetic-typing`](https://docs.rs/arithmetic-typing/) notation)
///
/// ```text
/// (['T; N], ('T) -> 'U) -> ['U; N]
/// ```
///
/// # Examples
///
/// ```
/// # use arithmetic_parser::grammars::{F32Grammar, Parse, Untyped};
/// # use arithmetic_eval::{fns, Environment, Value, VariableMap};
/// # fn main() -> anyhow::Result<()> {
/// let program = r#"
/// xs = (1, -2, 3, -0.3);
/// map(xs, |x| if(x > 0, x, 0)) == (1, 0, 3, 0)
/// "#;
/// let program = Untyped::<F32Grammar>::parse_statements(program)?;
///
/// let module = Environment::new()
/// .insert_native_fn("if", fns::If)
/// .insert_native_fn("map", fns::Map)
/// .compile_module("test_map", &program)?;
/// assert_eq!(module.run()?, Value::Bool(true));
/// # Ok(())
/// # }
/// ```
#[derive(Debug, Clone, Copy, Default)]
pub struct Map;
impl<T: Clone> NativeFn<T> for Map {
fn evaluate<'a>(
&self,
mut args: Vec<SpannedValue<'a, T>>,
ctx: &mut CallContext<'_, 'a, T>,
) -> EvalResult<'a, T> {
ctx.check_args_count(&args, 2)?;
let map_fn = extract_fn(
ctx,
args.pop().unwrap(),
"`map` requires second arg to be a mapping function",
)?;
let array = extract_array(
ctx,
args.pop().unwrap(),
"`map` requires first arg to be a tuple",
)?;
let mapped: Result<Vec<_>, _> = array
.into_iter()
.map(|value| {
let spanned = ctx.apply_call_span(value);
map_fn.evaluate(vec![spanned], ctx)
})
.collect();
mapped.map(Value::Tuple)
}
}
/// Filter function that evaluates the provided function on each item of the tuple and retains
/// only elements for which the function returned `true`.
///
/// # Type
///
/// (using [`arithmetic-typing`](https://docs.rs/arithmetic-typing/) notation)
///
/// ```text
/// (['T; N], ('T) -> Bool) -> ['T]
/// ```
///
/// # Examples
///
/// ```
/// # use arithmetic_parser::grammars::{F32Grammar, Parse, Untyped};
/// # use arithmetic_eval::{fns, Environment, Value, VariableMap};
/// # fn main() -> anyhow::Result<()> {
/// let program = r#"
/// xs = (1, -2, 3, -7, -0.3);
/// filter(xs, |x| x > -1) == (1, 3, -0.3)
/// "#;
/// let program = Untyped::<F32Grammar>::parse_statements(program)?;
///
/// let module = Environment::new()
/// .insert_native_fn("filter", fns::Filter)
/// .compile_module("test_filter", &program)?;
/// assert_eq!(module.run()?, Value::Bool(true));
/// # Ok(())
/// # }
/// ```
#[derive(Debug, Clone, Copy, Default)]
pub struct Filter;
impl<T: Clone> NativeFn<T> for Filter {
fn evaluate<'a>(
&self,
mut args: Vec<SpannedValue<'a, T>>,
ctx: &mut CallContext<'_, 'a, T>,
) -> EvalResult<'a, T> {
ctx.check_args_count(&args, 2)?;
let filter_fn = extract_fn(
ctx,
args.pop().unwrap(),
"`filter` requires second arg to be a filter function",
)?;
let array = extract_array(
ctx,
args.pop().unwrap(),
"`filter` requires first arg to be a tuple",
)?;
let mut filtered = vec![];
for value in array {
let spanned = ctx.apply_call_span(value.clone());
match filter_fn.evaluate(vec![spanned], ctx)? {
Value::Bool(true) => filtered.push(value),
Value::Bool(false) => { /* do nothing */ }
_ => {
let err = ErrorKind::native(
"`filter` requires filtering function to return booleans",
);
return Err(ctx.call_site_error(err));
}
}
}
Ok(Value::Tuple(filtered))
}
}
/// Reduce (aka fold) function that reduces the provided tuple to a single value.
///
/// # Type
///
/// (using [`arithmetic-typing`](https://docs.rs/arithmetic-typing/) notation)
///
/// ```text
/// (['T], 'Acc, ('Acc, 'T) -> 'Acc) -> 'Acc
/// ```
///
/// # Examples
///
/// ```
/// # use arithmetic_parser::grammars::{F32Grammar, Parse, Untyped};
/// # use arithmetic_eval::{fns, Environment, Value, VariableMap};
/// # fn main() -> anyhow::Result<()> {
/// let program = r#"
/// xs = (1, -2, 3, -7);
/// fold(xs, 1, |acc, x| acc * x) == 42
/// "#;
/// let program = Untyped::<F32Grammar>::parse_statements(program)?;
///
/// let module = Environment::new()
/// .insert_native_fn("fold", fns::Fold)
/// .compile_module("test_fold", &program)?;
/// assert_eq!(module.run()?, Value::Bool(true));
/// # Ok(())
/// # }
/// ```
#[derive(Debug, Clone, Copy, Default)]
pub struct Fold;
impl<T: Clone> NativeFn<T> for Fold {
fn evaluate<'a>(
&self,
mut args: Vec<SpannedValue<'a, T>>,
ctx: &mut CallContext<'_, 'a, T>,
) -> EvalResult<'a, T> {
ctx.check_args_count(&args, 3)?;
let fold_fn = extract_fn(
ctx,
args.pop().unwrap(),
"`fold` requires third arg to be a folding function",
)?;
let acc = args.pop().unwrap().extra;
let array = extract_array(
ctx,
args.pop().unwrap(),
"`fold` requires first arg to be a tuple",
)?;
array.into_iter().try_fold(acc, |acc, value| {
let spanned_args = vec![ctx.apply_call_span(acc), ctx.apply_call_span(value)];
fold_fn.evaluate(spanned_args, ctx)
})
}
}
/// Function that appends a value onto a tuple.
///
/// # Type
///
/// (using [`arithmetic-typing`](https://docs.rs/arithmetic-typing/) notation)
///
/// ```text
/// (['T; N], 'T) -> ['T; N + 1]
/// ```
///
/// # Examples
///
/// ```
/// # use arithmetic_parser::grammars::{F32Grammar, Parse, Untyped};
/// # use arithmetic_eval::{fns, Environment, Value, VariableMap};
/// # fn main() -> anyhow::Result<()> {
/// let program = r#"
/// repeat = |x, times| {
/// (_, acc) = (0, ()).while(
/// |(i, _)| i < times,
/// |(i, acc)| (i + 1, push(acc, x)),
/// );
/// acc
/// };
/// repeat(-2, 3) == (-2, -2, -2) &&
/// repeat((7,), 4) == ((7,), (7,), (7,), (7,))
/// "#;
/// let program = Untyped::<F32Grammar>::parse_statements(program)?;
///
/// let module = Environment::new()
/// .insert_native_fn("while", fns::While)
/// .insert_native_fn("push", fns::Push)
/// .compile_module("test_push", &program)?;
/// assert_eq!(module.run()?, Value::Bool(true));
/// # Ok(())
/// # }
/// ```
#[derive(Debug, Clone, Copy, Default)]
pub struct Push;
impl<T> NativeFn<T> for Push {
fn evaluate<'a>(
&self,
mut args: Vec<SpannedValue<'a, T>>,
ctx: &mut CallContext<'_, 'a, T>,
) -> EvalResult<'a, T> {
ctx.check_args_count(&args, 2)?;
let elem = args.pop().unwrap().extra;
        let mut array = extract_array(
            ctx,
            args.pop().unwrap(),
            "`push` requires first arg to be a tuple",
        )?;
array.push(elem);
Ok(Value::Tuple(array))
}
}
/// Function that merges two tuples.
///
/// # Type
///
/// (using [`arithmetic-typing`](https://docs.rs/arithmetic-typing/) notation)
///
/// ```text
/// (['T], ['T]) -> ['T]
/// ```
///
/// # Examples
///
/// ```
/// # use arithmetic_parser::grammars::{F32Grammar, Parse, Untyped};
/// # use arithmetic_eval::{fns, Environment, Value, VariableMap};
/// # fn main() -> anyhow::Result<()> {
/// let program = r#"
/// // Merges all arguments (which should be tuples) into a single tuple.
/// super_merge = |...xs| fold(xs, (), merge);
/// super_merge((1, 2), (3,), (), (4, 5, 6)) == (1, 2, 3, 4, 5, 6)
/// "#;
/// let program = Untyped::<F32Grammar>::parse_statements(program)?;
///
/// let module = Environment::new()
/// .insert_native_fn("fold", fns::Fold)
/// .insert_native_fn("merge", fns::Merge)
/// .compile_module("test_merge", &program)?;
/// assert_eq!(module.run()?, Value::Bool(true));
/// # Ok(())
/// # }
/// ```
#[derive(Debug, Clone, Copy, Default)]
pub struct Merge;
impl<T: Clone> NativeFn<T> for Merge {
fn evaluate<'a>(
&self,
mut args: Vec<SpannedValue<'a, T>>,
ctx: &mut CallContext<'_, 'a, T>,
) -> EvalResult<'a, T> {
ctx.check_args_count(&args, 2)?;
let second = extract_array(
ctx,
args.pop().unwrap(),
"`merge` requires second arg to be a tuple",
)?;
let mut first = extract_array(
ctx,
args.pop().unwrap(),
"`merge` requires first arg to be a tuple",
)?;
first.extend_from_slice(&second);
Ok(Value::Tuple(first))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
arith::{OrdArithmetic, StdArithmetic, WrappingArithmetic},
Environment, VariableMap,
};
use arithmetic_parser::grammars::{NumGrammar, NumLiteral, Parse, Untyped};
use assert_matches::assert_matches;
fn test_len_function<T: NumLiteral>(arithmetic: &dyn OrdArithmetic<T>)
where
Len: NativeFn<T>,
{
let code = r#"
(1, 2, 3).len() == 3 && ().len() == 0 &&
#{}.len() == 0 && #{ x: 1 }.len() == 1 && #{ x: 1, y: 2 }.len() == 2
"#;
let block = Untyped::<NumGrammar<T>>::parse_statements(code).unwrap();
let mut env = Environment::new();
let module = env
.insert("len", Value::native_fn(Len))
.compile_module("len", &block)
.unwrap();
let output = module.with_arithmetic(arithmetic).run().unwrap();
assert_matches!(output, Value::Bool(true));
}
#[test]
fn len_function_in_floating_point_arithmetic() {
test_len_function::<f32>(&StdArithmetic);
test_len_function::<f64>(&StdArithmetic);
}
#[test]
fn len_function_in_int_arithmetic() {
test_len_function::<u8>(&WrappingArithmetic);
test_len_function::<i8>(&WrappingArithmetic);
test_len_function::<u64>(&WrappingArithmetic);
test_len_function::<i64>(&WrappingArithmetic);
}
#[test]
fn len_function_with_number_overflow() {
let code = "xs.len()";
let block = Untyped::<NumGrammar<i8>>::parse_statements(code).unwrap();
let mut env = Environment::new();
let module = env
.insert("xs", Value::Tuple(vec![Value::Bool(true); 128]))
.insert("len", Value::native_fn(Len))
.compile_module("len", &block)
.unwrap();
let err = module
.with_arithmetic(&WrappingArithmetic)
.run()
.unwrap_err();
assert_matches!(
err.source().kind(),
ErrorKind::NativeCall(msg) if msg.contains("length to number")
);
}
#[test]
fn array_function_in_floating_point_arithmetic() {
let code = r#"
array(0, |_| 1) == () && array(-1, |_| 1) == () &&
array(0.1, |_| 1) == () && array(0.999, |_| 1) == () &&
array(1, |_| 1) == (1,) && array(1.5, |_| 1) == (1,) &&
array(2, |_| 1) == (1, 1) && array(3, |i| i) == (0, 1, 2)
"#;
let block = Untyped::<NumGrammar<f32>>::parse_statements(code).unwrap();
let mut env = Environment::new();
let module = env
.insert("array", Value::native_fn(Array))
.compile_module("array", &block)
.unwrap();
let output = module.with_arithmetic(&StdArithmetic).run().unwrap();
assert_matches!(output, Value::Bool(true));
}
#[test]
fn array_function_in_unsigned_int_arithmetic() {
let code = r#"
array(0, |_| 1) == () && array(1, |_| 1) == (1,) && array(3, |i| i) == (0, 1, 2)
"#;
let block = Untyped::<NumGrammar<u32>>::parse_statements(code).unwrap();
let mut env = Environment::new();
let module = env
.insert("array", Value::native_fn(Array))
.compile_module("array", &block)
.unwrap();
let output = module.with_arithmetic(&WrappingArithmetic).run().unwrap();
assert_matches!(output, Value::Bool(true));
}
}
| 30.150183 | 94 | 0.525635 |
72aed0d7529a5e65ad25b54e728963f2ac216a7b
| 1,784 |
use core::convert::TryFrom;
use hdk::{
self,
holochain_core_types::{
hash::HashString,
entry::{AppEntryValue, Entry},
},
error::{ZomeApiResult, ZomeApiError},
};
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetLinksLoadElement<T> {
pub address: HashString,
pub entry: T
}
pub type GetLinksLoadResult<T> = Vec<GetLinksLoadElement<T>>;
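/// Load the entries behind all links from `base` with the given `tag`.
///
/// Convenience wrapper around `hdk::get_links` followed by `hdk::get_entry`
/// for every returned address; links whose entry cannot be fetched are
/// silently skipped.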
pub fn get_links_and_load<S: Into<String>>(
base: &HashString,
tag: S
) -> ZomeApiResult<GetLinksLoadResult<Entry>> {
hdk::get_links(base, tag)
.map(|result| {
result.addresses().iter()
.map(|address| {
hdk::get_entry(address.to_owned())
.map(|entry: Option<Entry>| {
GetLinksLoadElement{
address: address.to_owned(),
entry: entry.unwrap()
}
})
.ok()
})
.filter_map(|elem| elem)
.collect()
})
}
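/// Like `get_links_and_load`, but additionally converts every loaded app
/// entry into the concrete entry type `R`, failing if any linked entry is not
/// an app entry or cannot be converted.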
pub fn get_links_and_load_type<
S: Into<String>,
R: TryFrom<AppEntryValue>
>(
base: &HashString,
tag: S
) -> ZomeApiResult<GetLinksLoadResult<R>> {
let link_load_results = get_links_and_load(base, tag)?;
link_load_results.iter().map(|get_links_result| {
if let Entry::App(_, entry_value) = get_links_result.entry.clone() {
let entry = R::try_from(entry_value).map_err(|_| ZomeApiError::Internal("fail".to_string()))?;
Ok(GetLinksLoadElement::<R>{
                entry,
address: get_links_result.address.clone()
})
} else { Err(ZomeApiError::Internal("fail".to_string())) }
}).collect::<ZomeApiResult<GetLinksLoadResult<R>>>()
}
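/// Create a pair of links between `a` and `b`, one in each direction, tagged
/// with `tag_a_b` and `tag_b_a` respectively.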
pub fn link_entries_bidir<S: Into<String>>(a: &HashString, b: &HashString, tag_a_b: &str, tag_b_a: S) -> ZomeApiResult<()> {
hdk::link_entries(a, b, tag_a_b)?;
hdk::link_entries(b, a, tag_b_a)?;
Ok(())
}
| 26.235294 | 124 | 0.63565 |
d5984b1bb2b94ab380d76e6f3f3be1fb4fd597b7
| 3,831 |
use bytes::{Buf, BufMut};
use crate::proto::coding::{self, BufExt, BufMutExt};
#[derive(Debug, PartialEq)]
pub enum Error {
Overflow,
UnexpectedEnd,
}
pub fn decode<B: Buf>(size: u8, buf: &mut B) -> Result<(u8, usize), Error> {
assert!(size <= 8);
let mut first = buf.get::<u8>()?;
    // NOTE: the following casts to u8 are intended to trim the most significant bits; they are
    // used as a workaround for shift-overflow errors when size == 8.
let flags = ((first as usize) >> size) as u8;
let mask = 0xFF >> (8 - size);
first &= mask;
// if first < 2usize.pow(size) - 1
if first < mask {
return Ok((flags, first as usize));
}
let mut value = mask as usize;
let mut power = 0usize;
loop {
let byte = buf.get::<u8>()? as usize;
value += (byte & 127) << power;
power += 7;
if byte & 128 == 0 {
break;
}
if power >= MAX_POWER {
return Err(Error::Overflow);
}
}
Ok((flags, value))
}
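/// Encode `value` and `flags` as a prefix integer whose first byte reserves
/// `size` bits for the value (the same scheme that `decode` above reads back).
///
/// Worked example (mirroring the `codec_5_bits` test below): with `size = 5`,
/// `flags = 0b101` and `value = 10`, the value fits into the 5-bit prefix, so
/// the encoding is the single byte `0b1010_1010` (flags in the top three bits,
/// value in the low five).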
pub fn encode<B: BufMut>(size: u8, flags: u8, value: usize, buf: &mut B) {
assert!(size <= 8);
    // NOTE: the following casts to u8 are intended to trim the most significant bits; they are
    // used as a workaround for shift-overflow errors when size == 8.
let mask = !(0xFF << size) as u8;
let flags = ((flags as usize) << size) as u8;
// if value < 2usize.pow(size) - 1
if value < (mask as usize) {
buf.write(flags | value as u8);
return;
}
buf.write(mask | flags);
let mut remaining = value - mask as usize;
while remaining >= 128 {
let rest = (remaining % 128) as u8;
buf.write(rest + 128);
remaining /= 128;
}
buf.write(remaining as u8);
}
#[cfg(target_pointer_width = "64")]
const MAX_POWER: usize = 10 * 7;
#[cfg(target_pointer_width = "32")]
const MAX_POWER: usize = 5 * 7;
impl From<coding::UnexpectedEnd> for Error {
fn from(_: coding::UnexpectedEnd) -> Self {
Error::UnexpectedEnd
}
}
#[cfg(test)]
mod test {
use std::io::Cursor;
fn check_codec(size: u8, flags: u8, value: usize, data: &[u8]) {
let mut buf = Vec::new();
super::encode(size, flags, value, &mut buf);
assert_eq!(buf, data);
let mut read = Cursor::new(&buf);
assert_eq!((flags, value), super::decode(size, &mut read).unwrap());
}
#[test]
fn codec_5_bits() {
check_codec(5, 0b101, 10, &[0b1010_1010]);
check_codec(5, 0b101, 0, &[0b1010_0000]);
check_codec(5, 0b010, 1337, &[0b0101_1111, 154, 10]);
check_codec(5, 0b010, 31, &[0b0101_1111, 0]);
check_codec(
5,
0b010,
usize::max_value(),
&[95, 224, 255, 255, 255, 255, 255, 255, 255, 255, 1],
);
}
#[test]
fn codec_8_bits() {
check_codec(8, 0, 42, &[0b0010_1010]);
check_codec(8, 0, 424_242, &[255, 179, 240, 25]);
check_codec(
8,
0,
usize::max_value(),
&[255, 128, 254, 255, 255, 255, 255, 255, 255, 255, 1],
);
}
#[test]
#[should_panic]
fn size_too_big_value() {
let mut buf = vec![];
super::encode(9, 1, 1, &mut buf);
}
#[test]
#[should_panic]
fn size_too_big_of_size() {
let buf = vec![];
let mut read = Cursor::new(&buf);
super::decode(9, &mut read).unwrap();
}
#[cfg(target_pointer_width = "64")]
#[test]
fn overflow() {
let buf = vec![255, 128, 254, 255, 255, 255, 255, 255, 255, 255, 255, 1];
let mut read = Cursor::new(&buf);
assert!(super::decode(8, &mut read).is_err());
}
#[test]
fn number_never_ends_with_0x80() {
check_codec(4, 0b0001, 143, &[31, 128, 1]);
}
}
| 26.061224 | 95 | 0.535369 |
fe99c2e2ce0251e7c55448f48f13caf48b076a4d
| 30,348 |
//! Implementation of find-usages functionality.
//!
//! It is based on the standard IDE trick: first, we run a fast text search to
//! get a super-set of matches. Then, we confirm each match using precise
//! name resolution.
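//!
//! In pseudocode (a simplified sketch, not the exact APIs used below;
//! `classify`, `node_at` and `report_reference` are stand-ins):
//!
//! ```text
//! for (file, text) in definition.search_scope(db) {
//!     for offset in text.match_indices(name) {                     // fast text scan
//!         if classify(sema, node_at(file, offset)) == definition {  // precise resolution
//!             report_reference(file, offset);
//!         }
//!     }
//! }
//! ```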
use std::{convert::TryInto, mem, sync::Arc};
use base_db::{FileId, FileRange, SourceDatabase, SourceDatabaseExt};
use hir::{
AsAssocItem, DefWithBody, HasAttrs, HasSource, InFile, ModuleSource, Semantics, Visibility,
};
use once_cell::unsync::Lazy;
use rustc_hash::FxHashMap;
use syntax::{ast, match_ast, AstNode, TextRange, TextSize};
use crate::{
defs::{Definition, NameClass, NameRefClass},
RootDatabase,
};
#[derive(Debug, Default, Clone)]
pub struct UsageSearchResult {
pub references: FxHashMap<FileId, Vec<FileReference>>,
}
impl UsageSearchResult {
pub fn is_empty(&self) -> bool {
self.references.is_empty()
}
pub fn len(&self) -> usize {
self.references.len()
}
pub fn iter(&self) -> impl Iterator<Item = (&FileId, &[FileReference])> + '_ {
self.references.iter().map(|(file_id, refs)| (file_id, &**refs))
}
pub fn file_ranges(&self) -> impl Iterator<Item = FileRange> + '_ {
self.references.iter().flat_map(|(&file_id, refs)| {
refs.iter().map(move |&FileReference { range, .. }| FileRange { file_id, range })
})
}
}
impl IntoIterator for UsageSearchResult {
type Item = (FileId, Vec<FileReference>);
type IntoIter = <FxHashMap<FileId, Vec<FileReference>> as IntoIterator>::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.references.into_iter()
}
}
#[derive(Debug, Clone)]
pub struct FileReference {
pub range: TextRange,
pub name: ast::NameLike,
pub category: Option<ReferenceCategory>,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum ReferenceCategory {
// FIXME: Add this variant and delete the `retain_adt_literal_usages` function.
// Create
Write,
Read,
// FIXME: Some day should be able to search in doc comments. Would probably
// need to switch from enum to bitflags then?
// DocComment
}
/// Generally, `search_scope` returns files that might contain references for the element.
/// For `pub(crate)` things it's a crate, for `pub` things it's a crate and dependent crates.
/// In some cases, the location of the references is known to within a `TextRange`,
/// e.g. for things like local variables.
#[derive(Clone, Debug)]
pub struct SearchScope {
entries: FxHashMap<FileId, Option<TextRange>>,
}
impl SearchScope {
fn new(entries: FxHashMap<FileId, Option<TextRange>>) -> SearchScope {
SearchScope { entries }
}
fn crate_graph(db: &RootDatabase) -> SearchScope {
let mut entries = FxHashMap::default();
let graph = db.crate_graph();
for krate in graph.iter() {
let root_file = graph[krate].root_file_id;
let source_root_id = db.file_source_root(root_file);
let source_root = db.source_root(source_root_id);
entries.extend(source_root.iter().map(|id| (id, None)));
}
SearchScope { entries }
}
fn reverse_dependencies(db: &RootDatabase, of: hir::Crate) -> SearchScope {
let mut entries = FxHashMap::default();
for rev_dep in of.transitive_reverse_dependencies(db) {
let root_file = rev_dep.root_file(db);
let source_root_id = db.file_source_root(root_file);
let source_root = db.source_root(source_root_id);
entries.extend(source_root.iter().map(|id| (id, None)));
}
SearchScope { entries }
}
fn krate(db: &RootDatabase, of: hir::Crate) -> SearchScope {
let root_file = of.root_file(db);
let source_root_id = db.file_source_root(root_file);
let source_root = db.source_root(source_root_id);
SearchScope {
entries: source_root.iter().map(|id| (id, None)).collect::<FxHashMap<_, _>>(),
}
}
fn module(db: &RootDatabase, module: hir::Module) -> SearchScope {
let mut entries = FxHashMap::default();
let mut to_visit = vec![module];
let mut is_first = true;
while let Some(module) = to_visit.pop() {
let src = module.definition_source(db);
let file_id = src.file_id.original_file(db);
match src.value {
ModuleSource::Module(m) => {
if is_first {
let range = Some(m.syntax().text_range());
entries.insert(file_id, range);
} else {
// We have already added the enclosing file to the search scope,
// so do nothing.
}
}
ModuleSource::BlockExpr(b) => {
if is_first {
let range = Some(b.syntax().text_range());
entries.insert(file_id, range);
} else {
// We have already added the enclosing file to the search scope,
// so do nothing.
}
}
ModuleSource::SourceFile(_) => {
entries.insert(file_id, None);
}
};
is_first = false;
to_visit.extend(module.children(db));
}
SearchScope { entries }
}
pub fn empty() -> SearchScope {
SearchScope::new(FxHashMap::default())
}
pub fn single_file(file: FileId) -> SearchScope {
SearchScope::new(std::iter::once((file, None)).collect())
}
pub fn file_range(range: FileRange) -> SearchScope {
SearchScope::new(std::iter::once((range.file_id, Some(range.range))).collect())
}
pub fn files(files: &[FileId]) -> SearchScope {
SearchScope::new(files.iter().map(|f| (*f, None)).collect())
}
pub fn intersection(&self, other: &SearchScope) -> SearchScope {
let (mut small, mut large) = (&self.entries, &other.entries);
if small.len() > large.len() {
mem::swap(&mut small, &mut large)
}
let res = small
.iter()
.filter_map(|(file_id, r1)| {
let r2 = large.get(file_id)?;
let r = intersect_ranges(*r1, *r2)?;
Some((*file_id, r))
})
.collect();
return SearchScope::new(res);
fn intersect_ranges(
r1: Option<TextRange>,
r2: Option<TextRange>,
) -> Option<Option<TextRange>> {
match (r1, r2) {
(None, r) | (r, None) => Some(r),
(Some(r1), Some(r2)) => {
let r = r1.intersect(r2)?;
Some(Some(r))
}
}
}
}
}
impl IntoIterator for SearchScope {
type Item = (FileId, Option<TextRange>);
type IntoIter = std::collections::hash_map::IntoIter<FileId, Option<TextRange>>;
fn into_iter(self) -> Self::IntoIter {
self.entries.into_iter()
}
}
impl Definition {
fn search_scope(&self, db: &RootDatabase) -> SearchScope {
let _p = profile::span("search_scope");
if let Definition::BuiltinType(_) = self {
return SearchScope::crate_graph(db);
}
// def is crate root
// FIXME: We don't do searches for crates currently, as a crate does not actually have a single name
if let &Definition::Module(module) = self {
if module.is_crate_root(db) {
return SearchScope::reverse_dependencies(db, module.krate());
}
}
let module = match self.module(db) {
Some(it) => it,
None => return SearchScope::empty(),
};
let InFile { file_id, value: module_source } = module.definition_source(db);
let file_id = file_id.original_file(db);
if let Definition::Local(var) = self {
let def = match var.parent(db) {
DefWithBody::Function(f) => f.source(db).map(|src| src.syntax().cloned()),
DefWithBody::Const(c) => c.source(db).map(|src| src.syntax().cloned()),
DefWithBody::Static(s) => s.source(db).map(|src| src.syntax().cloned()),
};
return match def {
Some(def) => SearchScope::file_range(def.as_ref().original_file_range(db)),
None => SearchScope::single_file(file_id),
};
}
if let Definition::SelfType(impl_) = self {
return match impl_.source(db).map(|src| src.syntax().cloned()) {
Some(def) => SearchScope::file_range(def.as_ref().original_file_range(db)),
None => SearchScope::single_file(file_id),
};
}
if let Definition::GenericParam(hir::GenericParam::LifetimeParam(param)) = self {
let def = match param.parent(db) {
hir::GenericDef::Function(it) => it.source(db).map(|src| src.syntax().cloned()),
hir::GenericDef::Adt(it) => it.source(db).map(|src| src.syntax().cloned()),
hir::GenericDef::Trait(it) => it.source(db).map(|src| src.syntax().cloned()),
hir::GenericDef::TypeAlias(it) => it.source(db).map(|src| src.syntax().cloned()),
hir::GenericDef::Impl(it) => it.source(db).map(|src| src.syntax().cloned()),
hir::GenericDef::Variant(it) => it.source(db).map(|src| src.syntax().cloned()),
hir::GenericDef::Const(it) => it.source(db).map(|src| src.syntax().cloned()),
};
return match def {
Some(def) => SearchScope::file_range(def.as_ref().original_file_range(db)),
None => SearchScope::single_file(file_id),
};
}
if let Definition::Macro(macro_def) = self {
return match macro_def.kind(db) {
hir::MacroKind::Declarative => {
if macro_def.attrs(db).by_key("macro_export").exists() {
SearchScope::reverse_dependencies(db, module.krate())
} else {
SearchScope::krate(db, module.krate())
}
}
hir::MacroKind::BuiltIn => SearchScope::crate_graph(db),
// FIXME: We don't actually see derives in derive attributes as these do not
// expand to something that references the derive macro in the output.
// We could get around this by emitting dummy `use DeriveMacroPathHere as _;` items maybe?
hir::MacroKind::Derive | hir::MacroKind::Attr | hir::MacroKind::ProcMacro => {
SearchScope::reverse_dependencies(db, module.krate())
}
};
}
let vis = self.visibility(db);
if let Some(Visibility::Public) = vis {
return SearchScope::reverse_dependencies(db, module.krate());
}
if let Some(Visibility::Module(module)) = vis {
return SearchScope::module(db, module.into());
}
let range = match module_source {
ModuleSource::Module(m) => Some(m.syntax().text_range()),
ModuleSource::BlockExpr(b) => Some(b.syntax().text_range()),
ModuleSource::SourceFile(_) => None,
};
match range {
Some(range) => SearchScope::file_range(FileRange { file_id, range }),
None => SearchScope::single_file(file_id),
}
}
pub fn usages<'a>(self, sema: &'a Semantics<RootDatabase>) -> FindUsages<'a> {
FindUsages {
local_repr: match self {
Definition::Local(local) => Some(local.representative(sema.db)),
_ => None,
},
def: self,
sema,
scope: None,
include_self_kw_refs: None,
search_self_mod: false,
}
}
}
#[derive(Clone)]
pub struct FindUsages<'a> {
def: Definition,
sema: &'a Semantics<'a, RootDatabase>,
scope: Option<SearchScope>,
include_self_kw_refs: Option<hir::Type>,
local_repr: Option<hir::Local>,
search_self_mod: bool,
}
impl<'a> FindUsages<'a> {
/// Enable searching for `Self` when the definition is a type or `self` for modules.
pub fn include_self_refs(mut self) -> FindUsages<'a> {
self.include_self_kw_refs = def_to_ty(self.sema, &self.def);
self.search_self_mod = true;
self
}
pub fn in_scope(self, scope: SearchScope) -> FindUsages<'a> {
self.set_scope(Some(scope))
}
pub fn set_scope(mut self, scope: Option<SearchScope>) -> FindUsages<'a> {
assert!(self.scope.is_none());
self.scope = scope;
self
}
pub fn at_least_one(&self) -> bool {
let mut found = false;
self.search(&mut |_, _| {
found = true;
true
});
found
}
pub fn all(self) -> UsageSearchResult {
let mut res = UsageSearchResult::default();
self.search(&mut |file_id, reference| {
res.references.entry(file_id).or_default().push(reference);
false
});
res
}
fn search(&self, sink: &mut dyn FnMut(FileId, FileReference) -> bool) {
let _p = profile::span("FindUsages:search");
let sema = self.sema;
let search_scope = {
let base = self.def.search_scope(sema.db);
match &self.scope {
None => base,
Some(scope) => base.intersection(scope),
}
};
let name = match self.def {
// special case crate modules as these do not have a proper name
Definition::Module(module) if module.is_crate_root(self.sema.db) => {
// FIXME: This assumes the crate name is always equal to its display name when it really isn't
module
.krate()
.display_name(self.sema.db)
.map(|crate_name| crate_name.crate_name().as_smol_str().clone())
}
_ => {
let self_kw_refs = || {
self.include_self_kw_refs.as_ref().and_then(|ty| {
ty.as_adt()
.map(|adt| adt.name(self.sema.db))
.or_else(|| ty.as_builtin().map(|builtin| builtin.name()))
})
};
self.def.name(sema.db).or_else(self_kw_refs).map(|it| it.to_smol_str())
}
};
let name = match &name {
Some(s) => s.as_str(),
None => return,
};
// these can't be closures because rust infers the lifetimes wrong ...
fn match_indices<'a>(
text: &'a str,
name: &'a str,
search_range: TextRange,
) -> impl Iterator<Item = TextSize> + 'a {
text.match_indices(name).filter_map(move |(idx, _)| {
let offset: TextSize = idx.try_into().unwrap();
if !search_range.contains_inclusive(offset) {
return None;
}
Some(offset)
})
}
fn scope_files<'a>(
sema: &'a Semantics<RootDatabase>,
scope: &'a SearchScope,
) -> impl Iterator<Item = (Arc<String>, FileId, TextRange)> + 'a {
scope.entries.iter().map(|(&file_id, &search_range)| {
let text = sema.db.file_text(file_id);
let search_range =
search_range.unwrap_or_else(|| TextRange::up_to(TextSize::of(text.as_str())));
(text, file_id, search_range)
})
}
for (text, file_id, search_range) in scope_files(sema, &search_scope) {
let tree = Lazy::new(move || sema.parse(file_id).syntax().clone());
// Search for occurrences of the items name
for offset in match_indices(&text, name, search_range) {
for name in sema.find_nodes_at_offset_with_descend(&tree, offset) {
if match name {
ast::NameLike::NameRef(name_ref) => self.found_name_ref(&name_ref, sink),
ast::NameLike::Name(name) => self.found_name(&name, sink),
ast::NameLike::Lifetime(lifetime) => self.found_lifetime(&lifetime, sink),
} {
return;
}
}
}
// Search for occurrences of the `Self` referring to our type
if let Some(self_ty) = &self.include_self_kw_refs {
for offset in match_indices(&text, "Self", search_range) {
for name_ref in sema.find_nodes_at_offset_with_descend(&tree, offset) {
if self.found_self_ty_name_ref(self_ty, &name_ref, sink) {
return;
}
}
}
}
}
// Search for `super` and `crate` resolving to our module
match self.def {
Definition::Module(module) => {
let scope = search_scope.intersection(&SearchScope::module(self.sema.db, module));
let is_crate_root = module.is_crate_root(self.sema.db);
for (text, file_id, search_range) in scope_files(sema, &scope) {
let tree = Lazy::new(move || sema.parse(file_id).syntax().clone());
for offset in match_indices(&text, "super", search_range) {
for name_ref in sema.find_nodes_at_offset_with_descend(&tree, offset) {
if self.found_name_ref(&name_ref, sink) {
return;
}
}
}
if is_crate_root {
for offset in match_indices(&text, "crate", search_range) {
for name_ref in sema.find_nodes_at_offset_with_descend(&tree, offset) {
if self.found_name_ref(&name_ref, sink) {
return;
}
}
}
}
}
}
_ => (),
}
// search for module `self` references in our module's definition source
match self.def {
Definition::Module(module) if self.search_self_mod => {
let src = module.definition_source(sema.db);
let file_id = src.file_id.original_file(sema.db);
let (file_id, search_range) = match src.value {
ModuleSource::Module(m) => (file_id, Some(m.syntax().text_range())),
ModuleSource::BlockExpr(b) => (file_id, Some(b.syntax().text_range())),
ModuleSource::SourceFile(_) => (file_id, None),
};
let search_range = if let Some(&range) = search_scope.entries.get(&file_id) {
match (range, search_range) {
(None, range) | (range, None) => range,
(Some(range), Some(search_range)) => match range.intersect(search_range) {
Some(range) => Some(range),
None => return,
},
}
} else {
return;
};
let text = sema.db.file_text(file_id);
let search_range =
search_range.unwrap_or_else(|| TextRange::up_to(TextSize::of(text.as_str())));
let tree = Lazy::new(|| sema.parse(file_id).syntax().clone());
for offset in match_indices(&text, "self", search_range) {
for name_ref in sema.find_nodes_at_offset_with_descend(&tree, offset) {
if self.found_self_module_name_ref(&name_ref, sink) {
return;
}
}
}
}
_ => {}
}
}
fn found_self_ty_name_ref(
&self,
self_ty: &hir::Type,
name_ref: &ast::NameRef,
sink: &mut dyn FnMut(FileId, FileReference) -> bool,
) -> bool {
match NameRefClass::classify(self.sema, name_ref) {
Some(NameRefClass::Definition(Definition::SelfType(impl_)))
if impl_.self_ty(self.sema.db) == *self_ty =>
{
let FileRange { file_id, range } = self.sema.original_range(name_ref.syntax());
let reference = FileReference {
range,
name: ast::NameLike::NameRef(name_ref.clone()),
category: None,
};
sink(file_id, reference)
}
_ => false,
}
}
fn found_self_module_name_ref(
&self,
name_ref: &ast::NameRef,
sink: &mut dyn FnMut(FileId, FileReference) -> bool,
) -> bool {
match NameRefClass::classify(self.sema, name_ref) {
Some(NameRefClass::Definition(def @ Definition::Module(_))) if def == self.def => {
let FileRange { file_id, range } = self.sema.original_range(name_ref.syntax());
let reference = FileReference {
range,
name: ast::NameLike::NameRef(name_ref.clone()),
category: None,
};
sink(file_id, reference)
}
_ => false,
}
}
fn found_lifetime(
&self,
lifetime: &ast::Lifetime,
sink: &mut dyn FnMut(FileId, FileReference) -> bool,
) -> bool {
match NameRefClass::classify_lifetime(self.sema, lifetime) {
Some(NameRefClass::Definition(def)) if def == self.def => {
let FileRange { file_id, range } = self.sema.original_range(lifetime.syntax());
let reference = FileReference {
range,
name: ast::NameLike::Lifetime(lifetime.clone()),
category: None,
};
sink(file_id, reference)
}
_ => false,
}
}
fn found_name_ref(
&self,
name_ref: &ast::NameRef,
sink: &mut dyn FnMut(FileId, FileReference) -> bool,
) -> bool {
match NameRefClass::classify(self.sema, name_ref) {
Some(NameRefClass::Definition(def @ Definition::Local(local)))
if matches!(
self.local_repr, Some(repr) if repr == local.representative(self.sema.db)
) =>
{
let FileRange { file_id, range } = self.sema.original_range(name_ref.syntax());
let reference = FileReference {
range,
name: ast::NameLike::NameRef(name_ref.clone()),
category: ReferenceCategory::new(&def, name_ref),
};
sink(file_id, reference)
}
Some(NameRefClass::Definition(def)) if def == self.def => {
let FileRange { file_id, range } = self.sema.original_range(name_ref.syntax());
let reference = FileReference {
range,
name: ast::NameLike::NameRef(name_ref.clone()),
category: ReferenceCategory::new(&def, name_ref),
};
sink(file_id, reference)
}
Some(NameRefClass::Definition(def)) if self.include_self_kw_refs.is_some() => {
if self.include_self_kw_refs == def_to_ty(self.sema, &def) {
let FileRange { file_id, range } = self.sema.original_range(name_ref.syntax());
let reference = FileReference {
range,
name: ast::NameLike::NameRef(name_ref.clone()),
category: ReferenceCategory::new(&def, name_ref),
};
sink(file_id, reference)
} else {
false
}
}
Some(NameRefClass::FieldShorthand { local_ref: local, field_ref: field }) => {
let field = Definition::Field(field);
let FileRange { file_id, range } = self.sema.original_range(name_ref.syntax());
let access = match self.def {
Definition::Field(_) if field == self.def => {
ReferenceCategory::new(&field, name_ref)
}
Definition::Local(_) if matches!(self.local_repr, Some(repr) if repr == local.representative(self.sema.db)) => {
ReferenceCategory::new(&Definition::Local(local), name_ref)
}
_ => return false,
};
let reference = FileReference {
range,
name: ast::NameLike::NameRef(name_ref.clone()),
category: access,
};
sink(file_id, reference)
}
_ => false,
}
}
fn found_name(
&self,
name: &ast::Name,
sink: &mut dyn FnMut(FileId, FileReference) -> bool,
) -> bool {
match NameClass::classify(self.sema, name) {
Some(NameClass::PatFieldShorthand { local_def: _, field_ref })
if matches!(
self.def, Definition::Field(_) if Definition::Field(field_ref) == self.def
) =>
{
let FileRange { file_id, range } = self.sema.original_range(name.syntax());
let reference = FileReference {
range,
name: ast::NameLike::Name(name.clone()),
// FIXME: mutable patterns should have `Write` access
category: Some(ReferenceCategory::Read),
};
sink(file_id, reference)
}
Some(NameClass::ConstReference(def)) if self.def == def => {
let FileRange { file_id, range } = self.sema.original_range(name.syntax());
let reference = FileReference {
range,
name: ast::NameLike::Name(name.clone()),
category: None,
};
sink(file_id, reference)
}
Some(NameClass::Definition(def @ Definition::Local(local))) if def != self.def => {
if matches!(
self.local_repr,
Some(repr) if local.representative(self.sema.db) == repr
) {
let FileRange { file_id, range } = self.sema.original_range(name.syntax());
let reference = FileReference {
range,
name: ast::NameLike::Name(name.clone()),
category: None,
};
return sink(file_id, reference);
}
false
}
            // Resolve trait impl function definitions to the trait definition's version when `self.def` is the definition in the trait itself
Some(NameClass::Definition(def)) if def != self.def => {
/* poor man's try block */
(|| {
let this_trait = self
.def
.as_assoc_item(self.sema.db)?
.containing_trait_or_trait_impl(self.sema.db)?;
let trait_ = def
.as_assoc_item(self.sema.db)?
.containing_trait_or_trait_impl(self.sema.db)?;
(trait_ == this_trait && self.def.name(self.sema.db) == def.name(self.sema.db))
.then(|| {
let FileRange { file_id, range } =
self.sema.original_range(name.syntax());
let reference = FileReference {
range,
name: ast::NameLike::Name(name.clone()),
category: None,
};
sink(file_id, reference)
})
})()
.unwrap_or(false)
}
_ => false,
}
}
}
fn def_to_ty(sema: &Semantics<RootDatabase>, def: &Definition) -> Option<hir::Type> {
match def {
Definition::Adt(adt) => Some(adt.ty(sema.db)),
Definition::TypeAlias(it) => Some(it.ty(sema.db)),
Definition::BuiltinType(it) => Some(it.ty(sema.db)),
Definition::SelfType(it) => Some(it.self_ty(sema.db)),
_ => None,
}
}
impl ReferenceCategory {
fn new(def: &Definition, r: &ast::NameRef) -> Option<ReferenceCategory> {
// Only Locals and Fields have accesses for now.
if !matches!(def, Definition::Local(_) | Definition::Field(_)) {
return None;
}
let mode = r.syntax().ancestors().find_map(|node| {
match_ast! {
match node {
ast::BinExpr(expr) => {
if matches!(expr.op_kind()?, ast::BinaryOp::Assignment { .. }) {
// If the variable or field ends on the LHS's end then it's a Write (covers fields and locals).
// FIXME: This is not terribly accurate.
if let Some(lhs) = expr.lhs() {
if lhs.syntax().text_range().end() == r.syntax().text_range().end() {
return Some(ReferenceCategory::Write);
}
}
}
Some(ReferenceCategory::Read)
},
_ => None
}
}
});
// Default Locals and Fields to read
mode.or(Some(ReferenceCategory::Read))
}
}
| 39.209302 | 132 | 0.509984 |
719e447219b70cd761e754668092a9937058bd00
| 2,910 |
// Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
//! Common methods needed in subtle implementations.
use crate::TinkError;
use digest::Digest;
use subtle::ConstantTimeEq;
use tink_proto::HashType;
mod hkdf;
pub use self::hkdf::*;
pub mod random;
/// Return the digest size of the specified hash algorithm.
pub fn get_hash_digest_size(hash: HashType) -> Result<usize, TinkError> {
match hash {
HashType::Sha1 => Ok(20),
HashType::Sha224 => Ok(28),
HashType::Sha256 => Ok(32),
HashType::Sha384 => Ok(48),
HashType::Sha512 => Ok(64),
_ => Err("invalid hash algorithm".into()),
}
}
/// Hash function object.
pub enum HashFunc {
Sha1(sha1::Sha1),
Sha224(sha2::Sha224),
Sha256(sha2::Sha256),
Sha384(sha2::Sha384),
Sha512(sha2::Sha512),
}
/// Return the corresponding hash function of the given hash.
pub fn get_hash_func(hash: HashType) -> Option<HashFunc> {
match hash {
HashType::Sha1 => Some(HashFunc::Sha1(sha1::Sha1::new())),
HashType::Sha224 => Some(HashFunc::Sha224(sha2::Sha224::new())),
HashType::Sha256 => Some(HashFunc::Sha256(sha2::Sha256::new())),
HashType::Sha384 => Some(HashFunc::Sha384(sha2::Sha384::new())),
HashType::Sha512 => Some(HashFunc::Sha512(sha2::Sha512::new())),
_ => None,
}
}
/// Calculate a hash of the given data using the given hash function.
pub fn compute_hash(hash_fn: &mut HashFunc, data: &[u8]) -> Result<Vec<u8>, TinkError> {
Ok(match hash_fn {
HashFunc::Sha1(h) => compute_hash_with(h, data),
HashFunc::Sha224(h) => compute_hash_with(h, data),
HashFunc::Sha256(h) => compute_hash_with(h, data),
HashFunc::Sha384(h) => compute_hash_with(h, data),
HashFunc::Sha512(h) => compute_hash_with(h, data),
})
}
/// Calculate a hash of the given data with the given hash function.
fn compute_hash_with<T>(hash_func: &mut T, data: &[u8]) -> Vec<u8>
where
T: digest::Digest,
{
hash_func.reset();
hash_func.update(data);
hash_func.finalize_reset().to_vec()
}
/// Compare two slices in constant time. Return `true` if they are equal, `false` otherwise.
pub fn constant_time_compare(left: &[u8], right: &[u8]) -> bool {
left.ct_eq(right).into()
}
| 33.837209 | 92 | 0.644674 |
f7a11cc0ac7507186197baff9c0c4c483a7b697e
| 12,041 |
// This module implements the definition of the command line app.
//
// It must not have any other imports because it is also included by the
// build.rs file to automatically generate the completion scripts.
use clap::{App, AppSettings, Arg, ArgGroup, Shell};
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
pub const ABOUT: &str = "The official Sentry Relay.";
pub fn make_app() -> App<'static, 'static> {
App::new("relay")
.global_setting(AppSettings::UnifiedHelpMessage)
.global_setting(AppSettings::DisableHelpSubcommand)
.setting(AppSettings::SubcommandRequiredElseHelp)
.setting(AppSettings::GlobalVersion)
.max_term_width(79)
.help_message("Print this help message.")
.version(VERSION)
.version_message("Print version information.")
.about(ABOUT)
.arg(
Arg::with_name("config")
.value_name("CONFIG")
.long("config")
.short("c")
.global(true)
.help("The path to the config folder."),
)
.subcommand(
App::new("run")
.about("Run the relay")
.after_help(
"This runs the relay in the foreground until it's shut down. It will bind \
to the port and network interface configured in the config file.",
)
.arg(
Arg::with_name("secret_key")
.long("secret-key")
.short("s")
.value_name("KEY")
.requires("public_key")
.help("The secret key to set"),
)
.arg(
Arg::with_name("public_key")
.long("public-key")
.short("p")
.value_name("KEY")
.requires("secret_key")
.help("The public key to set"),
)
.arg(
Arg::with_name("id")
.long("id")
.short("i")
.value_name("RELAY_ID")
.help("The relay ID to set"),
)
.arg(
Arg::with_name("upstream")
.value_name("UPSTREAM_URL")
.takes_value(true)
.short("u")
.long("upstream")
.help("The upstream server URL."),
)
.arg(
Arg::with_name("host")
.value_name("HOST")
.takes_value(true)
.short("H")
.long("host")
.help("The host dns name."),
)
.arg(
Arg::with_name("port")
.value_name("PORT")
.takes_value(true)
.short("P")
.long("port")
.help("The server port."),
)
.arg(
Arg::with_name("processing")
.long("processing")
.help("Enable processing."),
)
.arg(
Arg::with_name("no_processing")
.long("no-processing")
.help("Disable processing."),
)
.group(
ArgGroup::with_name("processing_group")
.args(&["processing", "no_processing"])
.multiple(false),
)
.arg(
Arg::with_name("kafka_broker_url")
.value_name("KAFKA_BROKER_URL")
.takes_value(true)
.long("kafka-broker-url")
.help("Kafka broker URL."),
)
.arg(
Arg::with_name("redis_url")
.value_name("REDIS_URL")
.takes_value(true)
.long("redis-url")
.help("Redis server URL."),
)
.arg(
Arg::with_name("source_id")
.value_name("SOURCE_ID")
.takes_value(true)
.long("source-id")
.env("RELAY_SOURCE_ID")
.help("Names the current relay in the outcome source."),
),
)
.subcommand(
App::new("credentials")
.setting(AppSettings::SubcommandRequiredElseHelp)
.about("Manage the relay credentials")
.after_help(
"This command can be used to manage the stored credentials of \
the relay. These credentials are used to authenticate with the \
upstream sentry. A sentry organization trusts a certain public \
key and each relay is identified with a unique relay ID.\n\
\n\
Multiple relays can share the same public/secret key pair for as \
long as they use different relay IDs. Once a relay (as identified \
by the ID) has signed in with a certain key it cannot be changed \
any more.",
)
.subcommand(
App::new("generate")
.about("Generate new credentials")
.after_help(
"This generates new credentials for the relay and stores \
them. In case the relay already has credentials stored \
this command will error unless the '--overwrite' option \
has been passed.",
)
.arg(
Arg::with_name("overwrite")
.long("overwrite")
.help("Overwrite already existing credentials instead of failing"),
)
.arg(
Arg::with_name("stdout")
.long("stdout")
.help("Write credentials to stdout instead of credentials.json"),
),
)
.subcommand(
App::new("remove")
.about("Remove credentials")
.after_help(
"This command removes already stored credentials from the \
relay.",
)
.arg(
Arg::with_name("yes")
.long("yes")
.help("Do not prompt for confirmation"),
),
)
.subcommand(
App::new("show")
.about("Show currently stored credentials.")
.after_help("This prints out the agent ID and public key."),
)
.subcommand(
App::new("set")
.about("Set new credentials")
.after_help(
"Credentials can be stored by providing them on the command \
line. If just an agent id (or secret/public key pair) is \
                         provided, that part of the credentials is overwritten. If \
no credentials are stored yet at all and no parameters are \
                         supplied, the command will prompt for the appropriate values.",
)
.arg(
Arg::with_name("secret_key")
.long("secret-key")
.short("s")
.value_name("KEY")
.requires("public_key")
.help("The secret key to set"),
)
.arg(
Arg::with_name("public_key")
.long("public-key")
.short("p")
.value_name("KEY")
.requires("secret_key")
.help("The public key to set"),
)
.arg(
Arg::with_name("id")
.long("id")
.short("i")
.value_name("RELAY_ID")
.help("The relay ID to set"),
),
),
)
.subcommand(
App::new("config")
.about("Manage the relay config")
.after_help(
"This command provides basic config management. It can be \
used primarily to initialize a new relay config and to \
print out the current config.",
)
.setting(AppSettings::SubcommandRequiredElseHelp)
.subcommand(
App::new("init")
.about("Initialize a new relay config")
.after_help(
"For new relay installations this will guide through \
the initial config process and create the necessary \
files. It will create an initial config as well as \
set of credentials.",
),
)
.subcommand(
App::new("show")
.about("Show the entire config out for debugging purposes")
.after_help(
"This dumps out the entire config including the values \
which are not in the config file but filled in from \
defaults. The default output format is YAML but \
a debug format can also be specific which is useful \
to understand how the relay interprets the individual \
values.",
)
.arg(
Arg::with_name("format")
.short("f")
.long("format")
.possible_values(&["debug", "yaml"])
.default_value("yaml")
.help("The output format"),
),
),
)
.subcommand(
App::new("generate-completions")
.about("Generate shell completion file")
.after_help(
"This generates a completions file for the shell of choice. \
The default selection will be an educated guess for the currently \
running shell.",
)
.arg(
Arg::with_name("format")
.short("f")
.long("format")
.value_name("FORMAT")
.possible_values(&Shell::variants()[..])
.help(
"Explicitly pick the shell to generate a completion file \
for. The default is autodetection",
),
),
)
}
| 44.596296 | 99 | 0.390167 |
e44067f73a3f9967164249e91ad633530035ab05
| 1,379 |
use crate::plan::barriers::NoBarrier;
use crate::plan::mutator_context::Mutator;
use crate::plan::mutator_context::MutatorConfig;
use crate::plan::nogc::NoGC;
use crate::plan::AllocationSemantics as AllocationType;
use crate::util::alloc::allocators::{AllocatorSelector, Allocators};
use crate::util::OpaquePointer;
use crate::vm::VMBinding;
use enum_map::enum_map;
use enum_map::EnumMap;
lazy_static! {
pub static ref ALLOCATOR_MAPPING: EnumMap<AllocationType, AllocatorSelector> = enum_map! {
AllocationType::Default | AllocationType::Immortal | AllocationType::Code | AllocationType::ReadOnly | AllocationType::Los => AllocatorSelector::BumpPointer(0),
};
}
pub fn nogc_mutator_noop<VM: VMBinding>(_mutator: &mut Mutator<NoGC<VM>>, _tls: OpaquePointer) {
unreachable!();
}
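/// Create a mutator for the `NoGC` plan. Every allocation semantic is mapped
/// to the bump-pointer allocator over `nogc_space` (see `ALLOCATOR_MAPPING`),
/// and the prepare/release hooks point at `nogc_mutator_noop`, which is never
/// expected to be called since `NoGC` does not collect.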
pub fn create_nogc_mutator<VM: VMBinding>(
mutator_tls: OpaquePointer,
plan: &'static NoGC<VM>,
) -> Mutator<NoGC<VM>> {
let config = MutatorConfig {
allocator_mapping: &*ALLOCATOR_MAPPING,
space_mapping: box vec![(AllocatorSelector::BumpPointer(0), &plan.nogc_space)],
prepare_func: &nogc_mutator_noop,
release_func: &nogc_mutator_noop,
};
Mutator {
allocators: Allocators::<VM>::new(mutator_tls, plan, &config.space_mapping),
barrier: box NoBarrier,
mutator_tls,
config,
plan,
}
}
| 33.634146 | 168 | 0.707034 |
c1de59afaa6abf3ad75d326f773803e491cc0903
| 5,981 |
use crate::cosmos_entity::{add_as_partition_key_header_serialized2, serialize_partition_key};
use crate::headers::from_headers::*;
use crate::prelude::*;
use crate::resources::document::DocumentAttributes;
use crate::ResourceQuota;
use azure_core::headers::{etag_from_headers, session_token_from_headers};
use azure_core::prelude::*;
use chrono::{DateTime, Utc};
use http::StatusCode;
use serde::Serialize;
use std::convert::TryFrom;
use azure_core::{collect_pinned_stream, Request as HttpRequest, Response as HttpResponse};
#[derive(Debug, Clone)]
pub struct CreateDocumentOptions {
is_upsert: IsUpsert,
indexing_directive: IndexingDirective,
if_match_condition: Option<IfMatchCondition>,
if_modified_since: Option<IfModifiedSince>,
consistency_level: Option<ConsistencyLevel>,
allow_tentative_writes: TentativeWritesAllowance,
partition_key: Option<String>,
}
impl CreateDocumentOptions {
pub fn new() -> Self {
Self {
is_upsert: IsUpsert::No,
indexing_directive: IndexingDirective::Default,
if_match_condition: None,
if_modified_since: None,
consistency_level: None,
allow_tentative_writes: TentativeWritesAllowance::Deny,
partition_key: None,
}
}
setters! {
consistency_level: ConsistencyLevel => Some(consistency_level),
if_match_condition: IfMatchCondition => Some(if_match_condition),
if_modified_since: DateTime<Utc> => Some(IfModifiedSince::new(if_modified_since)),
allow_tentative_writes: TentativeWritesAllowance,
is_upsert: bool => if is_upsert { IsUpsert::Yes } else { IsUpsert::No },
indexing_directive: IndexingDirective,
}
pub fn partition_key<PK: Serialize>(
mut self,
partition_key: &PK,
) -> Result<Self, serde_json::Error> {
self.partition_key = Some(serialize_partition_key(partition_key)?);
Ok(self)
}
pub(crate) fn decorate_request<'b, DOC>(
&self,
req: &mut HttpRequest,
document: &'b DOC,
) -> crate::Result<()>
where
DOC: Serialize + CosmosEntity<'b>,
{
let serialized = serde_json::to_string(document)?;
let partition_key = match &self.partition_key {
Some(s) => s.clone(),
None => serialize_partition_key(&document.partition_key())?,
};
add_as_partition_key_header_serialized2(&partition_key, req);
azure_core::headers::add_optional_header2(&self.if_match_condition, req)?;
azure_core::headers::add_optional_header2(&self.if_modified_since, req)?;
azure_core::headers::add_optional_header2(&self.consistency_level, req)?;
azure_core::headers::add_mandatory_header2(&self.is_upsert, req)?;
azure_core::headers::add_mandatory_header2(&self.indexing_directive, req)?;
azure_core::headers::add_mandatory_header2(&self.allow_tentative_writes, req)?;
req.set_body(bytes::Bytes::from(serialized).into());
Ok(())
}
}
#[derive(Debug, Clone)]
pub struct CreateDocumentResponse {
pub document_attributes: DocumentAttributes,
pub is_update: bool,
pub last_state_change: DateTime<Utc>,
pub etag: String,
pub resource_quota: Vec<ResourceQuota>,
pub resource_usage: Vec<ResourceQuota>,
pub lsn: u64,
pub schema_version: String,
pub alt_content_path: String,
pub content_path: String,
pub quorum_acked_lsn: u64,
pub current_write_quorum: u64,
pub current_replica_set_size: u64,
pub role: u32,
pub global_committed_lsn: u64,
pub number_of_read_regions: u32,
pub transport_request_id: u64,
pub cosmos_llsn: u64,
pub cosmos_quorum_acked_llsn: u64,
pub session_token: String,
pub charge: f64,
pub service_version: String,
pub activity_id: uuid::Uuid,
pub gateway_version: String,
pub date: DateTime<Utc>,
}
impl CreateDocumentResponse {
pub async fn try_from(response: HttpResponse) -> crate::Result<Self> {
let (status_code, headers, pinned_stream) = response.deconstruct();
let body = collect_pinned_stream(pinned_stream).await?;
Ok(CreateDocumentResponse {
is_update: status_code == StatusCode::OK,
last_state_change: last_state_change_from_headers(&headers)?,
etag: etag_from_headers(&headers)?,
resource_quota: resource_quota_from_headers(&headers)?,
resource_usage: resource_usage_from_headers(&headers)?,
lsn: lsn_from_headers(&headers)?,
schema_version: schema_version_from_headers(&headers)?.to_owned(),
alt_content_path: alt_content_path_from_headers(&headers)?.to_owned(),
content_path: content_path_from_headers(&headers)?.to_owned(),
quorum_acked_lsn: quorum_acked_lsn_from_headers(&headers)?,
current_write_quorum: current_write_quorum_from_headers(&headers)?,
current_replica_set_size: current_replica_set_size_from_headers(&headers)?,
role: role_from_headers(&headers)?,
global_committed_lsn: global_committed_lsn_from_headers(&headers)?,
number_of_read_regions: number_of_read_regions_from_headers(&headers)?,
transport_request_id: transport_request_id_from_headers(&headers)?,
cosmos_llsn: cosmos_llsn_from_headers(&headers)?,
cosmos_quorum_acked_llsn: cosmos_quorum_acked_llsn_from_headers(&headers)?,
session_token: session_token_from_headers(&headers)?,
charge: request_charge_from_headers(&headers)?,
service_version: service_version_from_headers(&headers)?.to_owned(),
activity_id: activity_id_from_headers(&headers)?,
gateway_version: gateway_version_from_headers(&headers)?.to_owned(),
date: date_from_headers(&headers)?,
document_attributes: DocumentAttributes::try_from(body)?,
})
}
}
| 40.412162 | 93 | 0.694867 |
48870bff3104ff19c0200b361cd969cb080229da
| 8,851 |
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Depth and stencil operations description.
//!
//! After the fragment shader has finished running, each fragment goes through the depth
//! and stencil tests.
//!
//! The depth test passes or fails depending on how the depth value of each fragment compares
//! to the existing depth value in the depth buffer at that fragment's location. Depth values
//! are always between 0.0 and 1.0.
//!
//! The stencil test passes or fails depending on how a reference value compares to the existing
//! value in the stencil buffer at each fragment's location. Depending on the outcome of the
//! depth and stencil tests, the value of the stencil buffer at that location can be updated.
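//!
//! As a rough usage sketch (construction only; attaching the state to a
//! pipeline is out of scope here):
//!
//! ```text
//! // Common "closer fragments win" depth test, stencil test disabled:
//! let ds = DepthStencil::simple_depth_test();
//!
//! // Or spelled out explicitly:
//! let ds = DepthStencil {
//!     depth_compare: Compare::Less,
//!     depth_write: true,
//!     depth_bounds_test: DepthBounds::Disabled,
//!     stencil_front: Default::default(),
//!     stencil_back: Default::default(),
//! };
//! ```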
use std::ops::Range;
use std::u32;
use vk;
/// Configuration of the depth and stencil tests.
#[derive(Debug, Clone)]
pub struct DepthStencil {
/// Comparison to use between the depth value of each fragment and the depth value currently
/// in the depth buffer.
pub depth_compare: Compare,
/// If `true`, then the value in the depth buffer will be updated when the depth test succeeds.
pub depth_write: bool,
/// Allows you to ask the GPU to exclude fragments that are outside of a certain range. This is
/// done in addition to the regular depth test.
pub depth_bounds_test: DepthBounds,
/// Stencil operations to use for points, lines and triangles whose front is facing the user.
pub stencil_front: Stencil,
/// Stencil operations to use for triangles whose back is facing the user.
pub stencil_back: Stencil,
}
impl DepthStencil {
/// Creates a `DepthStencil` where both the depth and stencil tests are disabled and have
/// no effect.
#[inline]
pub fn disabled() -> DepthStencil {
DepthStencil {
depth_write: false,
depth_compare: Compare::Always,
depth_bounds_test: DepthBounds::Disabled,
stencil_front: Default::default(),
stencil_back: Default::default(),
}
}
/// Creates a `DepthStencil` with a `Less` depth test, `depth_write` set to true, and stencil
/// testing disabled.
#[inline]
pub fn simple_depth_test() -> DepthStencil {
DepthStencil {
depth_write: true,
depth_compare: Compare::Less,
depth_bounds_test: DepthBounds::Disabled,
stencil_front: Default::default(),
stencil_back: Default::default(),
}
}
}
impl Default for DepthStencil {
#[inline]
fn default() -> DepthStencil {
DepthStencil::disabled()
}
}
/// Configuration of a stencil test.
#[derive(Debug, Copy, Clone)]
pub struct Stencil {
/// The comparison to perform between the existing stencil value in the stencil buffer, and
/// the reference value (given by `reference`).
pub compare: Compare,
/// The operation to perform when both the depth test and the stencil test passed.
pub pass_op: StencilOp,
/// The operation to perform when the stencil test failed.
pub fail_op: StencilOp,
/// The operation to perform when the stencil test passed but the depth test failed.
pub depth_fail_op: StencilOp,
/// Selects the bits of the unsigned integer stencil values participating in the stencil test.
///
/// Ignored if `compare` is `Never` or `Always`.
///
/// If `None`, then this value is dynamic and will need to be set when drawing. Doesn't apply
/// if `compare` is `Never` or `Always`.
///
/// Note that if this value is `Some` in `stencil_front`, it must also be `Some` in
/// `stencil_back` (but the content can be different). If this value is `None` in
/// `stencil_front`, then it must also be `None` in `stencil_back`. This rule doesn't apply
/// if `compare` is `Never` or `Always`.
pub compare_mask: Option<u32>,
/// Selects the bits of the unsigned integer stencil values updated by the stencil test in the
/// stencil framebuffer attachment.
///
/// If `None`, then this value is dynamic and will need to be set when drawing.
///
/// Note that if this value is `Some` in `stencil_front`, it must also be `Some` in
/// `stencil_back` (but the content can be different). If this value is `None` in
/// `stencil_front`, then it must also be `None` in `stencil_back`.
pub write_mask: Option<u32>,
/// Reference value that is used in the unsigned stencil comparison.
///
/// If `None`, then this value is dynamic and will need to be set when drawing.
///
/// Note that if this value is `Some` in `stencil_front`, it must also be `Some` in
/// `stencil_back` (but the content can be different). If this value is `None` in
/// `stencil_front`, then it must also be `None` in `stencil_back`.
pub reference: Option<u32>,
}
impl Stencil {
/// Returns true if the stencil operation will always result in `Keep`.
#[inline]
pub fn always_keep(&self) -> bool {
match self.compare {
Compare::Always => {
self.pass_op == StencilOp::Keep && self.depth_fail_op == StencilOp::Keep
}
Compare::Never => self.fail_op == StencilOp::Keep,
_ => {
self.pass_op == StencilOp::Keep
&& self.fail_op == StencilOp::Keep
&& self.depth_fail_op == StencilOp::Keep
}
}
}
}
impl Default for Stencil {
#[inline]
fn default() -> Stencil {
Stencil {
compare: Compare::Never,
pass_op: StencilOp::Keep,
fail_op: StencilOp::Keep,
depth_fail_op: StencilOp::Keep,
compare_mask: Some(u32::MAX),
write_mask: Some(u32::MAX),
reference: Some(u32::MAX),
}
}
}
/// Operation to perform after the depth and stencil tests.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(u32)]
pub enum StencilOp {
Keep = vk::STENCIL_OP_KEEP,
Zero = vk::STENCIL_OP_ZERO,
Replace = vk::STENCIL_OP_REPLACE,
IncrementAndClamp = vk::STENCIL_OP_INCREMENT_AND_CLAMP,
DecrementAndClamp = vk::STENCIL_OP_DECREMENT_AND_CLAMP,
Invert = vk::STENCIL_OP_INVERT,
IncrementAndWrap = vk::STENCIL_OP_INCREMENT_AND_WRAP,
DecrementAndWrap = vk::STENCIL_OP_DECREMENT_AND_WRAP,
}
/// Enum to specify which stencil state to use
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(u32)]
pub enum StencilFaceFlags {
StencilFaceFrontBit = vk::STENCIL_FACE_FRONT_BIT,
StencilFaceBackBit = vk::STENCIL_FACE_BACK_BIT,
StencilFrontAndBack = vk::STENCIL_FRONT_AND_BACK,
}
/// Container for dynamic StencilFaceFlags and value
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct DynamicStencilValue {
pub face: StencilFaceFlags,
pub value: u32,
}
/// Allows you to ask the GPU to exclude fragments that are outside of a certain range.
#[derive(Debug, Clone, PartialEq)]
pub enum DepthBounds {
/// The test is disabled. All fragments pass the depth bounds test.
Disabled,
/// Fragments that are within the given range do pass the test. Values are depth values
/// between 0.0 and 1.0.
Fixed(Range<f32>),
/// The depth bounds test is enabled, but the range will need to specified when you submit
/// a draw command.
Dynamic,
}
impl DepthBounds {
/// Returns true if equal to `DepthBounds::Dynamic`.
#[inline]
pub fn is_dynamic(&self) -> bool {
match self {
&DepthBounds::Dynamic => true,
_ => false,
}
}
}
/// Specifies how two values should be compared to decide whether a test passes or fails.
///
/// Used for both depth testing and stencil testing.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(u32)]
pub enum Compare {
/// The test never passes.
Never = vk::COMPARE_OP_NEVER,
/// The test passes if `value < reference_value`.
Less = vk::COMPARE_OP_LESS,
/// The test passes if `value == reference_value`.
Equal = vk::COMPARE_OP_EQUAL,
/// The test passes if `value <= reference_value`.
LessOrEqual = vk::COMPARE_OP_LESS_OR_EQUAL,
/// The test passes if `value > reference_value`.
Greater = vk::COMPARE_OP_GREATER,
/// The test passes if `value != reference_value`.
NotEqual = vk::COMPARE_OP_NOT_EQUAL,
/// The test passes if `value >= reference_value`.
GreaterOrEqual = vk::COMPARE_OP_GREATER_OR_EQUAL,
/// The test always passes.
Always = vk::COMPARE_OP_ALWAYS,
}
| 36.27459 | 99 | 0.662185 |
289272504fc7d97065d2f7d1d1fc326156cadb57
| 7,423 |
// Copyright (c) 2017 Colin Finck, RWTH Aachen University
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use alloc::rc::Rc;
use core::cell::RefCell;
use collections::{DoublyLinkedList, Node};
use mm;
use logging::*;
pub struct FreeListEntry {
pub start: usize,
pub end: usize,
}
pub struct FreeList {
pub list: DoublyLinkedList<FreeListEntry>,
}
impl FreeList {
pub const fn new() -> Self {
Self { list: DoublyLinkedList::new() }
}
pub fn allocate(&mut self, size: usize) -> Result<usize, ()> {
debug!("Allocating {} bytes from Free List {:#X}", size, self as *const Self as usize);
// Find a region in the Free List that has at least the requested size.
for node in self.list.iter() {
let (region_start, region_size) = {
let borrowed = node.borrow();
(borrowed.value.start, borrowed.value.end - borrowed.value.start)
};
if region_size > size {
// We have found a region that is larger than the requested size.
// Return the address to the beginning of that region and shrink the region by that size.
node.borrow_mut().value.start += size;
return Ok(region_start);
} else if region_size == size {
// We have found a region that has exactly the requested size.
// Return the address to the beginning of that region and move the node into the pool for deletion or reuse.
self.list.remove(node.clone());
unsafe { mm::POOL.list.push(node); }
return Ok(region_start);
}
}
Err(())
}
#[inline]
fn allocate_address_for_node(&mut self, address: usize, end: usize, node: Rc<RefCell<Node<FreeListEntry>>>) -> bool {
let (region_start, region_end) = {
let borrowed = node.borrow();
(borrowed.value.start, borrowed.value.end)
};
// There are 4 possible cases of finding the free space we want to reserve.
if region_start == address && region_end == end {
// We found free space that has exactly the address and size of the block we want to allocate.
// Remove it.
self.list.remove(node.clone());
unsafe { mm::POOL.list.push(node); }
return true;
} else if region_start < address && region_end == end {
// We found free space in which the block we want to allocate lies right-aligned.
// Resize the free space to end at our block.
node.borrow_mut().value.end = address;
return true;
} else if region_start == address && region_end > end {
// We found free space in which the block we want to allocate lies left-aligned.
// Resize the free space to begin where our block ends.
node.borrow_mut().value.start = end;
return true;
} else if region_start < address && region_end > end {
// We found free space that covers the block we want to allocate.
// Resize the free space to end at our block and add another free space entry that begins where our block ends.
node.borrow_mut().value.end = address;
let new_node = unsafe { mm::POOL.list.head().expect("Pool is empty when reserving memory") };
unsafe { mm::POOL.list.remove(new_node.clone()); }
{
let mut new_node_borrowed = new_node.borrow_mut();
new_node_borrowed.value.start = end;
new_node_borrowed.value.end = region_end;
}
self.list.insert_after(new_node, node);
return true;
}
false
}
pub fn allocate_aligned(&mut self, size: usize, alignment: usize) -> Result<usize, ()> {
debug!("Allocating {} bytes from Free List {:#X} aligned to {} bytes", size, self as *const Self as usize, alignment);
for node in self.list.iter() {
// Align up the start address of the current node in the list to the desired alignment.
// Then let allocate_address_for_node check if this node is suitable and alter it respectively.
let address = align_up!(node.borrow().value.start, alignment);
let end = address + size;
if self.allocate_address_for_node(address, end, node) {
return Ok(address);
}
}
Err(())
}
pub fn reserve(&mut self, address: usize, size: usize) -> Result<(), ()> {
debug!("Reserving {} bytes at address {:#X} in Free List {:#X}", size, address, self as *const Self as usize);
let end = address + size;
for node in self.list.iter() {
// Let allocate_address_for_node check if this node contains the desired address.
if self.allocate_address_for_node(address, end, node) {
return Ok(());
}
}
// Our Free List contains no block covering the given address and size.
// This is an error, because we have to reserve the address to prevent it from being used differently.
Err(())
}
pub fn deallocate(&mut self, address: usize, size: usize) {
debug!("Deallocating {} bytes at {:#X} from Free List {:#X}", size, address, self as *const Self as usize);
let end = address + size;
let mut iter = self.list.iter();
while let Some(node) = iter.next() {
let (region_start, region_end) = {
let borrowed = node.borrow();
(borrowed.value.start, borrowed.value.end)
};
if region_start == end {
// The deallocated memory extends this free memory region to the left.
node.borrow_mut().value.start = address;
return;
} else if region_end == address {
// The deallocated memory extends this free memory region to the right.
// Check if it can even reunite with the next region.
if let Some(next_node) = iter.next() {
        let (next_region_start, next_region_end) = {
          let borrowed = next_node.borrow();
          (borrowed.value.start, borrowed.value.end)
        };
if next_region_start == end {
// It can reunite, so let the current region span over the reunited region and move the duplicate node
// into the pool for deletion or reuse.
node.borrow_mut().value.end = next_region_end;
self.list.remove(next_node.clone());
unsafe { mm::POOL.list.push(next_node); }
return;
}
}
// It cannot reunite, so just extend this region to the right and we are done.
node.borrow_mut().value.end = end;
return;
} else if end < region_start {
// The deallocated memory does not extend any memory region and needs an own entry in the Free List.
// Get that entry from the node pool.
// We search the list from low to high addresses and insert us before the first entry that has a
// higher address than us.
let new_node = unsafe { mm::POOL.list.head().expect("Pool is empty when attempting insert_before") };
unsafe { mm::POOL.list.remove(new_node.clone()); }
{
let mut new_node_borrowed = new_node.borrow_mut();
new_node_borrowed.value.start = address;
new_node_borrowed.value.end = end;
}
self.list.insert_before(new_node, node);
return;
}
}
// We could not find an entry with a higher address than us.
// So we become the new last entry in the list. Get that entry from the node pool.
let new_node = unsafe { mm::POOL.list.head().expect("Pool is empty when attempting insert_after") };
unsafe { mm::POOL.list.remove(new_node.clone()); }
{
let mut new_node_borrowed = new_node.borrow_mut();
new_node_borrowed.value.start = address;
new_node_borrowed.value.end = end;
}
if let Some(tail) = self.list.tail() {
self.list.insert_after(new_node, tail);
} else {
self.list.push(new_node);
}
}
}
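// Hedged usage sketch (not part of this module): how the allocator is typically
// driven, assuming a hypothetical `free_list` seeded with one region covering
// 0x10000..0x20000 and a populated `mm::POOL` for node reuse.
//
// let addr = free_list.allocate(0x1000)?;                    // -> 0x10000, region shrinks to 0x11000..0x20000
// let aligned = free_list.allocate_aligned(0x1000, 0x4000)?; // -> 0x14000, region is split around the block
// free_list.reserve(0x18000, 0x1000)?;                       // carves 0x18000..0x19000 out of the free space
// free_list.deallocate(addr, 0x1000);                        // 0x10000..0x11000 becomes free again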
| 35.859903 | 120 | 0.680048 |
71477baa4e35772b09318385b678e56a0f4062e4
| 2,747 |
#[cfg(feature = "pci")]
pub mod rtl8139;
#[cfg(feature = "pci")]
pub mod virtio_net;
use crate::arch::kernel::apic;
use crate::arch::kernel::irq::ExceptionStackFrame;
#[cfg(feature = "pci")]
use crate::arch::kernel::pci;
use crate::arch::kernel::percore::*;
use crate::synch::semaphore::*;
use crate::synch::spinlock::SpinlockIrqSave;
/// A trait for accessing the network interface
pub trait NetworkInterface {
/// Returns the mac address of the device.
fn get_mac_address(&self) -> [u8; 6];
/// Returns the current MTU of the device.
fn get_mtu(&self) -> u16;
/// Get buffer to create a TX packet
///
/// This returns ownership of the TX buffer.
fn get_tx_buffer(&mut self, len: usize) -> Result<(*mut u8, usize), ()>;
/// Frees the TX buffer (takes ownership)
fn free_tx_buffer(&self, token: usize);
	/// Send TX packets (takes TX buffer ownership)
fn send_tx_buffer(&mut self, tkn_handle: usize, len: usize) -> Result<(), ()>;
/// Check if a packet is available
fn has_packet(&self) -> bool;
/// Get RX buffer with an received packet
fn receive_rx_buffer(&mut self) -> Result<(&'static [u8], usize), ()>;
/// Tells driver, that buffer is consumed and can be deallocated
fn rx_buffer_consumed(&mut self, trf_handle: usize);
/// Enable / disable the polling mode of the network interface
fn set_polling_mode(&mut self, value: bool);
/// Handle interrupt and check if a packet is available
fn handle_interrupt(&mut self) -> bool;
}
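// Hedged sketch (not part of this module): the intended TX path for a driver
// implementing `NetworkInterface`. `driver` and `frame` are hypothetical values;
// error handling and locking are omitted.
//
// let (buf_ptr, handle) = driver.get_tx_buffer(frame.len())?;
// unsafe { core::ptr::copy_nonoverlapping(frame.as_ptr(), buf_ptr, frame.len()) };
// driver.send_tx_buffer(handle, frame.len())?;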
static NET_SEM: Semaphore = Semaphore::new(0);
/// Set the driver's polling mode; while polling is enabled, threads will not be blocked
pub extern "C" fn set_polling_mode(value: bool) {
static THREADS_IN_POLLING_MODE: SpinlockIrqSave<usize> = SpinlockIrqSave::new(0);
let mut guard = THREADS_IN_POLLING_MODE.lock();
if value {
*guard += 1;
if *guard == 1 {
#[cfg(feature = "pci")]
if let Some(driver) = crate::arch::kernel::pci::get_network_driver() {
driver.lock().set_polling_mode(value)
}
}
} else {
*guard -= 1;
if *guard == 0 {
#[cfg(feature = "pci")]
if let Some(driver) = crate::arch::kernel::pci::get_network_driver() {
driver.lock().set_polling_mode(value)
}
}
}
}
pub extern "C" fn netwait() {
NET_SEM.acquire(None);
}
pub fn netwakeup() {
NET_SEM.release();
}
#[cfg(target_arch = "x86_64")]
pub extern "x86-interrupt" fn network_irqhandler(_stack_frame: ExceptionStackFrame) {
debug!("Receive network interrupt");
apic::eoi();
#[cfg(feature = "pci")]
let check_scheduler = match pci::get_network_driver() {
Some(driver) => driver.lock().handle_interrupt(),
_ => {
debug!("Unable to handle interrupt!");
false
}
};
#[cfg(not(feature = "pci"))]
let check_scheduler = false;
if check_scheduler {
core_scheduler().scheduler();
}
}
| 28.319588 | 85 | 0.680379 |
e6a081b13b788ba7febf4010daa1edcb4f98bb75
| 8,319 |
// Buttplug Rust Source Code File - See https://buttplug.io for more info.
//
// Copyright 2016-2019 Nonpolynomial Labs LLC. All rights reserved.
//
// Licensed under the BSD 3-Clause license. See LICENSE file in the project root
// for full license information.
// Time to see what devices are available! In this example, we'll see how
// servers can access certain types of devices, and how clients can ask servers
// which devices are available.
use async_std::io;
use buttplug::{
client::{ButtplugClient, ButtplugClientEvent},
server::ButtplugServerOptions,
util::async_manager,
};
use futures::StreamExt;
use tracing::{info, span, Level};
async fn device_enumeration_example() {
tracing_subscriber::fmt::init();
let example = span!(Level::INFO, "Device Enumeration Example");
let _enter = example.enter();
info!("Starting Device Enumeration Example");
// Time to see what devices are available! In this example, we'll see how
// servers can access certain types of devices, and how clients can ask
// servers which devices are available.
// Since we're going to need to manage our server and client, this example
// will use an embedded connector.
//let connector = ButtplugRemoteClientConnector::<ButtplugWebsocketClientTransport, ButtplugClientJSONSerializer>::new(ButtplugWebsocketClientTransport::new_insecure_connector("ws://192.168.123.100:12345"));
// This example will also work with a WebsocketConnector if you want to
// connect to Intiface Desktop or an intiface-cli instance.
// We're to the new stuff. When we create a ButtplugEmbeddedConnector, it in
// turn creates a Buttplug Server to hold (unless we pass it one to use, which
// we won't be doing until later examples). If you're just interested in
// creating Buttplug Client applications that will access things like the
// Windows Buttplug Server, you won't have to set up the server like this, but
// this is good knowledge to have anyways, so it's recommended to at least
// read through this.
//
// When a Buttplug Server is created, it in turn creates a Device Manager. The
// Device Manager is basically the hub of all hardware communication for
// Buttplug. A Device Manager will hold multiple Device Communication
// Managers, which is where we get to specifics about hardware busses and
// communications. For instance, as of this writing, Buttplug currently ships
// with Device Communication Managers for
//
// - Bluetooth LE (Windows 10/Mac/Linux/iOS)
// - XInput/XBox Gamepads (Win >= 7)
// - Test/Simulator
//
// We can specify which device communication managers we want to use. For this
// example, we'll just add a TestDeviceManager so we don't have to deal with
// actual hardware. This requires a bit of manual setup.
//
// To do this, we'll add the device comm manager. For the test device comm
// manager, this gets a little complicated. We'll just be emulating a
// bluetooth device, the Aneros Vivi, by using its bluetooth name.
/* let helper = connector.server_ref().add_test_comm_manager().unwrap();
let _ = helper.add_ble_device("Massage Demo").await;
*/
// If we wanted to add a real device manager, like the btleplug manager,
// we'd run something like this:
//
//connector.server_ref().add_comm_manager::<BtlePlugCommunicationManager>();
  // Anyways, now that we have a manager sorted, let's talk about when and how
// you'll get events (in this case, DeviceAdded events) from the server.
//
// The server can fire device connection events at 2 points.
//
// - When a client first connects, if the server has a device connection it is
// already holding.
//
// - During device scanning.
//
// When the client connects as part of ButtplugClient::run(), it asks the
// server for a list of already connected devices. The server will return
// these as DeviceAdded events, including a ButtplugClientDevice instance we
// can then use to control the device.
//
// A quick aside on why a server could hold devices. There are a few reasons
// this could happen, some chosen, some forced.
//
// - On Windows 10, it is sometimes difficult to get bluetooth LE devices to
// disconnect, so some software (including the Windows Buttplug Server)
// leaves devices connected until either the device is powered off/taken out
// of bluetooth range, or the program terminates.
//
// - Depending on how a server is being used, parts of it like a device
// manager may stay alive between client connections. This would mean that
// if a client disconnected from a server then reconnected quickly, setup
// steps wouldn't have to happen again.
//
// With that out of the way, let's build our client.
let (client, mut event_stream) =
ButtplugClient::connect_in_process("test client", &ButtplugServerOptions::default())
.await
.unwrap();
// First, we'll start the server looking for devices.
if let Err(err) = client.start_scanning().await {
// If the server disconnected between the time we spun up the
// loop and now, the scanning will return an error. At that
// point we should just bail out.
println!("Client errored when starting scan! {}", err);
return;
}
// Ok, we've started scanning. Now we need to wait to hear back from the
// server on whether we got anything. To do that, we use our event stream.
//
// The event stream is to Buttplug's Rust implementation what the event
// handlers in C#/JS were to those implementations. However, since we're not
// in a GC'd language anymore, event handlers are a bit difficult to
// implement, so we just have a stream-like function instead.
//
// Running .next() on the event stream will return a future that waits until
// it gets something from the server. You can either await that and block
// until you get something from the server (or race/select it against other
// futures), or else save the future and use something like a timeout join.
//
// For our purposes for the moment, all we care about is receiving new
// devices, so we'll just loop and wait. We'll do so in another task.
async_manager::spawn(async move {
loop {
match event_stream.next().await.unwrap() {
// Yay we got an event!
ButtplugClientEvent::DeviceAdded(device) => {
// And we actually got a device!
//
// The device we're given is a real
// ButtplugClientDevice object. We could control the
// device with it if we wanted, but that's coming up
// in a later example. For now, we'll just print the
// device name then drop our instance of it.
println!("We got a device: {}", device.name);
}
ButtplugClientEvent::ScanningFinished => {
println!("Scanning finished signaled.");
}
ButtplugClientEvent::ServerDisconnect => {
// The server disconnected, which means we're done
// here, so just break up to the top level.
println!("Server disconnected!");
}
_ => {
// Something else happened, like scanning finishing,
// devices getting removed, etc... Might as well say
// something about it.
println!("Got some other kind of event we don't care about");
}
}
}
})
.unwrap();
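  // A hedged alternative to the loop above (not part of the original example):
  // instead of waiting indefinitely, a single wait can be bounded with a timeout.
  // This assumes async-std's `future::timeout` helper is available, and the event
  // stream would need to stay on this task instead of being moved into the spawn.
  //
  // use std::time::Duration;
  // match async_std::future::timeout(Duration::from_secs(5), event_stream.next()).await {
  //     Ok(Some(_event)) => println!("Got an event within 5 seconds."),
  //     _ => println!("No event within 5 seconds."),
  // }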
println!("Hit enter to continue...");
let mut line = String::new();
io::stdin().read_line(&mut line).await.unwrap();
// Hypothetical situation: We've now exited our match block, and
// realized that hey, we actually wanted that device object we
// dropped in the DeviceAdded branch!
//
// Never fear, you can always ask for a vec of all devices from
// the client. It requires an await as the devices require
// creation by the event loop, but it should be pretty quick.
//
// As with everything else, since the event loop may have shut
// down due to server disconnect, this returns a result that
// will error if that has happened.
println!("Devices currently connected:");
for dev in client.devices() {
println!("- {}", dev.name);
}
// And now we're done!
println!("Exiting example");
}
fn main() {
async_manager::block_on(async {
device_enumeration_example().await;
})
}
| 44.25 | 209 | 0.695877 |
79b343da24700ef721f458a6f7dfd0ee34738e27
| 154 |
mod a;
mod b;
#[path = "b.rs"]
mod b2;
mod c;
#[path = "c.rs"]
mod c2;
#[path = "c.rs"]
mod c3;
mod from_other_module;
mod other_module;
fn main() {}
| 9.058824 | 22 | 0.577922 |
03a5a4f66dc6639130440e4695dce8cdbb52580c
| 37,162 |
use crate::bounding_volume::{BoundingSphere, BoundingVolume, AABB};
use crate::mass_properties::MassProperties;
use crate::math::{Isometry, Point, Real, Vector};
use crate::query::{PointQuery, RayCast};
use crate::shape::composite_shape::SimdCompositeShape;
#[cfg(feature = "serde-serialize")]
use crate::shape::SharedShape;
use crate::shape::{
Ball, Capsule, Compound, Cuboid, FeatureId, HalfSpace, HeightField, PolygonalFeatureMap,
Polyline, RoundCuboid, RoundShape, RoundTriangle, Segment, SupportMap, TriMesh, Triangle,
};
#[cfg(feature = "dim3")]
use crate::shape::{
Cone, ConvexPolyhedron, Cylinder, RoundCone, RoundConvexPolyhedron, RoundCylinder,
};
#[cfg(feature = "dim2")]
use crate::shape::{ConvexPolygon, RoundConvexPolygon};
use downcast_rs::{impl_downcast, DowncastSync};
use na::{RealField, Unit};
use num::Zero;
use num_derive::FromPrimitive;
#[derive(Copy, Clone, Debug, FromPrimitive)]
/// Enum representing the type of a shape.
pub enum ShapeType {
/// A ball shape.
Ball = 0,
/// A cuboid shape.
Cuboid,
/// A capsule shape.
Capsule,
/// A segment shape.
Segment,
/// A triangle shape.
Triangle,
/// A triangle mesh shape.
TriMesh,
/// A set of segments.
Polyline,
/// A shape representing a full half-space.
HalfSpace,
/// A heightfield shape.
HeightField,
/// A Compound shape.
Compound,
#[cfg(feature = "dim2")]
ConvexPolygon,
#[cfg(feature = "dim3")]
/// A convex polyhedron.
ConvexPolyhedron,
#[cfg(feature = "dim3")]
/// A cylindrical shape.
Cylinder,
#[cfg(feature = "dim3")]
/// A cylindrical shape.
Cone,
// /// A custom shape type.
// Custom(u8),
/// A cuboid with rounded corners.
RoundCuboid,
/// A triangle with rounded corners.
RoundTriangle,
// /// A triangle-mesh with rounded corners.
// RoundedTriMesh,
// /// An heightfield with rounded corners.
// RoundedHeightField,
/// A cylinder with rounded corners.
#[cfg(feature = "dim3")]
RoundCylinder,
/// A cone with rounded corners.
#[cfg(feature = "dim3")]
RoundCone,
/// A convex polyhedron with rounded corners.
#[cfg(feature = "dim3")]
RoundConvexPolyhedron,
/// A convex polygon with rounded corners.
#[cfg(feature = "dim2")]
RoundConvexPolygon,
/// A custom user-defined shape.
Custom,
}
#[derive(Copy, Clone)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize))]
/// Enum representing the shape with its actual type
pub enum TypedShape<'a> {
/// A ball shape.
Ball(&'a Ball),
/// A cuboid shape.
Cuboid(&'a Cuboid),
/// A capsule shape.
Capsule(&'a Capsule),
/// A segment shape.
Segment(&'a Segment),
/// A triangle shape.
Triangle(&'a Triangle),
/// A triangle mesh shape.
TriMesh(&'a TriMesh),
/// A set of segments.
Polyline(&'a Polyline),
/// A shape representing a full half-space.
HalfSpace(&'a HalfSpace),
/// A heightfield shape.
HeightField(&'a HeightField),
/// A Compound shape.
Compound(&'a Compound),
#[cfg(feature = "dim2")]
ConvexPolygon(&'a ConvexPolygon),
#[cfg(feature = "dim3")]
/// A convex polyhedron.
ConvexPolyhedron(&'a ConvexPolyhedron),
#[cfg(feature = "dim3")]
/// A cylindrical shape.
Cylinder(&'a Cylinder),
#[cfg(feature = "dim3")]
/// A cylindrical shape.
Cone(&'a Cone),
// /// A custom shape type.
// Custom(u8),
/// A cuboid with rounded corners.
RoundCuboid(&'a RoundCuboid),
/// A triangle with rounded corners.
RoundTriangle(&'a RoundTriangle),
// /// A triangle-mesh with rounded corners.
// RoundedTriMesh,
// /// An heightfield with rounded corners.
// RoundedHeightField,
/// A cylinder with rounded corners.
#[cfg(feature = "dim3")]
RoundCylinder(&'a RoundCylinder),
/// A cone with rounded corners.
#[cfg(feature = "dim3")]
RoundCone(&'a RoundCone),
/// A convex polyhedron with rounded corners.
#[cfg(feature = "dim3")]
RoundConvexPolyhedron(&'a RoundConvexPolyhedron),
/// A convex polygon with rounded corners.
#[cfg(feature = "dim2")]
RoundConvexPolygon(&'a RoundConvexPolygon),
/// A custom user-defined shape with a type identified by a number.
Custom(u32),
}
#[cfg(feature = "serde-serialize")]
#[derive(Deserialize)]
// NOTE: this enum MUST match the `TypedShape` enum.
/// Enum representing the shape with its actual type
pub(crate) enum DeserializableTypedShape {
/// A ball shape.
Ball(Ball),
/// A cuboid shape.
Cuboid(Cuboid),
/// A capsule shape.
Capsule(Capsule),
/// A segment shape.
Segment(Segment),
/// A triangle shape.
Triangle(Triangle),
/// A triangle mesh shape.
TriMesh(TriMesh),
/// A set of segments.
Polyline(Polyline),
/// A shape representing a full half-space.
HalfSpace(HalfSpace),
/// A heightfield shape.
HeightField(HeightField),
/// A Compound shape.
Compound(Compound),
#[cfg(feature = "dim2")]
ConvexPolygon(ConvexPolygon),
#[cfg(feature = "dim3")]
/// A convex polyhedron.
ConvexPolyhedron(ConvexPolyhedron),
#[cfg(feature = "dim3")]
/// A cylindrical shape.
Cylinder(Cylinder),
#[cfg(feature = "dim3")]
/// A cylindrical shape.
Cone(Cone),
// /// A custom shape type.
// Custom(u8),
/// A cuboid with rounded corners.
RoundCuboid(RoundCuboid),
/// A triangle with rounded corners.
RoundTriangle(RoundTriangle),
// /// A triangle-mesh with rounded corners.
// RoundedTriMesh,
// /// An heightfield with rounded corners.
// RoundedHeightField,
/// A cylinder with rounded corners.
#[cfg(feature = "dim3")]
RoundCylinder(RoundCylinder),
/// A cone with rounded corners.
#[cfg(feature = "dim3")]
RoundCone(RoundCone),
/// A convex polyhedron with rounded corners.
#[cfg(feature = "dim3")]
RoundConvexPolyhedron(RoundConvexPolyhedron),
/// A convex polygon with rounded corners.
#[cfg(feature = "dim2")]
RoundConvexPolygon(RoundConvexPolygon),
/// A custom user-defined shape identified by a number.
Custom(u32),
}
#[cfg(feature = "serde-serialize")]
impl DeserializableTypedShape {
/// Converts `self` to a `SharedShape` if `self` isn't `Custom`.
pub fn into_shared_shape(self) -> Option<SharedShape> {
match self {
DeserializableTypedShape::Ball(s) => Some(SharedShape::new(s)),
DeserializableTypedShape::Cuboid(s) => Some(SharedShape::new(s)),
DeserializableTypedShape::Capsule(s) => Some(SharedShape::new(s)),
DeserializableTypedShape::Segment(s) => Some(SharedShape::new(s)),
DeserializableTypedShape::Triangle(s) => Some(SharedShape::new(s)),
DeserializableTypedShape::TriMesh(s) => Some(SharedShape::new(s)),
DeserializableTypedShape::Polyline(s) => Some(SharedShape::new(s)),
DeserializableTypedShape::HalfSpace(s) => Some(SharedShape::new(s)),
DeserializableTypedShape::HeightField(s) => Some(SharedShape::new(s)),
DeserializableTypedShape::Compound(s) => Some(SharedShape::new(s)),
#[cfg(feature = "dim2")]
DeserializableTypedShape::ConvexPolygon(s) => Some(SharedShape::new(s)),
#[cfg(feature = "dim3")]
DeserializableTypedShape::ConvexPolyhedron(s) => Some(SharedShape::new(s)),
#[cfg(feature = "dim3")]
DeserializableTypedShape::Cylinder(s) => Some(SharedShape::new(s)),
#[cfg(feature = "dim3")]
DeserializableTypedShape::Cone(s) => Some(SharedShape::new(s)),
DeserializableTypedShape::RoundCuboid(s) => Some(SharedShape::new(s)),
DeserializableTypedShape::RoundTriangle(s) => Some(SharedShape::new(s)),
#[cfg(feature = "dim3")]
DeserializableTypedShape::RoundCylinder(s) => Some(SharedShape::new(s)),
#[cfg(feature = "dim3")]
DeserializableTypedShape::RoundCone(s) => Some(SharedShape::new(s)),
#[cfg(feature = "dim3")]
DeserializableTypedShape::RoundConvexPolyhedron(s) => Some(SharedShape::new(s)),
#[cfg(feature = "dim2")]
DeserializableTypedShape::RoundConvexPolygon(s) => Some(SharedShape::new(s)),
DeserializableTypedShape::Custom(_) => None,
}
}
}
/// Trait implemented by shapes usable by Rapier.
pub trait Shape: RayCast + PointQuery + DowncastSync {
/// Computes the AABB of this shape.
fn compute_local_aabb(&self) -> AABB;
/// Computes the bounding-sphere of this shape.
fn compute_local_bounding_sphere(&self) -> BoundingSphere;
/// Clones this shape into a boxed trait-object.
fn clone_box(&self) -> Box<dyn Shape>;
/// Computes the AABB of this shape with the given position.
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.compute_local_aabb().transform_by(position)
}
/// Computes the bounding-sphere of this shape with the given position.
fn compute_bounding_sphere(&self, position: &Isometry<Real>) -> BoundingSphere {
self.compute_local_bounding_sphere().transform_by(position)
}
/// Compute the mass-properties of this shape given its uniform density.
fn mass_properties(&self, density: Real) -> MassProperties;
/// Gets the type tag of this shape.
fn shape_type(&self) -> ShapeType;
/// Gets the underlying shape as an enum.
fn as_typed_shape(&self) -> TypedShape;
fn ccd_thickness(&self) -> Real;
// TODO: document this.
// This should probably be the largest sharp edge angle (in radians) in [0; PI].
// Though this isn't a very good description considering this is PI / 2
// for capsule (which doesn't have any sharp angle). I guess a better way
// to phrase this is: "the smallest angle such that rotating the shape by
// that angle may result in different contact points".
fn ccd_angular_thickness(&self) -> Real;
/// Is this shape known to be convex?
///
/// If this returns `true` then `self` is known to be convex.
/// If this returns `false` then it is not known whether or
/// not `self` is convex.
fn is_convex(&self) -> bool {
false
}
    /// Converts this shape into its support mapping, if it has one.
fn as_support_map(&self) -> Option<&dyn SupportMap> {
None
}
fn as_composite_shape(&self) -> Option<&dyn SimdCompositeShape> {
None
}
/// Converts this shape to a polygonal feature-map, if it is one.
fn as_polygonal_feature_map(&self) -> Option<(&dyn PolygonalFeatureMap, Real)> {
None
}
// fn as_rounded(&self) -> Option<&Rounded<Box<AnyShape>>> {
// None
// }
/// The shape's normal at the given point located on a specific feature.
fn feature_normal_at_point(
&self,
_feature: FeatureId,
_point: &Point<Real>,
) -> Option<Unit<Vector<Real>>> {
None
}
}
impl_downcast!(sync Shape);
impl dyn Shape {
/// Converts this abstract shape to the given shape, if it is one.
pub fn as_shape<T: Shape>(&self) -> Option<&T> {
self.downcast_ref()
}
/// Converts this abstract shape to the given mutable shape, if it is one.
pub fn as_shape_mut<T: Shape>(&mut self) -> Option<&mut T> {
self.downcast_mut()
}
/// Converts this abstract shape to a ball, if it is one.
pub fn as_ball(&self) -> Option<&Ball> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable ball, if it is one.
pub fn as_ball_mut(&mut self) -> Option<&mut Ball> {
self.downcast_mut()
}
/// Converts this abstract shape to a cuboid, if it is one.
pub fn as_cuboid(&self) -> Option<&Cuboid> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable cuboid, if it is one.
pub fn as_cuboid_mut(&mut self) -> Option<&mut Cuboid> {
self.downcast_mut()
}
/// Converts this abstract shape to a halfspace, if it is one.
pub fn as_halfspace(&self) -> Option<&HalfSpace> {
self.downcast_ref()
}
/// Converts this abstract shape to a halfspace, if it is one.
pub fn as_halfspace_mut(&mut self) -> Option<&mut HalfSpace> {
self.downcast_mut()
}
/// Converts this abstract shape to a segment, if it is one.
pub fn as_segment(&self) -> Option<&Segment> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable segment, if it is one.
pub fn as_segment_mut(&mut self) -> Option<&mut Segment> {
self.downcast_mut()
}
/// Converts this abstract shape to a capsule, if it is one.
pub fn as_capsule(&self) -> Option<&Capsule> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable capsule, if it is one.
pub fn as_capsule_mut(&mut self) -> Option<&mut Capsule> {
self.downcast_mut()
}
/// Converts this abstract shape to a triangle, if it is one.
pub fn as_triangle(&self) -> Option<&Triangle> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable triangle, if it is one.
pub fn as_triangle_mut(&mut self) -> Option<&mut Triangle> {
self.downcast_mut()
}
/// Converts this abstract shape to a compound shape, if it is one.
pub fn as_compound(&self) -> Option<&Compound> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable compound shape, if it is one.
pub fn as_compound_mut(&mut self) -> Option<&mut Compound> {
self.downcast_mut()
}
/// Converts this abstract shape to a triangle mesh, if it is one.
pub fn as_trimesh(&self) -> Option<&TriMesh> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable triangle mesh, if it is one.
pub fn as_trimesh_mut(&mut self) -> Option<&mut TriMesh> {
self.downcast_mut()
}
/// Converts this abstract shape to a polyline, if it is one.
pub fn as_polyline(&self) -> Option<&Polyline> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable polyline, if it is one.
pub fn as_polyline_mut(&mut self) -> Option<&mut Polyline> {
self.downcast_mut()
}
/// Converts this abstract shape to a heightfield, if it is one.
pub fn as_heightfield(&self) -> Option<&HeightField> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable heightfield, if it is one.
pub fn as_heightfield_mut(&mut self) -> Option<&mut HeightField> {
self.downcast_mut()
}
/// Converts this abstract shape to a round cuboid, if it is one.
pub fn as_round_cuboid(&self) -> Option<&RoundCuboid> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable round cuboid, if it is one.
pub fn as_round_cuboid_mut(&mut self) -> Option<&mut RoundCuboid> {
self.downcast_mut()
}
/// Converts this abstract shape to a round triangle, if it is one.
pub fn as_round_triangle(&self) -> Option<&RoundTriangle> {
self.downcast_ref()
}
/// Converts this abstract shape to a round triangle, if it is one.
pub fn as_round_triangle_mut(&mut self) -> Option<&mut RoundTriangle> {
self.downcast_mut()
}
/// Converts this abstract shape to a convex polygon, if it is one.
#[cfg(feature = "dim2")]
pub fn as_convex_polygon(&self) -> Option<&ConvexPolygon> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable convex polygon, if it is one.
#[cfg(feature = "dim2")]
pub fn as_convex_polygon_mut(&mut self) -> Option<&mut ConvexPolygon> {
self.downcast_mut()
}
/// Converts this abstract shape to a round convex polygon, if it is one.
#[cfg(feature = "dim2")]
pub fn as_round_convex_polygon(&self) -> Option<&RoundConvexPolygon> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable round convex polygon, if it is one.
#[cfg(feature = "dim2")]
pub fn as_round_convex_polygon_mut(&mut self) -> Option<&mut RoundConvexPolygon> {
self.downcast_mut()
}
#[cfg(feature = "dim3")]
pub fn as_convex_polyhedron(&self) -> Option<&ConvexPolyhedron> {
self.downcast_ref()
}
#[cfg(feature = "dim3")]
pub fn as_convex_polyhedron_mut(&mut self) -> Option<&mut ConvexPolyhedron> {
self.downcast_mut()
}
/// Converts this abstract shape to a cylinder, if it is one.
#[cfg(feature = "dim3")]
pub fn as_cylinder(&self) -> Option<&Cylinder> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable cylinder, if it is one.
#[cfg(feature = "dim3")]
pub fn as_cylinder_mut(&mut self) -> Option<&mut Cylinder> {
self.downcast_mut()
}
/// Converts this abstract shape to a cone, if it is one.
#[cfg(feature = "dim3")]
pub fn as_cone(&self) -> Option<&Cone> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable cone, if it is one.
#[cfg(feature = "dim3")]
pub fn as_cone_mut(&mut self) -> Option<&mut Cone> {
self.downcast_mut()
}
/// Converts this abstract shape to a round cylinder, if it is one.
#[cfg(feature = "dim3")]
pub fn as_round_cylinder(&self) -> Option<&RoundCylinder> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable round cylinder, if it is one.
#[cfg(feature = "dim3")]
pub fn as_round_cylinder_mut(&mut self) -> Option<&mut RoundCylinder> {
self.downcast_mut()
}
/// Converts this abstract shape to a round cone, if it is one.
#[cfg(feature = "dim3")]
pub fn as_round_cone(&self) -> Option<&RoundCone> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable round cone, if it is one.
#[cfg(feature = "dim3")]
pub fn as_round_cone_mut(&mut self) -> Option<&mut RoundCone> {
self.downcast_mut()
}
/// Converts this abstract shape to a round convex polyhedron, if it is one.
#[cfg(feature = "dim3")]
pub fn as_round_convex_polyhedron(&self) -> Option<&RoundConvexPolyhedron> {
self.downcast_ref()
}
/// Converts this abstract shape to a mutable round convex polyhedron, if it is one.
#[cfg(feature = "dim3")]
pub fn as_round_convex_polyhedron_mut(&mut self) -> Option<&mut RoundConvexPolyhedron> {
self.downcast_mut()
}
}
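// Hedged usage sketch (not part of this file): callers holding a `&dyn Shape`
// can recover the concrete type either through the helpers above or generically:
//
// if let Some(ball) = shape.as_ball() { /* use ball.radius */ }
// if let Some(cuboid) = shape.as_shape::<Cuboid>() { /* use cuboid.half_extents */ }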
impl Shape for Ball {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
self.local_aabb()
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.local_bounding_sphere()
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.aabb(position)
}
fn mass_properties(&self, density: Real) -> MassProperties {
MassProperties::from_ball(density, self.radius)
}
fn ccd_thickness(&self) -> Real {
self.radius
}
fn ccd_angular_thickness(&self) -> Real {
Real::pi()
}
fn is_convex(&self) -> bool {
true
}
fn shape_type(&self) -> ShapeType {
ShapeType::Ball
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::Ball(self)
}
fn as_support_map(&self) -> Option<&dyn SupportMap> {
Some(self as &dyn SupportMap)
}
}
impl Shape for Cuboid {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
self.local_aabb()
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.local_bounding_sphere()
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.aabb(position)
}
fn mass_properties(&self, density: Real) -> MassProperties {
MassProperties::from_cuboid(density, self.half_extents)
}
fn is_convex(&self) -> bool {
true
}
fn shape_type(&self) -> ShapeType {
ShapeType::Cuboid
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::Cuboid(self)
}
fn ccd_thickness(&self) -> Real {
self.half_extents.min()
}
fn ccd_angular_thickness(&self) -> Real {
Real::frac_pi_2()
}
fn as_support_map(&self) -> Option<&dyn SupportMap> {
Some(self as &dyn SupportMap)
}
fn as_polygonal_feature_map(&self) -> Option<(&dyn PolygonalFeatureMap, Real)> {
Some((self as &dyn PolygonalFeatureMap, 0.0))
}
}
impl Shape for Capsule {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
self.local_aabb()
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.local_bounding_sphere()
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.aabb(position)
}
fn mass_properties(&self, density: Real) -> MassProperties {
MassProperties::from_capsule(density, self.segment.a, self.segment.b, self.radius)
}
fn is_convex(&self) -> bool {
true
}
fn shape_type(&self) -> ShapeType {
ShapeType::Capsule
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::Capsule(self)
}
fn ccd_thickness(&self) -> Real {
self.radius
}
fn ccd_angular_thickness(&self) -> Real {
Real::frac_pi_2()
}
fn as_support_map(&self) -> Option<&dyn SupportMap> {
Some(self as &dyn SupportMap)
}
fn as_polygonal_feature_map(&self) -> Option<(&dyn PolygonalFeatureMap, Real)> {
Some((&self.segment as &dyn PolygonalFeatureMap, self.radius))
}
}
impl Shape for Triangle {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
self.local_aabb()
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.local_bounding_sphere()
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.aabb(position)
}
fn mass_properties(&self, _density: Real) -> MassProperties {
#[cfg(feature = "dim2")]
return MassProperties::from_triangle(_density, &self.a, &self.b, &self.c);
#[cfg(feature = "dim3")]
return MassProperties::zero();
}
fn is_convex(&self) -> bool {
true
}
fn shape_type(&self) -> ShapeType {
ShapeType::Triangle
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::Triangle(self)
}
fn ccd_thickness(&self) -> Real {
// TODO: in 2D use the smallest height of the triangle.
0.0
}
fn ccd_angular_thickness(&self) -> Real {
Real::frac_pi_2()
}
fn as_support_map(&self) -> Option<&dyn SupportMap> {
Some(self as &dyn SupportMap)
}
fn as_polygonal_feature_map(&self) -> Option<(&dyn PolygonalFeatureMap, Real)> {
Some((self as &dyn PolygonalFeatureMap, 0.0))
}
}
impl Shape for Segment {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
self.local_aabb()
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.local_bounding_sphere()
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.aabb(position)
}
fn mass_properties(&self, _density: Real) -> MassProperties {
MassProperties::zero()
}
fn is_convex(&self) -> bool {
true
}
fn ccd_thickness(&self) -> Real {
0.0
}
fn ccd_angular_thickness(&self) -> Real {
Real::frac_pi_2()
}
fn shape_type(&self) -> ShapeType {
ShapeType::Segment
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::Segment(self)
}
fn as_support_map(&self) -> Option<&dyn SupportMap> {
Some(self as &dyn SupportMap)
}
fn as_polygonal_feature_map(&self) -> Option<(&dyn PolygonalFeatureMap, Real)> {
Some((self as &dyn PolygonalFeatureMap, 0.0))
}
}
impl Shape for Compound {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
*self.local_aabb()
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.local_bounding_sphere()
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.local_aabb().transform_by(position)
}
fn mass_properties(&self, density: Real) -> MassProperties {
MassProperties::from_compound(density, self.shapes())
}
fn shape_type(&self) -> ShapeType {
ShapeType::Compound
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::Compound(self)
}
fn ccd_thickness(&self) -> Real {
self.shapes()
.iter()
.fold(Real::MAX, |curr, (_, s)| curr.min(s.ccd_thickness()))
}
fn ccd_angular_thickness(&self) -> Real {
self.shapes().iter().fold(Real::MAX, |curr, (_, s)| {
curr.max(s.ccd_angular_thickness())
})
}
fn as_composite_shape(&self) -> Option<&dyn SimdCompositeShape> {
Some(self as &dyn SimdCompositeShape)
}
}
impl Shape for Polyline {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
*self.local_aabb()
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.local_bounding_sphere()
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.aabb(position)
}
fn mass_properties(&self, _density: Real) -> MassProperties {
MassProperties::zero()
}
fn shape_type(&self) -> ShapeType {
ShapeType::Polyline
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::Polyline(self)
}
fn ccd_thickness(&self) -> Real {
0.0
}
fn ccd_angular_thickness(&self) -> Real {
// TODO: the value should depend on the angles between
// adjacent segments of the polyline.
Real::frac_pi_4()
}
fn as_composite_shape(&self) -> Option<&dyn SimdCompositeShape> {
Some(self as &dyn SimdCompositeShape)
}
}
impl Shape for TriMesh {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
*self.local_aabb()
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.local_bounding_sphere()
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.aabb(position)
}
fn mass_properties(&self, _density: Real) -> MassProperties {
#[cfg(feature = "dim2")]
return MassProperties::from_trimesh(_density, self.vertices(), self.indices());
#[cfg(feature = "dim3")]
return MassProperties::zero();
}
fn shape_type(&self) -> ShapeType {
ShapeType::TriMesh
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::TriMesh(self)
}
fn ccd_thickness(&self) -> Real {
// TODO: in 2D, return the smallest CCD thickness among triangles?
0.0
}
fn ccd_angular_thickness(&self) -> Real {
// TODO: the value should depend on the angles between
// adjacent triangles of the trimesh.
Real::frac_pi_4()
}
fn as_composite_shape(&self) -> Option<&dyn SimdCompositeShape> {
Some(self as &dyn SimdCompositeShape)
}
}
impl Shape for HeightField {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
self.local_aabb()
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.local_bounding_sphere()
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.aabb(position)
}
fn mass_properties(&self, _density: Real) -> MassProperties {
MassProperties::zero()
}
fn shape_type(&self) -> ShapeType {
ShapeType::HeightField
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::HeightField(self)
}
fn ccd_thickness(&self) -> Real {
0.0
}
fn ccd_angular_thickness(&self) -> Real {
// TODO: the value should depend on the angles between
// adjacent triangles of the heightfield.
Real::frac_pi_4()
}
}
#[cfg(feature = "dim2")]
impl Shape for ConvexPolygon {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
self.local_aabb()
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.local_bounding_sphere()
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.aabb(position)
}
fn mass_properties(&self, density: Real) -> MassProperties {
MassProperties::from_convex_polygon(density, &self.points())
}
fn is_convex(&self) -> bool {
true
}
fn shape_type(&self) -> ShapeType {
ShapeType::ConvexPolygon
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::ConvexPolygon(self)
}
fn ccd_thickness(&self) -> Real {
// TODO: we should use the OBB instead.
self.compute_local_aabb().half_extents().min()
}
fn ccd_angular_thickness(&self) -> Real {
// TODO: the value should depend on the angles between
// adjacent segments of the convex polygon.
Real::frac_pi_4()
}
fn as_support_map(&self) -> Option<&dyn SupportMap> {
Some(self as &dyn SupportMap)
}
fn as_polygonal_feature_map(&self) -> Option<(&dyn PolygonalFeatureMap, Real)> {
Some((self as &dyn PolygonalFeatureMap, 0.0))
}
}
#[cfg(feature = "dim3")]
impl Shape for ConvexPolyhedron {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
self.local_aabb()
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.local_bounding_sphere()
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.aabb(position)
}
fn mass_properties(&self, density: Real) -> MassProperties {
let (vertices, indices) = self.to_trimesh();
MassProperties::from_convex_polyhedron(density, &vertices, &indices)
}
fn is_convex(&self) -> bool {
true
}
fn shape_type(&self) -> ShapeType {
ShapeType::ConvexPolyhedron
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::ConvexPolyhedron(self)
}
fn ccd_thickness(&self) -> Real {
// TODO: we should use the OBB instead.
self.compute_local_aabb().half_extents().min()
}
fn ccd_angular_thickness(&self) -> Real {
// TODO: the value should depend on the angles between
// adjacent segments of the convex polyhedron.
Real::frac_pi_4()
}
fn as_support_map(&self) -> Option<&dyn SupportMap> {
Some(self as &dyn SupportMap)
}
fn as_polygonal_feature_map(&self) -> Option<(&dyn PolygonalFeatureMap, Real)> {
Some((self as &dyn PolygonalFeatureMap, 0.0))
}
}
#[cfg(feature = "dim3")]
impl Shape for Cylinder {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
self.local_aabb()
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.local_bounding_sphere()
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.aabb(position)
}
fn mass_properties(&self, density: Real) -> MassProperties {
MassProperties::from_cylinder(density, self.half_height, self.radius)
}
fn is_convex(&self) -> bool {
true
}
fn shape_type(&self) -> ShapeType {
ShapeType::Cylinder
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::Cylinder(self)
}
fn ccd_thickness(&self) -> Real {
self.radius
}
fn ccd_angular_thickness(&self) -> Real {
Real::frac_pi_2()
}
fn as_support_map(&self) -> Option<&dyn SupportMap> {
Some(self as &dyn SupportMap)
}
fn as_polygonal_feature_map(&self) -> Option<(&dyn PolygonalFeatureMap, Real)> {
Some((self as &dyn PolygonalFeatureMap, 0.0))
}
}
#[cfg(feature = "dim3")]
impl Shape for Cone {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
self.local_aabb()
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.local_bounding_sphere()
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.aabb(position)
}
fn mass_properties(&self, density: Real) -> MassProperties {
MassProperties::from_cone(density, self.half_height, self.radius)
}
fn is_convex(&self) -> bool {
true
}
fn shape_type(&self) -> ShapeType {
ShapeType::Cone
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::Cone(self)
}
fn ccd_thickness(&self) -> Real {
self.radius
}
fn ccd_angular_thickness(&self) -> Real {
let apex_half_angle = self.radius.atan2(self.half_height);
assert!(apex_half_angle >= 0.0);
let basis_angle = Real::frac_pi_2() - apex_half_angle;
basis_angle.min(apex_half_angle * 2.0)
}
fn as_support_map(&self) -> Option<&dyn SupportMap> {
Some(self as &dyn SupportMap)
}
fn as_polygonal_feature_map(&self) -> Option<(&dyn PolygonalFeatureMap, Real)> {
Some((self as &dyn PolygonalFeatureMap, 0.0))
}
}
impl Shape for HalfSpace {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
self.local_aabb()
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.local_bounding_sphere()
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.aabb(position)
}
fn is_convex(&self) -> bool {
true
}
fn ccd_thickness(&self) -> Real {
f32::MAX as Real
}
fn ccd_angular_thickness(&self) -> Real {
Real::pi()
}
fn mass_properties(&self, _: Real) -> MassProperties {
MassProperties::zero()
}
fn shape_type(&self) -> ShapeType {
ShapeType::HalfSpace
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::HalfSpace(self)
}
}
macro_rules! impl_shape_for_round_shape(
($($S: ty, $Tag: ident);*) => {$(
impl Shape for RoundShape<$S> {
fn clone_box(&self) -> Box<dyn Shape> {
Box::new(self.clone())
}
fn compute_local_aabb(&self) -> AABB {
self.base_shape.local_aabb().loosened(self.border_radius)
}
fn compute_local_bounding_sphere(&self) -> BoundingSphere {
self.base_shape.local_bounding_sphere().loosened(self.border_radius)
}
fn compute_aabb(&self, position: &Isometry<Real>) -> AABB {
self.base_shape.aabb(position).loosened(self.border_radius)
}
fn mass_properties(&self, density: Real) -> MassProperties {
self.base_shape.mass_properties(density)
}
fn is_convex(&self) -> bool {
self.base_shape.is_convex()
}
fn shape_type(&self) -> ShapeType {
ShapeType::$Tag
}
fn as_typed_shape(&self) -> TypedShape {
TypedShape::$Tag(self)
}
fn ccd_thickness(&self) -> Real {
self.base_shape.ccd_thickness() + self.border_radius
}
fn ccd_angular_thickness(&self) -> Real {
// The fact that the shape is round doesn't change anything
// to the CCD angular thickness.
self.base_shape.ccd_angular_thickness()
}
fn as_support_map(&self) -> Option<&dyn SupportMap> {
Some(self as &dyn SupportMap)
}
fn as_polygonal_feature_map(&self) -> Option<(&dyn PolygonalFeatureMap, Real)> {
Some((&self.base_shape as &dyn PolygonalFeatureMap, self.border_radius))
}
}
)*}
);
impl_shape_for_round_shape!(
Cuboid, RoundCuboid;
Triangle, RoundTriangle
);
#[cfg(feature = "dim2")]
impl_shape_for_round_shape!(ConvexPolygon, RoundConvexPolygon);
#[cfg(feature = "dim3")]
impl_shape_for_round_shape!(
Cylinder, RoundCylinder;
Cone, RoundCone;
ConvexPolyhedron, RoundConvexPolyhedron
);
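// Hedged example (not part of the original file): a minimal check of the type-tag
// and downcast machinery defined above, using `Ball`'s public field. Assumes the
// crate's default `std` feature so `Box` and the test harness are available.
#[cfg(test)]
mod shape_trait_example {
    use super::*;

    #[test]
    fn ball_as_dyn_shape() {
        // A ball of radius 1 seen only through the `Shape` trait object.
        let shape: Box<dyn Shape> = Box::new(Ball { radius: 1.0 });
        assert!(matches!(shape.shape_type(), ShapeType::Ball));
        assert!(matches!(shape.as_typed_shape(), TypedShape::Ball(_)));
        assert!(shape.as_ball().is_some());
        assert!(shape.as_cuboid().is_none());
    }
}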
| 29.078247 | 93 | 0.61423 |
18e622db6d99453ed3d3d82923a007d82d011ca7
| 1,040 |
use std::convert::TryFrom;
use crate::conv;
use crate::models::{ModelError, ModelResult};
use crate::proto::models::PJob;
pub use self::{
id::*,
name::*,
opcode::DJobOpcode,
status::*,
};
mod id;
mod name;
pub mod opcode;
mod status;
#[derive(Clone, Debug)]
pub struct DJob {
pub id: DJobId,
pub name: DJobName,
pub opcodes: Vec<DJobOpcode>,
pub status: DJobStatus,
}
impl TryFrom<PJob> for DJob {
type Error = ModelError;
fn try_from(PJob { id, name, opcodes, status }: PJob) -> ModelResult<Self> {
Ok(Self {
id: conv!(id as _),
name: conv!(name as _),
opcodes: conv!(opcodes as [_?]),
status: conv!(status? as _?),
})
}
}
impl Into<PJob> for DJob {
fn into(self) -> PJob {
let Self { id, name, opcodes, status } = self;
PJob {
id: conv!(id as _),
name: conv!(name as _),
opcodes: conv!(opcodes as [_]),
status: Some(conv!(status as _)),
}
}
}
| 20.392157 | 80 | 0.538462 |
79b5a2fb0a4f4f43b74d8be74b782546893149d8
| 78 |
fn main() {
    for n in 1..10 {
        println!("{}", n);
}
}
| 9.75 | 28 | 0.358974 |
28ebf195f7e2d988606368623ea0da91f0f1083c
| 11,396 |
use error::{ErrorKind, ParserErrorRef, ExpectedChar};
use lut::Table;
use media_type_impl_utils::lookup_tables::{MediaTypeChars, CText, VCharWs};
use media_type_impl_utils::quoted_string::MimeParsingExt;
pub fn parse_opt_cfws<E: MimeParsingExt>(input: &str) -> Result<usize, ParserErrorRef> {
//CFWS = (1*([FWS] comment) [FWS]) / FWS
//parse just: *([FWS] comment) [FWS]
// which is fine as its a opt CFWS so empty "" is ok, and
// just a comment is ok anyway and just a FWS is also ok anyway.
let mut offset = 0;
loop {
offset = parse_opt_fws::<E>(input, offset)?;
if let Some(new_offset) = opt_parse_comment::<E>(&input[offset..])? {
offset = new_offset;
} else {
return Ok(offset);
}
}
}
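// Worked example (illustration only): for the input " (note) value" with the
// non-obsolete grammar, `parse_opt_cfws::<MimeParsing>` consumes the leading
// space, the "(note)" comment and the trailing space, returning Ok(8), i.e. the
// offset of 'v' in "value".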
fn parse_opt_fws<E: MimeParsingExt>(input: &str, offset: usize) -> Result<usize, ParserErrorRef> {
if E::OBS {
_parse_fws_obs(input, offset)
} else {
_parse_fws_modern(input, offset)
}
}
#[inline]
fn _parse_fws_obs(input: &str, offset: usize) -> Result<usize, ParserErrorRef> {
// obs-FWS = 1*([CRLF] WSP)
// parse *([CRLF] WSP) as it's optional obs-fws
let mut offset = offset;
loop {
let crlfws_offset = parse_opt_crlf_seq(input, offset)?;
let ws_offset = parse_opt_ws_seq(input, crlfws_offset);
if offset == ws_offset {
break
} else {
offset = ws_offset
}
}
Ok(offset)
}
#[inline]
fn _parse_fws_modern(input: &str, offset: usize) -> Result<usize, ParserErrorRef> {
let offset = parse_opt_ws_seq(input, offset);
let crlf_offset = parse_opt_crlf_seq(input, offset)?;
if crlf_offset == offset {
Ok(offset)
} else {
Ok(parse_opt_ws_seq(input, crlf_offset))
}
}
fn opt_parse_comment<E>(input: &str) -> Result<Option<usize>, ParserErrorRef>
where E: MimeParsingExt
{
if input.as_bytes().get(0) == Some(&b'(') {
Ok(Some(inner_parse_comment::<E>(input, 1)?))
} else {
Ok(None)
}
}
/// starts parsing after the initial '('
fn inner_parse_comment<E>(input: &str, offset: usize) -> Result<usize, ParserErrorRef>
where E: MimeParsingExt
{
// comment = "(" *([FWS] ccontent) [FWS] ")"
// ccontent = ctext / quoted-pair / comment
// FWS = ([*WSP "\r\n"] 1*WSP) / obs-FWS
// obs-FWS = 1*([CRLF] WSP)
let mut offset = offset;
loop {
offset = parse_opt_fws::<E>(input, offset)?;
if let Some(&last_byte) = input.as_bytes().get(offset) {
//offset now points BEHIND last_byte
offset += 1;
if MediaTypeChars::check_at(last_byte as usize, CText)
|| (E::ALLOW_UTF8 && last_byte > 0x7f)
{ continue }
match last_byte {
b'(' => {
          // Nested comment: recurse, starting right after the '(' we just consumed.
offset = inner_parse_comment::<E>(input, offset)?;
},
b'\\' => {
offset = parse_quotable::<E>(input, offset)?;
},
b')' => {
return Ok(offset);
},
b'\r' | b'\n' => {
return Err(
ErrorKind::IllegalCrNlSeq { pos: offset - 1 }
.with_input(input)
);
}
_ => {
let charclass =
if E::ALLOW_UTF8 { "ctext / non-ascii-utf8 / '(' / ')' / '\\'" }
else { "ctext / '(' / ')' / '\\'" };
return Err(ErrorKind::UnexpectedChar {
//remember offset already points to the next char
pos: offset - 1,
expected: ExpectedChar::CharClass(charclass)
}.with_input(input));
}
}
} else {
return Err(ErrorKind::UnexpectedEof.with_input(input));
}
}
}
fn parse_quotable<E: MimeParsingExt>(input: &str, offset: usize) -> Result<usize, ParserErrorRef> {
if let Some(&byte) = input.as_bytes().get(offset) {
let valid =
if E::OBS {
byte <= 0x7f
} else {
MediaTypeChars::check_at(byte as usize, VCharWs)
};
if valid {
Ok(offset + 1)
} else {
        let charclass = if E::OBS { "quotable/obs-quotable" } else { "quotable" };
Err(
ErrorKind::UnexpectedChar {
pos: offset, expected: ExpectedChar::CharClass(charclass)
}.with_input(input)
)
}
} else {
Err(ErrorKind::UnexpectedEof.with_input(input))
}
}
/// Parses both "\r\n " and "\r\n\t"
///
/// if the input does not start with `'\r'` then `offset` is returned
///
/// # Error
///
/// returns an error if the input starts with `'\r'` but does not continue with
/// either `"\n "` or `"\n\t"`
///
#[inline]
pub fn parse_opt_crlf_seq(input: &str, offset: usize) -> Result<usize, ParserErrorRef> {
if input.as_bytes().get(offset) != Some(&b'\r') {
Ok(offset)
} else {
if input.as_bytes().get(offset + 1) == Some(&b'\n') {
if input.as_bytes().get(offset + 2).map(|bt|is_ws(*bt)).unwrap_or(false) {
return Ok(offset + 3)
}
}
Err(ErrorKind::IllegalCrNlSeq { pos: offset }.with_input(input))
}
}
#[inline]
pub fn is_ws(bch: u8) -> bool {
bch == b' ' || bch == b'\t'
}
#[inline]
pub fn parse_opt_ws_seq(input: &str, offset: usize) -> usize {
let mut offset = offset;
let bdata = input.as_bytes();
while bdata.get(offset).map(|bt| is_ws(*bt)).unwrap_or(false) {
offset += 1;
}
offset
}
#[cfg(test)]
mod test {
mod opt_parse_comment {
use media_type_impl_utils::quoted_string::{MimeObsParsing, MimeParsing};
use super::super::*;
#[test]
fn empty() {
assert_eq!(opt_parse_comment::<MimeObsParsing>("()"), Ok(Some(2)));
}
#[test]
fn simple() {
let text = "(so is a \"comment)";
assert_eq!(
opt_parse_comment::<MimeObsParsing>(text),
Ok(Some(text.len()))
);
}
#[test]
fn with_quoted_pair() {
let text = "(so is a \\(comment)";
assert_eq!(
opt_parse_comment::<MimeObsParsing>(text),
Ok(Some(text.len()))
);
}
#[test]
fn with_comment() {
let text = "(= (+ (* 2 3) 4) 10)";
assert_eq!(
opt_parse_comment::<MimeObsParsing>(text),
Ok(Some(text.len()))
);
}
#[test]
fn with_fws() {
let text = "(= (+ \r\n (* 2 3) 4) 10)";
assert_eq!(
opt_parse_comment::<MimeParsing>(text),
Ok(Some(text.len()))
);
}
#[test]
fn with_fws_ons() {
let text = "(= (+ \r\n (* 2 3) 4) 10)";
assert_eq!(
opt_parse_comment::<MimeObsParsing>(text),
Ok(Some(text.len()))
);
}
#[test]
fn with_more_data_at_the_end() {
let cmd = "(abc yay d)";
let more = "so dada";
assert_eq!(
opt_parse_comment::<MimeObsParsing>((String::from(cmd) + more).as_str()),
Ok(Some(cmd.len()))
);
}
#[test]
fn obs_with_bad_fws_no_cr() {
let text = "(= (+ \n (* 2 3) 4) 10)";
let res = opt_parse_comment::<MimeObsParsing>(text);
assert_eq!(res, Err(
ErrorKind::IllegalCrNlSeq { pos: 6 }
.with_input("(= (+ \n (* 2 3) 4) 10)")
));
}
#[test]
fn with_bad_fws_no_cr() {
let text = "(= (+ \n (* 2 3) 4) 10)";
let res = opt_parse_comment::<MimeParsing>(text);
assert_eq!(res, Err(
ErrorKind::IllegalCrNlSeq { pos: 6 }
.with_input("(= (+ \n (* 2 3) 4) 10)")
));
}
#[test]
fn with_bad_fws_twice_in_row() {
let res = opt_parse_comment::<MimeParsing>("(= (+ \r\n \r\n (* 2 3) 4) 10)");
assert_eq!(res, Err(
ErrorKind::IllegalCrNlSeq { pos: 9 }
.with_input("(= (+ \r\n \r\n (* 2 3) 4) 10)")
));
}
#[test]
fn with_fws_twice_in_row_obs_grammar() {
let text = "(= (+ \r\n \r\n (* 2 3) 4) 10)";
assert_eq!(
opt_parse_comment::<MimeObsParsing>(text),
Ok(Some(text.len()))
);
}
#[test]
fn not_a_comment() {
let text = " (noop)";
let res = opt_parse_comment::<MimeParsing>(text);
assert_eq!(res, Ok(None));
}
}
mod _parse_fws_modern {
use super::super::_parse_fws_modern;
#[test]
fn crlf_space() {
let text = "\r\n ";
assert_eq!(_parse_fws_modern(text, 0), Ok(3));
}
#[test]
fn crlf_tab() {
let text = "\r\n\t";
assert_eq!(_parse_fws_modern(text, 0), Ok(3));
}
#[test]
fn ws_then_crlf() {
let text = " \r\n ";
assert_eq!(_parse_fws_modern(text, 0), Ok(5));
}
#[test]
fn ws_then_crlf_then_ws() {
let text = " \r\n abcde";
assert_eq!(_parse_fws_modern(text, 0), Ok(7));
}
#[test]
fn wsonly() {
let text = " ";
assert_eq!(_parse_fws_modern(text, 0), Ok(5));
}
#[test]
fn no_fws() {
let text = "";
assert_eq!(_parse_fws_modern(text, 0), Ok(0));
}
}
mod parse_opt_crlf_seq {
use super::super::parse_opt_crlf_seq;
#[test]
fn non_crlf() {
let text = "abc";
assert_eq!(parse_opt_crlf_seq(text, 0), Ok(0));
}
#[test]
fn crnl_space() {
let text = "\r\n ";
assert_eq!(parse_opt_crlf_seq(text, 0), Ok(3));
}
#[test]
fn crnl_tab() {
let text = "\r\n\t";
assert_eq!(parse_opt_crlf_seq(text, 0), Ok(3));
}
}
mod parse_opt_ws_seq {
use super::super::parse_opt_ws_seq;
#[test]
fn no_ws() {
let text = "";
assert_eq!(parse_opt_ws_seq(text, 0), 0)
}
#[test]
fn spaces() {
let text = " ";
assert_eq!(parse_opt_ws_seq(text, 0), 3)
}
#[test]
fn spaces_then_more() {
let text = " abc";
assert_eq!(parse_opt_ws_seq(text, 0), 3)
}
#[test]
fn mixed_spaces() {
let text = " \t\t \t";
assert_eq!(parse_opt_ws_seq(text, 0), 5)
}
#[test]
fn start_offset() {
let text = "a \t\t \t";
assert_eq!(parse_opt_ws_seq(text, 2), text.len())
}
}
}
| 28.561404 | 100 | 0.474465 |
232d9349f906342c9faf145924cedfee5bf6b75d
| 12,005 |
#![allow(missing_docs)]
//! This crate contains types only for working with JWK and JWK Sets.
//! This is only meant to be used to deal with public JWK, not to generate them.
//! Most of the code in this file is taken from https://github.com/lawliet89/biscuit but
//! tweaked to remove the private bits, as that's not the goal for this crate currently.
use crate::Algorithm;
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
use std::fmt;
/// The intended usage of the public `KeyType`. This enum is serialized `untagged`
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub enum PublicKeyUse {
/// Indicates a public key is meant for signature verification
Signature,
/// Indicates a public key is meant for encryption
Encryption,
/// Other usage
Other(String),
}
impl Serialize for PublicKeyUse {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let string = match *self {
PublicKeyUse::Signature => "sig",
PublicKeyUse::Encryption => "enc",
PublicKeyUse::Other(ref other) => other,
};
serializer.serialize_str(string)
}
}
impl<'de> Deserialize<'de> for PublicKeyUse {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct PublicKeyUseVisitor;
impl<'de> de::Visitor<'de> for PublicKeyUseVisitor {
type Value = PublicKeyUse;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "a string")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(match v {
"sig" => PublicKeyUse::Signature,
"enc" => PublicKeyUse::Encryption,
other => PublicKeyUse::Other(other.to_string()),
})
}
}
deserializer.deserialize_string(PublicKeyUseVisitor)
}
}
/// Operations that the key is intended to be used for. This enum is serialized `untagged`
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub enum KeyOperations {
    /// Compute digital signature or MAC
Sign,
/// Verify digital signature or MAC
Verify,
/// Encrypt content
Encrypt,
/// Decrypt content and validate decryption, if applicable
Decrypt,
/// Encrypt key
WrapKey,
/// Decrypt key and validate decryption, if applicable
UnwrapKey,
/// Derive key
DeriveKey,
/// Derive bits not to be used as a key
DeriveBits,
/// Other operation
Other(String),
}
impl Serialize for KeyOperations {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let string = match *self {
KeyOperations::Sign => "sign",
KeyOperations::Verify => "verify",
KeyOperations::Encrypt => "encrypt",
KeyOperations::Decrypt => "decrypt",
KeyOperations::WrapKey => "wrapKey",
KeyOperations::UnwrapKey => "unwrapKey",
KeyOperations::DeriveKey => "deriveKey",
KeyOperations::DeriveBits => "deriveBits",
KeyOperations::Other(ref other) => other,
};
serializer.serialize_str(string)
}
}
impl<'de> Deserialize<'de> for KeyOperations {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct KeyOperationsVisitor;
impl<'de> de::Visitor<'de> for KeyOperationsVisitor {
type Value = KeyOperations;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "a string")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(match v {
"sign" => KeyOperations::Sign,
"verify" => KeyOperations::Verify,
"encrypt" => KeyOperations::Encrypt,
"decrypt" => KeyOperations::Decrypt,
"wrapKey" => KeyOperations::WrapKey,
"unwrapKey" => KeyOperations::UnwrapKey,
"deriveKey" => KeyOperations::DeriveKey,
"deriveBits" => KeyOperations::DeriveBits,
other => KeyOperations::Other(other.to_string()),
})
}
}
deserializer.deserialize_string(KeyOperationsVisitor)
}
}
/// Common JWK parameters
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Default, Hash)]
pub struct CommonParameters {
/// The intended use of the public key. Should not be specified with `key_operations`.
/// See sections 4.2 and 4.3 of [RFC7517](https://tools.ietf.org/html/rfc7517).
#[serde(rename = "use", skip_serializing_if = "Option::is_none", default)]
pub public_key_use: Option<PublicKeyUse>,
/// The "key_ops" (key operations) parameter identifies the operation(s)
/// for which the key is intended to be used. The "key_ops" parameter is
/// intended for use cases in which public, private, or symmetric keys
/// may be present.
/// Should not be specified with `public_key_use`.
/// See sections 4.2 and 4.3 of [RFC7517](https://tools.ietf.org/html/rfc7517).
#[serde(rename = "key_ops", skip_serializing_if = "Option::is_none", default)]
pub key_operations: Option<Vec<KeyOperations>>,
/// The algorithm intended for use with the key
#[serde(rename = "alg", skip_serializing_if = "Option::is_none", default)]
pub algorithm: Option<Algorithm>,
/// The case sensitive Key ID for the key
#[serde(rename = "kid", skip_serializing_if = "Option::is_none", default)]
pub key_id: Option<String>,
    /// X.509 public key certificate URL. This is currently not implemented (correctly).
///
/// Serialized to `x5u`.
#[serde(rename = "x5u", skip_serializing_if = "Option::is_none")]
pub x509_url: Option<String>,
/// X.509 public key certificate chain. This is currently not implemented (correctly).
///
/// Serialized to `x5c`.
#[serde(rename = "x5c", skip_serializing_if = "Option::is_none")]
pub x509_chain: Option<Vec<String>>,
/// X.509 Certificate SHA1 thumbprint. This is currently not implemented (correctly).
///
/// Serialized to `x5t`.
#[serde(rename = "x5t", skip_serializing_if = "Option::is_none")]
pub x509_sha1_fingerprint: Option<String>,
/// X.509 Certificate SHA256 thumbprint. This is currently not implemented (correctly).
///
/// Serialized to `x5t#S256`.
#[serde(rename = "x5t#S256", skip_serializing_if = "Option::is_none")]
pub x509_sha256_fingerprint: Option<String>,
}
/// Key type value for an Elliptic Curve Key.
/// This single value enum is a workaround for Rust not supporting associated constants.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub enum EllipticCurveKeyType {
/// Key type value for an Elliptic Curve Key.
EC,
}
impl Default for EllipticCurveKeyType {
fn default() -> Self {
EllipticCurveKeyType::EC
}
}
/// Type of cryptographic curve used by a key. This is defined in
/// [RFC 7518 #7.6](https://tools.ietf.org/html/rfc7518#section-7.6)
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub enum EllipticCurve {
/// P-256 curve
#[serde(rename = "P-256")]
P256,
/// P-384 curve
#[serde(rename = "P-384")]
P384,
/// P-521 curve -- unsupported by `ring`.
#[serde(rename = "P-521")]
P521,
}
impl Default for EllipticCurve {
fn default() -> Self {
EllipticCurve::P256
}
}
/// Parameters for an Elliptic Curve Key
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Default, Hash)]
pub struct EllipticCurveKeyParameters {
/// Key type value for an Elliptic Curve Key.
#[serde(rename = "kty")]
pub key_type: EllipticCurveKeyType,
/// The "crv" (curve) parameter identifies the cryptographic curve used
/// with the key.
#[serde(rename = "crv")]
pub curve: EllipticCurve,
/// The "x" (x coordinate) parameter contains the x coordinate for the
/// Elliptic Curve point.
pub x: String,
/// The "y" (y coordinate) parameter contains the y coordinate for the
/// Elliptic Curve point.
pub y: String,
}
/// Key type value for an RSA Key.
/// This single value enum is a workaround for Rust not supporting associated constants.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub enum RSAKeyType {
/// Key type value for an RSA Key.
RSA,
}
impl Default for RSAKeyType {
fn default() -> Self {
RSAKeyType::RSA
}
}
/// Parameters for a RSA Key
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Default, Hash)]
pub struct RSAKeyParameters {
/// Key type value for a RSA Key
#[serde(rename = "kty")]
pub key_type: RSAKeyType,
/// The "n" (modulus) parameter contains the modulus value for the RSA
/// public key.
pub n: String,
/// The "e" (exponent) parameter contains the exponent value for the RSA
/// public key.
pub e: String,
}
/// Key type value for an Octet symmetric key.
/// This single value enum is a workaround for Rust not supporting associated constants.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub enum OctetKeyType {
/// Key type value for an Octet symmetric key.
#[serde(rename = "oct")]
Octet,
}
impl Default for OctetKeyType {
fn default() -> Self {
OctetKeyType::Octet
}
}
/// Parameters for an Octet Key
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Default, Hash)]
pub struct OctetKeyParameters {
/// Key type value for an Octet Key
#[serde(rename = "kty")]
pub key_type: OctetKeyType,
/// The octet key value
pub value: String,
}
/// Key type value for an Octet Key Pair.
/// This single value enum is a workaround for Rust not supporting associated constants.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub enum OctetKeyPairType {
/// Key type value for an Octet Key Pair.
#[serde(rename = "OKP")]
OctetKeyPair,
}
impl Default for OctetKeyPairType {
fn default() -> Self {
OctetKeyPairType::OctetKeyPair
}
}
/// Parameters for an Octet Key Pair
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Default, Hash)]
pub struct OctetKeyPairParameters {
/// Key type value for an Octet Key Pair
#[serde(rename = "kty")]
pub key_type: OctetKeyPairType,
/// The "crv" (curve) parameter identifies the cryptographic curve used
/// with the key.
#[serde(rename = "crv")]
pub curve: EllipticCurve,
/// The "x" parameter contains the base64 encoded public key
pub x: String,
}
/// Algorithm specific parameters
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
#[serde(untagged)]
pub enum AlgorithmParameters {
EllipticCurve(EllipticCurveKeyParameters),
RSA(RSAKeyParameters),
OctetKey(OctetKeyParameters),
OctetKeyPair(OctetKeyPairParameters),
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub struct Jwk {
#[serde(flatten)]
pub common: CommonParameters,
/// Key algorithm specific parameters
#[serde(flatten)]
pub algorithm: AlgorithmParameters,
}
/// A JWK set
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct JwkSet {
pub keys: Vec<Jwk>,
}
impl JwkSet {
/// Find the key in the set that matches the given key id, if any.
pub fn find(&self, kid: &str) -> Option<&Jwk> {
self.keys
.iter()
.find(|jwk| jwk.common.key_id.is_some() && jwk.common.key_id.as_ref().unwrap() == kid)
}
}
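// An illustrative sketch (not from the original crate): looking up a key by
// `kid` in a deserialized `JwkSet`. It assumes `serde_json` is available as a
// (dev-)dependency; the "n"/"e" values below are placeholders, not real key
// material.
#[cfg(test)]
mod jwk_set_usage_sketch {
    use super::*;

    #[test]
    fn find_rsa_key_by_kid() {
        // A single-key set following RFC 7517, with dummy RSA parameters.
        let raw = r#"{"keys":[{"kty":"RSA","kid":"key-1","n":"AQAB","e":"AQAB"}]}"#;
        let set: JwkSet = serde_json::from_str(raw).expect("valid JWK Set JSON");
        let jwk = set.find("key-1").expect("key-1 should be present");
        match &jwk.algorithm {
            AlgorithmParameters::RSA(rsa) => assert_eq!(rsa.e, "AQAB"),
            other => panic!("expected an RSA key, got {:?}", other),
        }
    }
}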
| 32.890411 | 98 | 0.631987 |
710e605868b8e759dc0c9e77c26c93bac32e4445
| 2,869 |
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use diem_types::transaction::ScriptABI;
use serde_generate::CustomCode;
use std::io::Read;
/// Support for code-generation in C++17.
pub mod cpp;
/// Support for code-generation in Go >= 1.13.
pub mod golang;
/// Support for code-generation in Java 8.
pub mod java;
/// Support for code-generation in Python 3.
pub mod python3;
/// Support for code-generation in Rust.
pub mod rust;
/// Support for code-generation in TypeScript.
pub mod typescript;
/// Support for code-generation in C#.
pub mod csharp;
/// Internals shared between languages.
mod common;
/// Read all ABI files in a directory. This supports both new and old `ScriptABI`s.
pub fn read_abis<P: AsRef<std::path::Path>>(dir_path: P) -> anyhow::Result<Vec<ScriptABI>> {
let mut abis = Vec::<ScriptABI>::new();
for entry in std::fs::read_dir(dir_path)? {
let entry = entry?;
let path = entry.path();
if path.is_dir() {
continue;
}
let mut buffer = Vec::new();
let mut f = std::fs::File::open(path)?;
f.read_to_end(&mut buffer)?;
abis.push(bcs::from_bytes(&buffer)?);
}
// Sort scripts by alphabetical order.
#[allow(clippy::unnecessary_sort_by)]
abis.sort_by(|a, b| a.name().cmp(b.name()));
Ok(abis)
}
/// How to copy ABI-generated source code for a given language.
pub trait SourceInstaller {
type Error;
/// Create a module exposing the transaction builders for the given ABIs.
fn install_transaction_builders(
&self,
name: &str,
abis: &[ScriptABI],
) -> std::result::Result<(), Self::Error>;
}
/// How to read custom code to inject in Diem containers.
pub fn read_custom_code_from_paths<'a, I>(package: &'a [&'a str], paths: I) -> CustomCode
where
I: Iterator<Item = std::path::PathBuf>,
{
paths
.map(|path| {
let container_name = path
.file_stem()
.expect("file name must have a non-empty stem")
.to_str()
.expect("file names must be valid UTF8")
.to_string();
let mut container_path = package.iter().map(|s| s.to_string()).collect::<Vec<_>>();
container_path.push(container_name);
let content = std::fs::read_to_string(path).expect("custom code file must be readable");
// Skip initial comments (e.g. copyright headers) and empty lines.
let lines = content.lines().skip_while(|line| {
line.starts_with("// ") || line.starts_with("# ") || line.is_empty()
});
let mut code = lines.collect::<Vec<_>>().join("\n");
if !code.ends_with('\n') {
code += "\n";
}
(container_path, code)
})
.collect()
}
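// An illustrative sketch (not from the original module), kept as a comment
// because it depends on on-disk ABI files: read the ABIs from a directory and
// hand them to any `SourceInstaller` implementation. The directory path and
// module name below are placeholders.
//
// let abis = read_abis("path/to/script_abis")?;
// installer.install_transaction_builders("my_framework", &abis)?;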
| 33.360465 | 100 | 0.599163 |
ac802e1bf1553df34d4d79f3e86e46f6a15f7e30
| 8,851 |
//! Various wrappers and helper structs.
//!
//! The types here are not expected to be used directly. These wrap some things (futures,
//! references) and implement other functionality on them, but are usually created through methods
//! in [`prelude`](../prelude/index.html).
//!
//! Despite that, they still can be created and used directly if the need arises.
use std::panic;
use futures::{Async, AsyncSink, Future, Poll, Sink, Stream};
use prelude::CoroutineFuture;
use errors::Dropped;
/// An iterator returned from
/// [`CoroutineStream::iter_cleanup`](../prelude/trait.CoroutineStream.html#method.iter_cleanup).
///
/// It wraps a stream and allows iterating through it.
pub struct CleanupIterator<S>(Option<S>);
impl<S> CleanupIterator<S> {
/// A constructor.
pub fn new(stream: S) -> Self {
CleanupIterator(Some(stream))
}
/// Extracts the stream inside.
///
/// # Returns
///
/// * `Ok(stream)` under normal circumstances.
/// * `Err(Dropped)` if the stream got lost when the reactor got dropped while iterating.
pub fn into_inner(self) -> Result<S, Dropped> {
self.0.ok_or(Dropped)
}
}
impl<I, E, S: Stream<Item = I, Error = E>> Iterator for CleanupIterator<S> {
type Item = Result<Result<I, E>, Dropped>;
fn next(&mut self) -> Option<Result<Result<I, E>, Dropped>> {
let resolved = match self.0.take() {
Some(stream) => stream.into_future().coro_wait_cleanup(),
None => return Some(Err(Dropped)), // Dropped in previous attempt to iterate. Still dead.
};
let (result, stream) = match resolved {
Ok(Ok((None, stream))) => (None, Some(stream)),
Ok(Ok((Some(ok), stream))) => (Some(Ok(Ok(ok))), Some(stream)),
Ok(Err((err, stream))) => (Some(Ok(Err(err))), Some(stream)),
Err(Dropped) => (Some(Err(Dropped)), None),
};
self.0 = stream;
result
}
}
fn drop_panic<T>(r: Result<T, Dropped>) -> T {
r.unwrap_or_else(|_| panic::resume_unwind(Box::new(Dropped)))
}
/// An iterator returned from
/// [`CoroutineStream::iter_ok`](../prelude/trait.CoroutineStream.html#method.iter_ok).
///
/// This wraps the [`CleanupIterator`](struct.CleanupIterator.html) and provides iteration through
/// the successful items.
pub struct OkIterator<I>(I);
impl<I> OkIterator<I> {
/// A constructor.
pub fn new(inner: I) -> Self {
OkIterator(inner)
}
/// Extracts the `CleanupIterator` inside.
pub fn into_inner(self) -> I {
self.0
}
}
impl<I, E, S: Stream<Item = I, Error = E>> Iterator for OkIterator<CleanupIterator<S>> {
type Item = I;
fn next(&mut self) -> Option<I> {
self.0
.next()
.map(drop_panic)
.and_then(Result::ok)
}
}
/// An iterator returned from
/// [`CoroutineStream::iter_result`](../prelude/trait.CoroutineStream.html#method.iter_result).
///
/// This wraps the [`CleanupIterator`](struct.CleanupIterator.html) and provides iteration through
/// the direct results.
pub struct ResultIterator<I>(I);
impl<I> ResultIterator<I> {
/// A constructor.
pub fn new(inner: I) -> Self {
ResultIterator(inner)
}
/// Extracts the `CleanupIterator` inside.
pub fn into_inner(self) -> I {
self.0
}
}
impl<I, E, S: Stream<Item = I, Error = E>> Iterator for ResultIterator<CleanupIterator<S>> {
type Item = Result<I, E>;
fn next(&mut self) -> Option<Result<I, E>> {
self.0
.next()
.map(drop_panic)
}
}
/// A future that extracts one item from a stream.
///
/// This is the future returned from
/// [`CoroutineStream::extractor`](../prelude/trait.CoroutineStream.html#method.extractor). It
/// borrows the stream mutably and allows taking one item out of it.
///
/// Unlike `Stream::into_future`, this does not consume the stream.
pub struct StreamExtractor<'a, S: 'a>(&'a mut S);
impl<'a, S: 'a> StreamExtractor<'a, S> {
/// A constructor.
pub fn new(stream: &'a mut S) -> Self {
StreamExtractor(stream)
}
}
impl<'a, I, E, S: Stream<Item = I, Error = E> + 'a> Future for StreamExtractor<'a, S> {
type Item = Option<I>;
type Error = E;
fn poll(&mut self) -> Poll<Option<I>, E> {
self.0.poll()
}
}
/// A future sending a sequence of items into a sink.
///
/// This borrows a sink and sends the provided items (from an iterator) into it. It is returned by
/// [`CoroutineSink::coro_sender`](../prelude/trait.CoroutineSink.html#method.coro_sender).
pub struct SinkSender<'a, V, S: 'a, I: Iterator<Item = V>> {
sink: &'a mut S,
iter: Option<I>,
value: Option<V>,
}
impl<'a, V, S: 'a, I: Iterator<Item = V>> SinkSender<'a, V, S, I> {
/// A constructor.
pub fn new<Src: IntoIterator<IntoIter = I, Item = V>>(sink: &'a mut S, src: Src) -> Self {
let iter = src.into_iter();
Self {
sink,
iter: Some(iter),
value: None,
}
}
// Pull the next value from somewhere.
fn next(&mut self) -> Option<V> {
// A postponed value
if self.value.is_some() {
return self.value.take();
}
// If we have nothing postponed, try pulling it from an iterator, if we have one.
let result = self.iter.as_mut().and_then(Iterator::next);
// If we got nothing, then make sure we don't call the iterator again.
if result.is_none() {
self.iter = None;
}
result
}
}
impl<'a, V, E, S, I> Future for SinkSender<'a, V, S, I>
where
S: Sink<SinkItem = V, SinkError = E> + 'a,
I: Iterator<Item = V>,
{
type Item = ();
type Error = E;
fn poll(&mut self) -> Poll<(), E> {
// First, try to push as much inside as possible.
while let Some(value) = self.next() {
match self.sink.start_send(value) {
Err(e) => return Err(e), // Early abort on errors.
Ok(AsyncSink::NotReady(returned)) => {
// This item doesn't fit. Hold onto it until we are called again.
self.value = Some(returned);
return Ok(Async::NotReady);
},
Ok(AsyncSink::Ready) => (), // Accepted, try next one.
}
}
// By now, we put everything into the sink. Try flushing it.
self.sink.poll_complete()
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::stream;
use futures::sync::mpsc;
use tokio::prelude::*;
use tokio::runtime::current_thread;
use prelude::*;
/// Test getting things out of a stream one by one.
///
/// This is similar to the .into_future() stream modifier, but doesn't consume the stream. That
/// is more convenient in the context of coroutines, which allow waiting for non-'static
/// futures.
#[test]
fn stream_extract() {
let mut s = stream::once::<_, ()>(Ok(42));
assert_eq!(StreamExtractor::new(&mut s).wait(), Ok(Some(42)));
assert_eq!(StreamExtractor::new(&mut s).wait(), Ok(None));
}
/// A test checking that sink_sender feeds everything to the sink.
///
    /// This one doesn't do much async work, though, as everything fits inside right away.
#[test]
fn sink_sender() {
let (mut sender, receiver) = mpsc::unbounded();
let data = vec![1, 2, 3];
{
let sender_fut = SinkSender::new(&mut sender, data.clone());
// Just plain old future's wait. No coroutines here.
sender_fut.wait().unwrap();
}
drop(sender); // EOF the channel
// The data is there.
let received = receiver.wait().collect::<Result<Vec<_>, _>>().unwrap();
assert_eq!(data, received);
}
/// An async version of the above.
///
/// It needs to switch between the two futures to complete, because not everything fits.
#[test]
fn async_sink_sender() {
current_thread::block_on_all(future::lazy(|| {
let (mut sender, receiver) = mpsc::channel(1);
let sending_fut = Coroutine::with_defaults(move || {
let data = vec![1, 2, 3];
Coroutine::wait(SinkSender::new(&mut sender, data))
.unwrap()
.unwrap();
});
let receiving_fut = Coroutine::with_defaults(move || {
let mut result = Vec::new();
Coroutine::wait(receiver.for_each(|val| {
result.push(val);
Ok(())
}))
.unwrap()
.unwrap();
assert_eq!(vec![1, 2, 3], result);
});
receiving_fut.join(sending_fut)
})).unwrap();
}
}
| 32.540441 | 101 | 0.575528 |
3ac810834484a85eb12b6288d020ba24cfde2380
| 12,535 |
//! HAL interface to the UARTE peripheral
//!
//! See product specification:
//!
//! - nrf52832: Section 35
//! - nrf52840: Section 6.34
use core::fmt;
use core::ops::Deref;
use core::sync::atomic::{compiler_fence, Ordering::SeqCst};
use embedded_hal::digital::v2::OutputPin;
#[cfg(any(feature = "52833", feature = "52840"))]
use crate::target::UARTE1;
#[cfg(feature = "9160")]
use crate::target::{uarte0_ns as uarte0, UARTE0_NS as UARTE0, UARTE1_NS as UARTE1};
#[cfg(not(feature = "9160"))]
use crate::target::{uarte0, UARTE0};
use crate::gpio::{Floating, Input, Output, Pin, PushPull};
use crate::prelude::*;
use crate::slice_in_ram_or;
use crate::target_constants::EASY_DMA_SIZE;
use crate::timer::{self, Timer};
// Re-export SVD variants to allow user to directly set values
pub use uarte0::{baudrate::BAUDRATE_A as Baudrate, config::PARITY_A as Parity};
/// Interface to a UARTE instance
///
/// This is a very basic interface that comes with the following limitations:
/// - The UARTE instances share the same address space with instances of UART.
/// You need to make sure that conflicting instances
/// are disabled before using `Uarte`. See product specification:
/// - nrf52832: Section 15.2
/// - nrf52840: Section 6.1.2
pub struct Uarte<T>(T);
impl<T> Uarte<T>
where
T: Instance,
{
pub fn new(uarte: T, mut pins: Pins, parity: Parity, baudrate: Baudrate) -> Self {
// Select pins
uarte.psel.rxd.write(|w| {
let w = unsafe { w.pin().bits(pins.rxd.pin()) };
#[cfg(any(feature = "52833", feature = "52840"))]
let w = w.port().bit(pins.rxd.port().bit());
w.connect().connected()
});
pins.txd.set_high().unwrap();
uarte.psel.txd.write(|w| {
let w = unsafe { w.pin().bits(pins.txd.pin()) };
#[cfg(any(feature = "52833", feature = "52840"))]
let w = w.port().bit(pins.txd.port().bit());
w.connect().connected()
});
// Optional pins
uarte.psel.cts.write(|w| {
if let Some(ref pin) = pins.cts {
let w = unsafe { w.pin().bits(pin.pin()) };
#[cfg(any(feature = "52833", feature = "52840"))]
let w = w.port().bit(pin.port().bit());
w.connect().connected()
} else {
w.connect().disconnected()
}
});
uarte.psel.rts.write(|w| {
if let Some(ref pin) = pins.rts {
let w = unsafe { w.pin().bits(pin.pin()) };
#[cfg(any(feature = "52833", feature = "52840"))]
let w = w.port().bit(pin.port().bit());
w.connect().connected()
} else {
w.connect().disconnected()
}
});
// Enable UARTE instance
uarte.enable.write(|w| w.enable().enabled());
// Configure
let hardware_flow_control = pins.rts.is_some() && pins.cts.is_some();
uarte
.config
.write(|w| w.hwfc().bit(hardware_flow_control).parity().variant(parity));
// Configure frequency
uarte.baudrate.write(|w| w.baudrate().variant(baudrate));
Uarte(uarte)
}
/// Write via UARTE
///
    /// This method transmits all bytes in `tx_buffer`
///
/// The buffer must have a length of at most 255 bytes on the nRF52832
/// and at most 65535 bytes on the nRF52840.
pub fn write(&mut self, tx_buffer: &[u8]) -> Result<(), Error> {
if tx_buffer.len() > EASY_DMA_SIZE {
return Err(Error::TxBufferTooLong);
}
// We can only DMA out of RAM
slice_in_ram_or(tx_buffer, Error::BufferNotInRAM)?;
// Conservative compiler fence to prevent optimizations that do not
// take in to account actions by DMA. The fence has been placed here,
// before any DMA action has started
compiler_fence(SeqCst);
// Reset the events.
self.0.events_endtx.reset();
self.0.events_txstopped.reset();
// Set up the DMA write
self.0.txd.ptr.write(|w|
// We're giving the register a pointer to the stack. Since we're
// waiting for the UARTE transaction to end before this stack pointer
// becomes invalid, there's nothing wrong here.
//
// The PTR field is a full 32 bits wide and accepts the full range
// of values.
unsafe { w.ptr().bits(tx_buffer.as_ptr() as u32) });
self.0.txd.maxcnt.write(|w|
// We're giving it the length of the buffer, so no danger of
// accessing invalid memory. We have verified that the length of the
// buffer fits in an `u8`, so the cast to `u8` is also fine.
//
// The MAXCNT field is 8 bits wide and accepts the full range of
// values.
unsafe { w.maxcnt().bits(tx_buffer.len() as _) });
// Start UARTE Transmit transaction
self.0.tasks_starttx.write(|w|
// `1` is a valid value to write to task registers.
unsafe { w.bits(1) });
// Wait for transmission to end
let mut endtx;
let mut txstopped;
loop {
endtx = self.0.events_endtx.read().bits() != 0;
txstopped = self.0.events_txstopped.read().bits() != 0;
if endtx || txstopped {
break;
}
}
// Conservative compiler fence to prevent optimizations that do not
// take in to account actions by DMA. The fence has been placed here,
// after all possible DMA actions have completed
compiler_fence(SeqCst);
if txstopped {
return Err(Error::Transmit);
}
// Lower power consumption by disabling the transmitter once we're
// finished
self.0.tasks_stoptx.write(|w|
// `1` is a valid value to write to task registers.
unsafe { w.bits(1) });
Ok(())
}
/// Read via UARTE
///
/// This method fills all bytes in `rx_buffer`, and blocks
/// until the buffer is full.
///
/// The buffer must have a length of at most 255 bytes
pub fn read(&mut self, rx_buffer: &mut [u8]) -> Result<(), Error> {
self.start_read(rx_buffer)?;
// Wait for transmission to end
while self.0.events_endrx.read().bits() == 0 {}
self.finalize_read();
if self.0.rxd.amount.read().bits() != rx_buffer.len() as u32 {
return Err(Error::Receive);
}
Ok(())
}
/// Read via UARTE
///
/// This method fills all bytes in `rx_buffer`, and blocks
/// until the buffer is full or the timeout expires, whichever
/// comes first.
///
/// If the timeout occurs, an `Error::Timeout(n)` will be returned,
/// where `n` is the number of bytes read successfully.
///
/// This method assumes the interrupt for the given timer is NOT enabled,
/// and in cases where a timeout does NOT occur, the timer will be left running
/// until completion.
///
/// The buffer must have a length of at most 255 bytes
pub fn read_timeout<I>(
&mut self,
rx_buffer: &mut [u8],
timer: &mut Timer<I>,
cycles: u32,
) -> Result<(), Error>
where
I: timer::Instance,
{
// Start the read
self.start_read(rx_buffer)?;
// Start the timeout timer
timer.start(cycles);
// Wait for transmission to end
let mut event_complete = false;
let mut timeout_occured = false;
loop {
event_complete |= self.0.events_endrx.read().bits() != 0;
timeout_occured |= timer.wait().is_ok();
if event_complete || timeout_occured {
break;
}
}
if !event_complete {
// Cancel the reception if it did not complete until now
self.cancel_read();
}
// Cleanup, even in the error case
self.finalize_read();
let bytes_read = self.0.rxd.amount.read().bits() as usize;
if timeout_occured && !event_complete {
return Err(Error::Timeout(bytes_read));
}
if bytes_read != rx_buffer.len() as usize {
return Err(Error::Receive);
}
Ok(())
}
/// Start a UARTE read transaction by setting the control
/// values and triggering a read task
fn start_read(&mut self, rx_buffer: &mut [u8]) -> Result<(), Error> {
// This is overly restrictive. See (similar SPIM issue):
// https://github.com/nrf-rs/nrf52/issues/17
if rx_buffer.len() > u8::max_value() as usize {
            return Err(Error::RxBufferTooLong);
}
// NOTE: RAM slice check is not necessary, as a mutable slice can only be
// built from data located in RAM
// Conservative compiler fence to prevent optimizations that do not
// take in to account actions by DMA. The fence has been placed here,
// before any DMA action has started
compiler_fence(SeqCst);
// Set up the DMA read
self.0.rxd.ptr.write(|w|
// We're giving the register a pointer to the stack. Since we're
// waiting for the UARTE transaction to end before this stack pointer
// becomes invalid, there's nothing wrong here.
//
// The PTR field is a full 32 bits wide and accepts the full range
// of values.
unsafe { w.ptr().bits(rx_buffer.as_ptr() as u32) });
self.0.rxd.maxcnt.write(|w|
// We're giving it the length of the buffer, so no danger of
// accessing invalid memory. We have verified that the length of the
// buffer fits in an `u8`, so the cast to `u8` is also fine.
//
// The MAXCNT field is at least 8 bits wide and accepts the full
// range of values.
unsafe { w.maxcnt().bits(rx_buffer.len() as _) });
// Start UARTE Receive transaction
self.0.tasks_startrx.write(|w|
// `1` is a valid value to write to task registers.
unsafe { w.bits(1) });
Ok(())
}
/// Finalize a UARTE read transaction by clearing the event
fn finalize_read(&mut self) {
// Reset the event, otherwise it will always read `1` from now on.
self.0.events_endrx.write(|w| w);
// Conservative compiler fence to prevent optimizations that do not
// take in to account actions by DMA. The fence has been placed here,
// after all possible DMA actions have completed
compiler_fence(SeqCst);
}
/// Stop an unfinished UART read transaction and flush FIFO to DMA buffer
fn cancel_read(&mut self) {
// Stop reception
self.0.tasks_stoprx.write(|w| unsafe { w.bits(1) });
// Wait for the reception to have stopped
while self.0.events_rxto.read().bits() == 0 {}
// Reset the event flag
self.0.events_rxto.write(|w| w);
// Ask UART to flush FIFO to DMA buffer
self.0.tasks_flushrx.write(|w| unsafe { w.bits(1) });
// Wait for the flush to complete.
while self.0.events_endrx.read().bits() == 0 {}
// The event flag itself is later reset by `finalize_read`.
}
/// Return the raw interface to the underlying UARTE peripheral
pub fn free(self) -> T {
self.0
}
}
impl<T> fmt::Write for Uarte<T>
where
T: Instance,
{
fn write_str(&mut self, s: &str) -> fmt::Result {
// Copy all data into an on-stack buffer so we never try to EasyDMA from
// flash
let buf = &mut [0; 16][..];
for block in s.as_bytes().chunks(16) {
buf[..block.len()].copy_from_slice(block);
self.write(&buf[..block.len()]).map_err(|_| fmt::Error)?;
}
Ok(())
}
}
pub struct Pins {
pub rxd: Pin<Input<Floating>>,
pub txd: Pin<Output<PushPull>>,
pub cts: Option<Pin<Input<Floating>>>,
pub rts: Option<Pin<Output<PushPull>>>,
}
#[derive(Debug)]
pub enum Error {
TxBufferTooLong,
RxBufferTooLong,
Transmit,
Receive,
Timeout(usize),
BufferNotInRAM,
}
pub trait Instance: Deref<Target = uarte0::RegisterBlock> {}
impl Instance for UARTE0 {}
#[cfg(any(feature = "52833", feature = "52840", feature = "9160"))]
impl Instance for UARTE1 {}
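// An illustrative sketch (not from the original HAL code), kept as a comment
// because pin and peripheral acquisition is board-specific: `uarte0`,
// `rxd_pin` and `txd_pin` stand in for values taken from the PAC's
// `Peripherals`, and the `Parity`/`Baudrate` variants follow the re-exports at
// the top of this file.
//
// let mut serial = Uarte::new(
//     uarte0,
//     Pins { rxd: rxd_pin, txd: txd_pin, cts: None, rts: None },
//     Parity::EXCLUDED,
//     Baudrate::BAUD115200,
// );
// serial.write(b"hello\r\n").unwrap();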
| 33.249337 | 86 | 0.573833 |
876a5a43fa40556084b737c8707bb427e55f941b
| 19,412 |
//! Core processing module
//!
//! It causes me great pain that I can't figure out how to split these methods up. The fact that we are relying on
//! lifetime coercion to reuse the `shuffler` vector really locks down the possible options.
//!
//! If we go with a dyn trait on the line splitter function it is appreciably slower.
use crate::{
field_range::{FieldRange, RegexOrString},
line_parser::LineParser,
mmap::MmapChoice,
single_byte_delim_parser::SingleByteDelimParser,
};
use anyhow::Result;
use bstr::ByteSlice;
use flate2::read::MultiGzDecoder;
use grep_cli::DecompressionReaderBuilder;
use regex::bytes::Regex;
use ripline::{
line_buffer::{LineBuffer, LineBufferReader},
lines::{self, LineIter},
LineTerminator,
};
use std::{
fs::File,
io::{self, BufRead, BufReader, Read, Write},
path::Path,
};
const DEFAULT_DELIM: &[u8] = &[b'\t'];
/// The input types that `hck` can parse.
pub enum HckInput<P: AsRef<Path>> {
Stdin,
Path(P),
}
/// The config object for [`Core`].
#[derive(Debug, Clone)]
pub struct CoreConfig<'a> {
delimiter: &'a [u8],
output_delimiter: &'a [u8],
line_terminator: LineTerminator,
mmap_choice: MmapChoice,
is_parser_regex: bool,
try_decompress: bool,
raw_fields: Option<&'a str>,
raw_header_fields: Option<&'a [Regex]>,
raw_exclude: Option<&'a str>,
raw_exclude_headers: Option<&'a [Regex]>,
header_is_regex: bool,
parsed_delim: RegexOrString,
}
impl<'a> Default for CoreConfig<'a> {
fn default() -> Self {
Self {
delimiter: DEFAULT_DELIM,
output_delimiter: DEFAULT_DELIM,
line_terminator: LineTerminator::default(),
mmap_choice: unsafe { MmapChoice::auto() },
is_parser_regex: false,
try_decompress: false,
raw_fields: Some("1-"),
raw_header_fields: None,
raw_exclude: None,
raw_exclude_headers: None,
header_is_regex: false,
parsed_delim: RegexOrString::String(
std::str::from_utf8(DEFAULT_DELIM).unwrap().to_string(),
),
}
}
}
impl<'a> CoreConfig<'a> {
/// Get the parsed delimiter
pub fn parsed_delim(&self) -> &RegexOrString {
&self.parsed_delim
}
/// Read the first line of an input and return it.
///
/// It's up to the user to make sure that any consumed bytes are properly handed
/// off to the line parsers later on.
pub fn peek_first_line<P: AsRef<Path>>(
&self,
input: &HckInput<P>,
) -> Result<Vec<u8>, io::Error> {
let mut buffer = String::new();
match input {
HckInput::Stdin => {
// TODO: work out how to decode just a byte slice
if self.try_decompress {
unimplemented!("Header selections not supported when piping gzipped stdin")
}
io::stdin().read_line(&mut buffer)?;
}
HckInput::Path(path) => {
if self.try_decompress {
let reader: Box<dyn Read> = if path
.as_ref()
.to_str()
.map(|p| p.ends_with(".gz"))
.unwrap_or(false)
{
Box::new(MultiGzDecoder::new(File::open(&path)?))
} else {
Box::new(
DecompressionReaderBuilder::new()
// .matcher(matcher)
.build(&path)?,
)
};
let mut reader = BufReader::new(reader);
reader.read_line(&mut buffer)?;
} else {
BufReader::new(File::open(path)?).read_line(&mut buffer)?;
}
}
}
Ok(lines::without_terminator(buffer.as_bytes(), self.line_terminator).to_owned())
}
/// Parse the raw user input fields and header fields. Returns any header bytes read and the parsed fields
pub fn parse_fields<P>(&self, input: &HckInput<P>) -> Result<(Option<Vec<u8>>, Vec<FieldRange>)>
where
P: AsRef<Path>,
{
        // Parse the fields in the context of the files being looked at
let (mut extra, fields) = match (self.raw_fields, self.raw_header_fields) {
(Some(field_list), Some(header_fields)) => {
let first_line = self.peek_first_line(input)?;
let mut fields = FieldRange::from_list(field_list)?;
let header_fields = FieldRange::from_header_list(
header_fields,
first_line.as_bytes(),
&self.parsed_delim,
self.header_is_regex,
false,
)?;
fields.extend(header_fields.into_iter());
FieldRange::post_process_ranges(&mut fields);
(Some(first_line), fields)
}
(Some(field_list), None) => (None, FieldRange::from_list(field_list)?),
(None, Some(header_fields)) => {
let first_line = self.peek_first_line(input)?;
let fields = FieldRange::from_header_list(
header_fields,
first_line.as_bytes(),
&self.parsed_delim,
self.header_is_regex,
false,
)?;
(Some(first_line), fields)
}
(None, None) => (None, FieldRange::from_list("1-")?),
};
let fields = match (&self.raw_exclude, &self.raw_exclude_headers) {
(Some(exclude), Some(exclude_header)) => {
let exclude = FieldRange::from_list(exclude)?;
let fields = FieldRange::exclude(fields, exclude);
let first_line = if let Some(first_line) = extra {
first_line
} else {
self.peek_first_line(input)?
};
let exclude_headers = FieldRange::from_header_list(
exclude_header,
first_line.as_bytes(),
&self.parsed_delim,
self.header_is_regex,
true,
)?;
extra = Some(first_line);
FieldRange::exclude(fields, exclude_headers)
}
(Some(exclude), None) => {
let exclude = FieldRange::from_list(exclude)?;
FieldRange::exclude(fields, exclude)
}
(None, Some(exclude_header)) => {
let first_line = if let Some(first_line) = extra {
first_line
} else {
self.peek_first_line(input)?
};
let exclude_headers = FieldRange::from_header_list(
exclude_header,
first_line.as_bytes(),
&self.parsed_delim,
self.header_is_regex,
true,
)?;
extra = Some(first_line);
FieldRange::exclude(fields, exclude_headers)
}
(None, None) => fields,
};
Ok((extra, fields))
}
}
/// A builder for the [`CoreConfig`] which drives [`Core`].
#[derive(Clone, Debug)]
pub struct CoreConfigBuilder<'a> {
config: CoreConfig<'a>,
}
impl<'a> CoreConfigBuilder<'a> {
pub fn new() -> Self {
Self {
config: CoreConfig::default(),
}
}
pub fn build(mut self) -> Result<CoreConfig<'a>> {
let delim = if self.config.is_parser_regex {
RegexOrString::Regex(Regex::new(self.config.delimiter.to_str()?)?)
} else {
let unescaped =
std::str::from_utf8(&grep_cli::unescape(self.config.delimiter.to_str()?))?
.to_string();
RegexOrString::String(unescaped)
};
self.config.parsed_delim = delim;
Ok(self.config)
}
/// The substr to split lines on.
pub fn delimiter(mut self, delim: &'a [u8]) -> Self {
self.config.delimiter = delim;
self
}
/// The substr to use as the output delimiter
pub fn output_delimiter(mut self, delim: &'a [u8]) -> Self {
self.config.output_delimiter = delim;
self
}
/// The line terminator to use when looking for linebreaks and stripping linebreach chars.
pub fn line_terminator(mut self, term: LineTerminator) -> Self {
self.config.line_terminator = term;
self
}
/// Whether or not to try to use mmap mode
pub fn mmap(mut self, mmap_choice: MmapChoice) -> Self {
self.config.mmap_choice = mmap_choice;
self
}
/// Whether or not the parser is a regex
#[allow(clippy::wrong_self_convention)]
pub fn is_regex_parser(mut self, is_regex: bool) -> Self {
self.config.is_parser_regex = is_regex;
self
}
/// Try to decompress an input file
pub fn try_decompress(mut self, try_decompress: bool) -> Self {
self.config.try_decompress = try_decompress;
self
}
/// The raw user input fields to output
pub fn fields(mut self, fields: Option<&'a str>) -> Self {
self.config.raw_fields = fields;
self
}
/// The raw user input header to output
pub fn headers(mut self, headers: Option<&'a [Regex]>) -> Self {
self.config.raw_header_fields = headers;
self
}
/// The raw user input fields to exclude
pub fn exclude(mut self, exclude: Option<&'a str>) -> Self {
self.config.raw_exclude = exclude;
self
}
/// The raw user input headers to exclude
pub fn exclude_headers(mut self, exclude_headers: Option<&'a [Regex]>) -> Self {
self.config.raw_exclude_headers = exclude_headers;
self
}
/// Whether or not to treat the headers as regex
pub fn header_is_regex(mut self, header_is_regex: bool) -> Self {
self.config.header_is_regex = header_is_regex;
self
}
}
impl<'a> Default for CoreConfigBuilder<'a> {
fn default() -> Self {
Self::new()
}
}
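// An illustrative sketch (not from the original file): assembling a
// `CoreConfig` for comma-separated input. The field list "1,3" is an
// arbitrary example value.
#[cfg(test)]
mod core_config_builder_sketch {
    use super::*;

    #[test]
    fn builds_a_plain_delimiter_config() {
        let config = CoreConfigBuilder::new()
            .delimiter(b",")
            .fields(Some("1,3"))
            .build()
            .expect("a plain single-byte delimiter should build");
        // With `is_regex_parser` left at its default, the delimiter is parsed
        // as a literal string rather than a regex.
        assert!(matches!(config.parsed_delim(), RegexOrString::String(d) if d == ","));
    }
}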
/// The main processing loop
pub struct Core<'a, L> {
/// The [`CoreConfig`] object that determines how [`Core`] is run
config: &'a CoreConfig<'a>,
/// The [`FieldRange`]'s to keep, in the order to output them
fields: &'a [FieldRange],
/// The reusable line parse that defines how to parse a line (regex or substr).
line_parser: L,
/// The reusable line buffer that holds bytes from reads
line_buffer: &'a mut LineBuffer,
}
impl<'a, L> Core<'a, L>
where
L: LineParser<'a>,
{
    /// Create a new "core" that can be used to parse multiple inputs
pub fn new(
config: &'a CoreConfig,
fields: &'a [FieldRange],
line_parser: L,
line_buffer: &'a mut LineBuffer,
) -> Self {
Self {
config,
fields,
line_parser,
line_buffer,
}
}
/// Check if no reordering of fields is happening
#[inline]
fn are_fields_pos_sorted(&self) -> bool {
let mut test = 0;
for field in self.fields {
if field.pos < test {
return false;
}
test = field.pos
}
true
}
/// Check if we can run in `fast mode`.
///
    /// delimiter is 1 byte, newline is 1 byte, and we are not using a regex
fn allow_fastmode(&self) -> bool {
self.config.delimiter.len() == 1
&& self.config.line_terminator.as_bytes().len() == 1
&& !self.config.is_parser_regex
&& self.are_fields_pos_sorted()
}
pub fn hck_input<P, W>(
&mut self,
input: HckInput<P>,
mut output: W,
header: Option<Vec<u8>>,
) -> Result<(), io::Error>
where
P: AsRef<Path>,
W: Write,
{
// Dispatch to a given `hck_*` runner depending on configuration
match input {
HckInput::Stdin => {
if let Some(header) = header {
self.hck_bytes(header.as_bytes(), &mut output)?;
}
let reader: Box<dyn Read> = if self.config.try_decompress {
Box::new(MultiGzDecoder::new(io::stdin()))
} else {
Box::new(io::stdin())
};
if self.allow_fastmode() {
self.hck_reader_fast(reader, &mut output)
} else {
self.hck_reader(reader, &mut output)
}
}
HckInput::Path(path) => {
if self.config.try_decompress {
let reader: Box<dyn Read> = if path
.as_ref()
.to_str()
.map(|p| p.ends_with(".gz"))
.unwrap_or(false)
{
Box::new(MultiGzDecoder::new(File::open(&path)?))
} else {
Box::new(
DecompressionReaderBuilder::new()
// .matcher(matcher)
.build(&path)?,
)
};
if self.allow_fastmode() {
self.hck_reader_fast(reader, &mut output)
} else {
self.hck_reader(reader, &mut output)
}
} else {
let file = File::open(&path)?;
if let Some(mmap) = self.config.mmap_choice.open(&file, Some(&path)) {
if self.allow_fastmode() {
self.hck_bytes_fast(mmap.as_bytes(), &mut output)
} else {
self.hck_bytes(mmap.as_bytes(), &mut output)
}
} else if self.allow_fastmode() {
self.hck_reader_fast(file, &mut output)
} else {
self.hck_reader(file, &mut output)
}
}
}
}
}
/// Iterate over the lines in a slice of bytes.
///
/// The input slice of bytes is assumed to end in a newline.
pub fn hck_bytes<W>(&mut self, bytes: &[u8], mut output: W) -> Result<(), io::Error>
where
W: Write,
{
let iter = LineIter::new(self.config.line_terminator.as_byte(), bytes.as_bytes());
let mut shuffler: Vec<Vec<&'static [u8]>> =
vec![vec![]; self.fields.iter().map(|f| f.pos).max().unwrap() + 1];
for line in iter {
let mut s: Vec<Vec<&[u8]>> = shuffler;
self.line_parser.parse_line(
lines::without_terminator(line, self.config.line_terminator),
&mut s,
);
let items = s.iter_mut().flat_map(|s| s.drain(..));
output.join_append(
self.config.output_delimiter,
items,
&self.config.line_terminator,
)?;
shuffler = unsafe { core::mem::transmute(s) };
}
Ok(())
}
/// Fast mode iteration over lines in a slice of bytes.
///
    /// This expects the separator to be a single byte and the newline to be a single byte.
///
/// Instead of seaching for linebreaks, then splitting up the line on the `sep`,
/// fast mode looks for either `sep` or `newline` at the same time, so instead of two passes
/// over the bytes we only make one pass.
pub fn hck_bytes_fast<W: Write>(&mut self, bytes: &[u8], output: W) -> Result<(), io::Error> {
let mut buffer_parser = SingleByteDelimParser::new(
self.config.line_terminator,
self.config.output_delimiter,
self.fields,
self.config.delimiter[0],
);
buffer_parser.process_buffer(bytes, output)?;
Ok(())
}
/// Fast mode iteration over lines in a reader.
///
/// This expects the separator to be a single byte and the newline to be a single byte.
///
/// Instead of seaching for linebreaks, then splitting up the line on the `sep`,
/// fast mode looks for either `sep` or `newline` at the same time, so instead of two passes
/// over the bytes we only make one pass.
pub fn hck_reader_fast<R: Read, W: Write>(
&mut self,
reader: R,
mut output: W,
) -> Result<(), io::Error> {
let mut reader = LineBufferReader::new(reader, &mut self.line_buffer);
let mut buffer_parser = SingleByteDelimParser::new(
self.config.line_terminator,
self.config.output_delimiter,
self.fields,
self.config.delimiter[0],
);
while reader.fill()? {
buffer_parser.process_buffer(reader.buffer(), &mut output)?;
buffer_parser.reset();
reader.consume(reader.buffer().len());
}
Ok(())
}
/// Process lines from a reader.
pub fn hck_reader<R: Read, W: Write>(
&mut self,
reader: R,
mut output: W,
) -> Result<(), io::Error> {
let mut reader = LineBufferReader::new(reader, &mut self.line_buffer);
let mut shuffler: Vec<Vec<&'static [u8]>> =
vec![vec![]; self.fields.iter().map(|f| f.pos).max().unwrap() + 1];
while reader.fill()? {
let iter = LineIter::new(self.config.line_terminator.as_byte(), reader.buffer());
for line in iter {
let mut s: Vec<Vec<&[u8]>> = shuffler;
self.line_parser.parse_line(
lines::without_terminator(line, self.config.line_terminator),
&mut s,
);
let items = s.iter_mut().flat_map(|s| s.drain(..));
output.join_append(
self.config.output_delimiter,
items,
&self.config.line_terminator,
)?;
shuffler = unsafe { core::mem::transmute(s) };
}
reader.consume(reader.buffer().len());
}
Ok(())
}
}
/// A trait for adding `join_append` to a writer.
pub trait JoinAppend {
    /// Given an input iterator of items, write them with a separator and a newline.
fn join_append<'b>(
&mut self,
sep: &[u8],
items: impl Iterator<Item = &'b [u8]>,
term: &LineTerminator,
) -> Result<(), io::Error>;
}
/// [`JoinAppend`] for [`Write`].
impl<W: Write> JoinAppend for W {
    /// Given an input iterator of items, write them with a separator and a newline.
#[inline(always)]
fn join_append<'b>(
&mut self,
sep: &[u8],
mut items: impl Iterator<Item = &'b [u8]>,
term: &LineTerminator,
) -> Result<(), io::Error> {
if let Some(item) = items.next() {
self.write_all(item)?;
}
for item in items {
self.write_all(sep)?;
self.write_all(item)?;
}
self.write_all(term.as_bytes())?;
Ok(())
}
}
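// An illustrative sketch (not from the original file) of the `JoinAppend`
// helper on a plain `Vec<u8>` writer. It assumes the default `LineTerminator`
// is a single `\n`, matching how `CoreConfig::default()` is set up above.
#[cfg(test)]
mod join_append_sketch {
    use super::*;

    #[test]
    fn joins_items_with_separator_and_terminator() {
        let mut out: Vec<u8> = Vec::new();
        let items = [b"a".as_ref(), b"b".as_ref(), b"c".as_ref()];
        out.join_append(b"\t", items.iter().copied(), &LineTerminator::default())
            .unwrap();
        assert_eq!(out, b"a\tb\tc\n".to_vec());
    }
}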
| 34.357522 | 111 | 0.521585 |
fb27a1d0e0f1a4402f9d23b8a24510e5df5e8403
| 3,436 |
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub struct Config {
pub(crate) endpoint_resolver: ::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>,
pub(crate) region: Option<aws_types::region::Region>,
pub(crate) credentials_provider: aws_types::credentials::SharedCredentialsProvider,
}
impl std::fmt::Debug for Config {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut config = f.debug_struct("Config");
config.finish()
}
}
impl Config {
pub fn builder() -> Builder {
Builder::default()
}
pub fn new(config: &aws_types::config::Config) -> Self {
Builder::from(config).build()
}
/// The signature version 4 service signing name to use in the credential scope when signing requests.
///
/// The signing service may be overridden by the `Endpoint`, or by specifying a custom
/// [`SigningService`](aws_types::SigningService) during operation construction
pub fn signing_service(&self) -> &'static str {
"marketplacecommerceanalytics"
}
}
#[derive(Default)]
pub struct Builder {
endpoint_resolver: Option<::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>>,
region: Option<aws_types::region::Region>,
credentials_provider: Option<aws_types::credentials::SharedCredentialsProvider>,
}
impl Builder {
pub fn new() -> Self {
Self::default()
}
pub fn endpoint_resolver(
mut self,
endpoint_resolver: impl aws_endpoint::ResolveAwsEndpoint + 'static,
) -> Self {
self.endpoint_resolver = Some(::std::sync::Arc::new(endpoint_resolver));
self
}
pub fn region(mut self, region: impl Into<Option<aws_types::region::Region>>) -> Self {
self.region = region.into();
self
}
/// Set the credentials provider for this service
pub fn credentials_provider(
mut self,
credentials_provider: impl aws_types::credentials::ProvideCredentials + 'static,
) -> Self {
self.credentials_provider = Some(aws_types::credentials::SharedCredentialsProvider::new(
credentials_provider,
));
self
}
pub fn set_credentials_provider(
&mut self,
credentials_provider: Option<aws_types::credentials::SharedCredentialsProvider>,
) -> &mut Self {
self.credentials_provider = credentials_provider;
self
}
pub fn build(self) -> Config {
Config {
endpoint_resolver: self
.endpoint_resolver
.unwrap_or_else(|| ::std::sync::Arc::new(crate::aws_endpoint::endpoint_resolver())),
region: self.region,
credentials_provider: self.credentials_provider.unwrap_or_else(|| {
aws_types::credentials::SharedCredentialsProvider::new(
crate::no_credentials::NoCredentials,
)
}),
}
}
}
impl From<&aws_types::config::Config> for Builder {
fn from(input: &aws_types::config::Config) -> Self {
let mut builder = Builder::default();
builder = builder.region(input.region().cloned());
builder.set_credentials_provider(input.credentials_provider().cloned());
builder
}
}
impl From<&aws_types::config::Config> for Config {
fn from(config: &aws_types::config::Config) -> Self {
Builder::from(config).build()
}
}
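// An illustrative sketch (not part of the generated code), kept as a comment
// so the generated file's behavior is unchanged: hand-constructing a service
// `Config`. The region string is a placeholder; when no credentials provider
// is supplied, `build()` falls back to `NoCredentials`.
//
// let config = Config::builder()
//     .region(aws_types::region::Region::new("us-east-1"))
//     .build();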
| 35.791667 | 106 | 0.641153 |
bbae395e7580a6d5d9a9fa55a3de63c851d9a69d
| 22,784 |
// Copyright (C) 2015 - 2016 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use futures::Future;
use tokio::runtime::current_thread::Runtime;
use proto::error::ProtoError;
use proto::xfer::{DnsRequestSender, DnsResponse};
#[cfg(feature = "dnssec")]
use client::SecureClientHandle;
use client::{BasicClientHandle, ClientConnection, ClientFuture, ClientHandle};
use error::*;
use rr::dnssec::Signer;
#[cfg(feature = "dnssec")]
use rr::dnssec::TrustAnchor;
use rr::{DNSClass, Name, Record, RecordSet, RecordType};
/// Client trait which implements basic DNS Client operations.
///
/// As of 0.10.0, the Client is now a wrapper around the `ClientFuture`, which is a futures-rs
/// and tokio-rs based implementation. This trait implements synchronous functions for ease of use.
///
/// There was a strong attempt to make it backwards compatible, but making it a drop in replacement
/// for the old Client was not possible. This trait has two implementations, the `SyncClient` which
/// is a standard DNS Client, and the `SecureSyncClient` which is a wrapper on `SecureClientHandle`
/// providing DNSSec validation.
///
/// *note* When upgrading from previous usage, both `SyncClient` and `SecureSyncClient` have a
/// signer which can optionally be associated with the Client. This replaces the previous per-function
/// parameter, and it will sign all update requests (this matches the `ClientFuture` API).
pub trait Client {
/// The result future that will resolve into a DnsResponse
type Response: Future<Item = DnsResponse, Error = ProtoError> + 'static + Send;
/// The actual DNS request sender, aka Connection
type Sender: DnsRequestSender<DnsResponseFuture = Self::Response>;
/// A future that resolves into the Sender after connection
type SenderFuture: Future<Item = Self::Sender, Error = ProtoError> + 'static + Send;
/// A handle to send messages to the Sender
type Handle: ClientHandle;
/// Return the inner Futures items
///
/// Consumes the connection and allows for future based operations afterward.
#[allow(clippy::type_complexity)]
fn new_future(
&self,
) -> (
ClientFuture<Self::SenderFuture, Self::Sender, Self::Response>,
Self::Handle,
);
/// A *classic* DNS query, i.e. does not perform any DNSSec operations
///
    /// *Note* As of now, this will not recurse on PTR record responses; that is up to
/// the caller.
///
/// # Arguments
///
/// * `name` - the label to lookup
/// * `query_class` - most likely this should always be DNSClass::IN
/// * `query_type` - record type to lookup
fn query(
&self,
name: &Name,
query_class: DNSClass,
query_type: RecordType,
) -> ClientResult<DnsResponse> {
let mut reactor = Runtime::new()?;
let (bg, mut client) = self.new_future();
reactor
.spawn(bg)
.block_on(client.query(name.clone(), query_class, query_type))
}
/// Sends a NOTIFY message to the remote system
///
/// # Arguments
///
/// * `name` - the label which is being notified
/// * `query_class` - most likely this should always be DNSClass::IN
/// * `query_type` - record type which has been updated
/// * `rrset` - the new version of the record(s) being notified
fn notify<R>(
&mut self,
name: Name,
query_class: DNSClass,
query_type: RecordType,
rrset: Option<R>,
) -> ClientResult<DnsResponse>
where
R: Into<RecordSet>,
{
let mut reactor = Runtime::new()?;
let (bg, mut client) = self.new_future();
reactor
.spawn(bg)
.block_on(client.notify(name, query_class, query_type, rrset))
}
/// Sends a record to create on the server, this will fail if the record exists (atomicity
/// depends on the server)
///
/// [RFC 2136](https://tools.ietf.org/html/rfc2136), DNS Update, April 1997
///
/// ```text
/// 2.4.3 - RRset Does Not Exist
///
/// No RRs with a specified NAME and TYPE (in the zone and class denoted
/// by the Zone Section) can exist.
///
/// For this prerequisite, a requestor adds to the section a single RR
/// whose NAME and TYPE are equal to that of the RRset whose nonexistence
/// is required. The RDLENGTH of this record is zero (0), and RDATA
/// field is therefore empty. CLASS must be specified as NONE in order
/// to distinguish this condition from a valid RR whose RDLENGTH is
/// naturally zero (0) (for example, the NULL RR). TTL must be specified
/// as zero (0).
///
/// 2.5.1 - Add To An RRset
///
/// RRs are added to the Update Section whose NAME, TYPE, TTL, RDLENGTH
/// and RDATA are those being added, and CLASS is the same as the zone
/// class. Any duplicate RRs will be silently ignored by the primary
/// master.
/// ```
///
/// # Arguments
///
/// * `rrset` - the record(s) to create
/// * `zone_origin` - the zone name to update, i.e. SOA name
///
/// The update must go to a zone authority (i.e. the server used in the ClientConnection)
fn create<R>(&self, rrset: R, zone_origin: Name) -> ClientResult<DnsResponse>
where
R: Into<RecordSet>,
{
let mut reactor = Runtime::new()?;
let (bg, mut client) = self.new_future();
reactor
.spawn(bg)
.block_on(client.create(rrset, zone_origin))
}
/// Appends a record to an existing rrset, optionally require the rrset to exist (atomicity
/// depends on the server)
///
/// [RFC 2136](https://tools.ietf.org/html/rfc2136), DNS Update, April 1997
///
/// ```text
/// 2.4.1 - RRset Exists (Value Independent)
///
/// At least one RR with a specified NAME and TYPE (in the zone and class
/// specified in the Zone Section) must exist.
///
/// For this prerequisite, a requestor adds to the section a single RR
/// whose NAME and TYPE are equal to that of the zone RRset whose
/// existence is required. RDLENGTH is zero and RDATA is therefore
/// empty. CLASS must be specified as ANY to differentiate this
/// condition from that of an actual RR whose RDLENGTH is naturally zero
/// (0) (e.g., NULL). TTL is specified as zero (0).
///
/// 2.5.1 - Add To An RRset
///
/// RRs are added to the Update Section whose NAME, TYPE, TTL, RDLENGTH
/// and RDATA are those being added, and CLASS is the same as the zone
/// class. Any duplicate RRs will be silently ignored by the primary
/// master.
/// ```
///
/// # Arguments
///
/// * `rrset` - the record(s) to append to an RRSet
/// * `zone_origin` - the zone name to update, i.e. SOA name
/// * `must_exist` - if true, the request will fail if the record does not exist
///
/// The update must go to a zone authority (i.e. the server used in the ClientConnection). If
/// the rrset does not exist and must_exist is false, then the RRSet will be created.
fn append<R>(&self, rrset: R, zone_origin: Name, must_exist: bool) -> ClientResult<DnsResponse>
where
R: Into<RecordSet>,
{
let mut reactor = Runtime::new()?;
let (bg, mut client) = self.new_future();
reactor
.spawn(bg)
.block_on(client.append(rrset, zone_origin, must_exist))
}
/// Compares and if it matches, swaps it for the new value (atomicity depends on the server)
///
/// ```text
/// 2.4.2 - RRset Exists (Value Dependent)
///
/// A set of RRs with a specified NAME and TYPE exists and has the same
/// members with the same RDATAs as the RRset specified here in this
/// section. While RRset ordering is undefined and therefore not
/// significant to this comparison, the sets be identical in their
/// extent.
///
/// For this prerequisite, a requestor adds to the section an entire
/// RRset whose preexistence is required. NAME and TYPE are that of the
/// RRset being denoted. CLASS is that of the zone. TTL must be
/// specified as zero (0) and is ignored when comparing RRsets for
/// identity.
///
/// 2.5.4 - Delete An RR From An RRset
///
/// RRs to be deleted are added to the Update Section. The NAME, TYPE,
/// RDLENGTH and RDATA must match the RR being deleted. TTL must be
/// specified as zero (0) and will otherwise be ignored by the primary
/// master. CLASS must be specified as NONE to distinguish this from an
/// RR addition. If no such RRs exist, then this Update RR will be
/// silently ignored by the primary master.
///
/// 2.5.1 - Add To An RRset
///
/// RRs are added to the Update Section whose NAME, TYPE, TTL, RDLENGTH
/// and RDATA are those being added, and CLASS is the same as the zone
/// class. Any duplicate RRs will be silently ignored by the primary
/// master.
/// ```
///
/// # Arguments
///
/// * `current` - the current rrset which must exist for the swap to complete
/// * `new` - the new rrset with which to replace the current rrset
/// * `zone_origin` - the zone name to update, i.e. SOA name
///
/// The update must go to a zone authority (i.e. the server used in the ClientConnection).
fn compare_and_swap<CR, NR>(
&self,
current: CR,
new: NR,
zone_origin: Name,
) -> ClientResult<DnsResponse>
where
CR: Into<RecordSet>,
NR: Into<RecordSet>,
{
let mut reactor = Runtime::new()?;
let (bg, mut client) = self.new_future();
reactor
.spawn(bg)
.block_on(client.compare_and_swap(current, new, zone_origin))
}
/// Deletes a record (by rdata) from an rrset, optionally require the rrset to exist.
///
/// [RFC 2136](https://tools.ietf.org/html/rfc2136), DNS Update, April 1997
///
/// ```text
/// 2.4.1 - RRset Exists (Value Independent)
///
/// At least one RR with a specified NAME and TYPE (in the zone and class
/// specified in the Zone Section) must exist.
///
/// For this prerequisite, a requestor adds to the section a single RR
/// whose NAME and TYPE are equal to that of the zone RRset whose
/// existence is required. RDLENGTH is zero and RDATA is therefore
/// empty. CLASS must be specified as ANY to differentiate this
/// condition from that of an actual RR whose RDLENGTH is naturally zero
/// (0) (e.g., NULL). TTL is specified as zero (0).
///
/// 2.5.4 - Delete An RR From An RRset
///
/// RRs to be deleted are added to the Update Section. The NAME, TYPE,
/// RDLENGTH and RDATA must match the RR being deleted. TTL must be
/// specified as zero (0) and will otherwise be ignored by the primary
/// master. CLASS must be specified as NONE to distinguish this from an
/// RR addition. If no such RRs exist, then this Update RR will be
/// silently ignored by the primary master.
/// ```
///
/// # Arguments
///
/// * `rrset` - the record(s) to delete from a RRSet, the name, type and rdata must match the
/// record to delete
/// * `zone_origin` - the zone name to update, i.e. SOA name
///
    /// The update must go to a zone authority (i.e. the server used in the ClientConnection). If
    /// no such records exist, the update is silently ignored by the server.
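    ///
    /// # Example
    ///
    /// A minimal sketch (marked `ignore`); `client`, `record` and `origin` are assumptions set
    /// up elsewhere, with `record` carrying the exact rdata to remove:
    ///
    /// ```ignore
    /// // Remove one specific record from the RRset, leaving any other records intact.
    /// let response = client.delete_by_rdata(record, origin)?;
    /// assert_eq!(response.response_code(), ResponseCode::NoError);
    /// ```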
fn delete_by_rdata<R>(&self, record: R, zone_origin: Name) -> ClientResult<DnsResponse>
where
R: Into<RecordSet>,
{
let mut reactor = Runtime::new()?;
let (bg, mut client) = self.new_future();
reactor
.spawn(bg)
.block_on(client.delete_by_rdata(record, zone_origin))
}
    /// Deletes an entire rrset; if the rrset does not exist, the deletion is silently ignored by
    /// the server.
///
/// [RFC 2136](https://tools.ietf.org/html/rfc2136), DNS Update, April 1997
///
/// ```text
/// 2.4.1 - RRset Exists (Value Independent)
///
/// At least one RR with a specified NAME and TYPE (in the zone and class
/// specified in the Zone Section) must exist.
///
/// For this prerequisite, a requestor adds to the section a single RR
/// whose NAME and TYPE are equal to that of the zone RRset whose
/// existence is required. RDLENGTH is zero and RDATA is therefore
/// empty. CLASS must be specified as ANY to differentiate this
/// condition from that of an actual RR whose RDLENGTH is naturally zero
/// (0) (e.g., NULL). TTL is specified as zero (0).
///
/// 2.5.2 - Delete An RRset
///
/// One RR is added to the Update Section whose NAME and TYPE are those
/// of the RRset to be deleted. TTL must be specified as zero (0) and is
/// otherwise not used by the primary master. CLASS must be specified as
/// ANY. RDLENGTH must be zero (0) and RDATA must therefore be empty.
/// If no such RRset exists, then this Update RR will be silently ignored
/// by the primary master.
/// ```
///
/// # Arguments
///
    /// * `record` - identifies the RRset to delete; the NAME and TYPE must match the record
    ///   set to delete
/// * `zone_origin` - the zone name to update, i.e. SOA name
///
    /// The update must go to a zone authority (i.e. the server used in the ClientConnection). If
    /// the rrset does not exist, the deletion is silently ignored by the server.
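    ///
    /// # Example
    ///
    /// A minimal sketch (marked `ignore`); `client`, `record` and `origin` are assumptions,
    /// with `record` naming the RRset (NAME and TYPE) to remove:
    ///
    /// ```ignore
    /// // Remove the whole RRset, e.g. every A record at www.example.com.
    /// let response = client.delete_rrset(record, origin)?;
    /// assert_eq!(response.response_code(), ResponseCode::NoError);
    /// ```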
fn delete_rrset(&self, record: Record, zone_origin: Name) -> ClientResult<DnsResponse> {
let mut reactor = Runtime::new()?;
let (bg, mut client) = self.new_future();
reactor
.spawn(bg)
.block_on(client.delete_rrset(record, zone_origin))
}
/// Deletes all records at the specified name
///
/// [RFC 2136](https://tools.ietf.org/html/rfc2136), DNS Update, April 1997
///
/// ```text
/// 2.5.3 - Delete All RRsets From A Name
///
/// One RR is added to the Update Section whose NAME is that of the name
/// to be cleansed of RRsets. TYPE must be specified as ANY. TTL must
/// be specified as zero (0) and is otherwise not used by the primary
/// master. CLASS must be specified as ANY. RDLENGTH must be zero (0)
/// and RDATA must therefore be empty. If no such RRsets exist, then
/// this Update RR will be silently ignored by the primary master.
/// ```
///
/// # Arguments
///
/// * `name_of_records` - the name of all the record sets to delete
/// * `zone_origin` - the zone name to update, i.e. SOA name
/// * `dns_class` - the class of the SOA
///
    /// The update must go to a zone authority (i.e. the server used in the ClientConnection). This
    /// operation attempts to delete all resource record sets at the specified name, regardless of
    /// the record type.
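    ///
    /// # Example
    ///
    /// A minimal sketch (marked `ignore`); `client`, `name` and `origin` are assumptions set up
    /// elsewhere:
    ///
    /// ```ignore
    /// // Remove every record set at `name`, whatever its type.
    /// let response = client.delete_all(name, origin, DNSClass::IN)?;
    /// assert_eq!(response.response_code(), ResponseCode::NoError);
    /// ```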
fn delete_all(
&self,
name_of_records: Name,
zone_origin: Name,
dns_class: DNSClass,
) -> ClientResult<DnsResponse> {
let mut reactor = Runtime::new()?;
let (bg, mut client) = self.new_future();
reactor
.spawn(bg)
.block_on(client.delete_all(name_of_records, zone_origin, dns_class))
}
}
/// The Client is abstracted over either trust_dns::tcp::TcpClientConnection or
/// trust_dns::udp::UdpClientConnection.
///
/// Whether to use TCP or UDP is up to the user. Some DNS servers
/// disallow TCP in some cases, so if TCP fails, double-check whether UDP works.
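///
/// # Example
///
/// A minimal sketch (marked `ignore`, not compiled as a doctest); the resolver address and the
/// queried name are placeholders, and error handling is elided:
///
/// ```ignore
/// // Build a UDP-backed client and issue a simple A query.
/// let conn = UdpClientConnection::new("8.8.8.8:53".parse()?)?;
/// let client = SyncClient::new(conn);
/// let response = client.query(&Name::from_ascii("www.example.com.")?, DNSClass::IN, RecordType::A)?;
/// ```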
pub struct SyncClient<CC> {
conn: CC,
signer: Option<Arc<Signer>>,
}
impl<CC> SyncClient<CC>
where
CC: ClientConnection,
{
/// Creates a new DNS client with the specified connection type
///
/// # Arguments
///
/// * `conn` - the [`ClientConnection`] to use for all communication
pub fn new(conn: CC) -> Self {
SyncClient { conn, signer: None }
}
/// Creates a new DNS client with the specified connection type and a SIG0 signer.
///
/// This is necessary for signed update requests to update trust-dns-server entries.
///
/// # Arguments
///
/// * `conn` - the [`ClientConnection`] to use for all communication
/// * `signer` - signer to use, this needs an associated private key
pub fn with_signer(conn: CC, signer: Signer) -> Self {
SyncClient {
conn,
signer: Some(Arc::new(signer)),
}
}
}
impl<CC> Client for SyncClient<CC>
where
CC: ClientConnection,
{
type Response = CC::Response;
type Sender = CC::Sender;
type SenderFuture = CC::SenderFuture;
type Handle = BasicClientHandle<CC::Response>;
#[allow(clippy::type_complexity)]
fn new_future(
&self,
) -> (
ClientFuture<Self::SenderFuture, Self::Sender, Self::Response>,
Self::Handle,
) {
let stream = self.conn.new_stream(self.signer.clone());
ClientFuture::connect(stream)
}
}
/// A DNS client which will validate DNSSec records upon receipt
#[cfg(feature = "dnssec")]
pub struct SecureSyncClient<CC> {
conn: CC,
signer: Option<Arc<Signer>>,
}
#[cfg(feature = "dnssec")]
impl<CC> SecureSyncClient<CC>
where
CC: ClientConnection,
{
/// Creates a new DNS client with the specified connection type
///
/// # Arguments
///
    /// * `conn` - the [`ClientConnection`] to use for all communication
#[allow(clippy::new_ret_no_self)]
pub fn new(conn: CC) -> SecureSyncClientBuilder<CC> {
SecureSyncClientBuilder {
conn,
trust_anchor: None,
signer: None,
}
}
    /// DNSSEC-validating query; this will return an error if the requested records cannot be
    /// validated against the trust_anchor.
    ///
    /// *Deprecated* This function only exists for backward compatibility. It is just a wrapper
    /// around `Client::query` at this point.
///
    /// When the resolver receives an answer via the normal DNS lookup process, it then checks to
    /// make sure that the answer is correct, i.e. validated. Validation starts
    /// by verifying the DS and DNSKEY records at the DNS root. The DS
    /// records for the top-level domain found at the root, e.g. 'com', are then used to verify the
    /// DNSKEY records in the 'com' zone. From there, if there is a DS record for the
    /// subdomain, e.g. 'example.com', in the 'com' zone, that DS record is used to verify
    /// a DNSKEY record found in the 'example.com' zone. Finally, the RRSIG record found in the
    /// answer for the rrset, e.g. 'www.example.com', is verified.
///
/// *Note* As of now, this will not recurse on PTR or CNAME record responses, that is up to
/// the caller.
///
/// # Arguments
///
/// * `query_name` - the label to lookup
/// * `query_class` - most likely this should always be DNSClass::IN
/// * `query_type` - record type to lookup
#[deprecated(note = "use `Client::query` instead")]
pub fn secure_query(
&self,
query_name: &Name,
query_class: DNSClass,
query_type: RecordType,
) -> ClientResult<DnsResponse> {
let mut reactor = Runtime::new()?;
let (bg, mut client) = self.new_future();
reactor
.spawn(bg)
.block_on(client.query(query_name.clone(), query_class, query_type))
}
}
#[cfg(feature = "dnssec")]
impl<CC> Client for SecureSyncClient<CC>
where
CC: ClientConnection,
{
type Response = CC::Response;
type Sender = CC::Sender;
type SenderFuture = CC::SenderFuture;
type Handle = SecureClientHandle<BasicClientHandle<Self::Response>>;
#[allow(clippy::type_complexity)]
fn new_future(
&self,
) -> (
ClientFuture<Self::SenderFuture, Self::Sender, Self::Response>,
Self::Handle,
) {
let stream = self.conn.new_stream(self.signer.clone());
let (background, handle) = ClientFuture::connect(stream);
(background, SecureClientHandle::new(handle))
}
}
#[cfg(feature = "dnssec")]
pub struct SecureSyncClientBuilder<CC>
where
CC: ClientConnection,
{
conn: CC,
trust_anchor: Option<TrustAnchor>,
signer: Option<Arc<Signer>>,
}
#[cfg(feature = "dnssec")]
impl<CC> SecureSyncClientBuilder<CC>
where
CC: ClientConnection,
{
/// This variant allows for the trust_anchor to be replaced
///
/// # Arguments
///
/// * `trust_anchor` - the set of trusted DNSKEY public_keys, by default this only contains the
/// root public_key.
pub fn trust_anchor(mut self, trust_anchor: TrustAnchor) -> Self {
self.trust_anchor = Some(trust_anchor);
self
}
    /// Associate a signer to produce a SIG0 for all update requests
///
/// This is necessary for signed update requests to update trust-dns-server entries
///
/// # Arguments
///
/// * `signer` - signer to use, this needs an associated private key
pub fn signer(mut self, signer: Signer) -> Self {
self.signer = Some(Arc::new(signer));
self
}
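    /// Builds the [`SecureSyncClient`] with the connection and signer configured on this
    /// builder.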
pub fn build(self) -> SecureSyncClient<CC> {
SecureSyncClient {
conn: self.conn,
signer: self.signer,
}
}
}
#[cfg(test)]
fn assert_send_and_sync<T: Send + Sync>() {
assert!(true)
}
#[test]
fn test_sync_client_send_and_sync() {
use tcp::TcpClientConnection;
use udp::UdpClientConnection;
assert_send_and_sync::<SyncClient<UdpClientConnection>>();
assert_send_and_sync::<SyncClient<TcpClientConnection>>();
}
#[test]
#[cfg(feature = "dnssec")]
fn test_secure_client_send_and_sync() {
use tcp::TcpClientConnection;
use udp::UdpClientConnection;
assert_send_and_sync::<SecureSyncClient<UdpClientConnection>>();
assert_send_and_sync::<SecureSyncClient<TcpClientConnection>>();
}
| 37.784411 | 131 | 0.627809 |
72a1f1a7ba3769eb5c976bfb54c2bc92cb5590a3
| 30,740 |
use crate::{Error, GPUVertex, MaskState};
use enum_map::{enum_map, EnumMap};
use wgpu::vertex_attr_array;
#[derive(Debug)]
pub struct ShapePipeline {
pub mask_pipelines: EnumMap<MaskState, wgpu::RenderPipeline>,
}
#[derive(Debug)]
pub struct Pipelines {
pub mesh_layout: wgpu::BindGroupLayout,
pub color_pipelines: ShapePipeline,
pub bitmap_pipelines: ShapePipeline,
pub bitmap_layout: wgpu::BindGroupLayout,
pub gradient_pipelines: ShapePipeline,
pub gradient_layout: wgpu::BindGroupLayout,
}
impl ShapePipeline {
pub fn pipeline_for(&self, mask_state: MaskState) -> &wgpu::RenderPipeline {
&self.mask_pipelines[mask_state]
}
}
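// A minimal usage sketch (hypothetical; `frame_pass`, `pipelines` and `mask_state` are
// assumptions, not items defined in this module): before issuing a draw call, a render pass
// selects the variant that matches the current mask state, e.g.
//
//     frame_pass.set_pipeline(pipelines.color_pipelines.pipeline_for(mask_state));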
impl Pipelines {
pub fn new(
device: &wgpu::Device,
msaa_sample_count: u32,
sampler_layout: &wgpu::BindGroupLayout,
globals_layout: &wgpu::BindGroupLayout,
) -> Result<Self, Error> {
let color_vs =
device.create_shader_module(wgpu::include_spirv!("../shaders/color.vert.spv"));
let color_fs =
device.create_shader_module(wgpu::include_spirv!("../shaders/color.frag.spv"));
let texture_vs =
device.create_shader_module(wgpu::include_spirv!("../shaders/texture.vert.spv"));
let gradient_fs =
device.create_shader_module(wgpu::include_spirv!("../shaders/gradient.frag.spv"));
let bitmap_fs =
device.create_shader_module(wgpu::include_spirv!("../shaders/bitmap.frag.spv"));
let vertex_buffers_description = [wgpu::VertexBufferDescriptor {
stride: std::mem::size_of::<GPUVertex>() as u64,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &vertex_attr_array![
0 => Float2,
1 => Float4
],
}];
let mesh_bind_layout_label = create_debug_label!("Mesh bind group layout");
let mesh_bind_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
],
label: mesh_bind_layout_label.as_deref(),
});
let color_pipelines = create_color_pipelines(
&device,
&color_vs,
&color_fs,
msaa_sample_count,
&vertex_buffers_description,
globals_layout,
&mesh_bind_layout,
);
let bitmap_bind_layout_label = create_debug_label!("Bitmap shape bind group layout");
let bitmap_bind_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
},
count: None,
},
],
label: bitmap_bind_layout_label.as_deref(),
});
let bitmap_pipelines = create_bitmap_pipeline(
&device,
&texture_vs,
&bitmap_fs,
msaa_sample_count,
&vertex_buffers_description,
sampler_layout,
globals_layout,
&mesh_bind_layout,
&bitmap_bind_layout,
);
let gradient_bind_layout_label = create_debug_label!("Gradient shape bind group");
let gradient_bind_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
],
label: gradient_bind_layout_label.as_deref(),
});
let gradient_pipelines = create_gradient_pipeline(
&device,
&texture_vs,
&gradient_fs,
msaa_sample_count,
&vertex_buffers_description,
globals_layout,
&mesh_bind_layout,
&gradient_bind_layout,
);
Ok(Self {
mesh_layout: mesh_bind_layout,
color_pipelines,
bitmap_pipelines,
bitmap_layout: bitmap_bind_layout,
gradient_pipelines,
gradient_layout: gradient_bind_layout,
})
}
}
#[allow(clippy::too_many_arguments)]
fn create_pipeline_descriptor<'a>(
label: Option<&'a str>,
vertex_shader: &'a wgpu::ShaderModule,
fragment_shader: &'a wgpu::ShaderModule,
pipeline_layout: &'a wgpu::PipelineLayout,
depth_stencil_state: Option<wgpu::DepthStencilStateDescriptor>,
color_states: &'a [wgpu::ColorStateDescriptor],
vertex_buffers_description: &'a [wgpu::VertexBufferDescriptor<'a>],
msaa_sample_count: u32,
) -> wgpu::RenderPipelineDescriptor<'a> {
wgpu::RenderPipelineDescriptor {
label,
layout: Some(&pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vertex_shader,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fragment_shader,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::None,
polygon_mode: Default::default(),
clamp_depth: false,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states,
depth_stencil_state,
sample_count: msaa_sample_count,
sample_mask: !0,
alpha_to_coverage_enabled: false,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: vertex_buffers_description,
},
}
}
fn create_color_pipelines(
device: &wgpu::Device,
vertex_shader: &wgpu::ShaderModule,
fragment_shader: &wgpu::ShaderModule,
msaa_sample_count: u32,
vertex_buffers_description: &[wgpu::VertexBufferDescriptor<'_>],
globals_layout: &wgpu::BindGroupLayout,
mesh_bind_layout: &wgpu::BindGroupLayout,
) -> ShapePipeline {
let pipeline_layout_label = create_debug_label!("Color shape pipeline layout");
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: pipeline_layout_label.as_deref(),
bind_group_layouts: &[globals_layout, &mesh_bind_layout],
push_constant_ranges: &[],
});
let mask_pipelines = enum_map! {
MaskState::NoMask => {
let (stencil, write_mask) = mask_render_state(MaskState::NoMask);
device.create_render_pipeline(&create_pipeline_descriptor(
create_debug_label!("Color pipeline no mask").as_deref(),
vertex_shader,
fragment_shader,
&pipeline_layout,
Some(wgpu::DepthStencilStateDescriptor {
format: wgpu::TextureFormat::Depth24PlusStencil8,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Always,
stencil,
}),
&[wgpu::ColorStateDescriptor {
format: wgpu::TextureFormat::Bgra8Unorm,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
write_mask,
}],
vertex_buffers_description,
msaa_sample_count,
))
},
MaskState::DrawMaskStencil => {
let (stencil, write_mask) = mask_render_state(MaskState::DrawMaskStencil);
device.create_render_pipeline(&create_pipeline_descriptor(
create_debug_label!("Color pipeline draw mask stencil").as_deref(),
vertex_shader,
fragment_shader,
&pipeline_layout,
Some(wgpu::DepthStencilStateDescriptor {
format: wgpu::TextureFormat::Depth24PlusStencil8,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Always,
stencil,
}),
&[wgpu::ColorStateDescriptor {
format: wgpu::TextureFormat::Bgra8Unorm,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
write_mask,
}],
vertex_buffers_description,
msaa_sample_count,
))
},
MaskState::DrawMaskedContent => {
let (stencil, write_mask) = mask_render_state(MaskState::DrawMaskedContent);
device.create_render_pipeline(&create_pipeline_descriptor(
create_debug_label!("Color pipeline draw masked content").as_deref(),
vertex_shader,
fragment_shader,
&pipeline_layout,
Some(wgpu::DepthStencilStateDescriptor {
format: wgpu::TextureFormat::Depth24PlusStencil8,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Always,
stencil,
}),
&[wgpu::ColorStateDescriptor {
format: wgpu::TextureFormat::Bgra8Unorm,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
write_mask,
}],
vertex_buffers_description,
msaa_sample_count,
))
},
MaskState::ClearMaskStencil => {
let (stencil, write_mask) = mask_render_state(MaskState::ClearMaskStencil);
device.create_render_pipeline(&create_pipeline_descriptor(
create_debug_label!("Color pipeline clear mask stencil").as_deref(),
vertex_shader,
fragment_shader,
&pipeline_layout,
Some(wgpu::DepthStencilStateDescriptor {
format: wgpu::TextureFormat::Depth24PlusStencil8,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Always,
stencil,
}),
&[wgpu::ColorStateDescriptor {
format: wgpu::TextureFormat::Bgra8Unorm,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
write_mask,
}],
vertex_buffers_description,
msaa_sample_count,
))
},
};
ShapePipeline { mask_pipelines }
}
#[allow(clippy::too_many_arguments)]
fn create_bitmap_pipeline(
device: &wgpu::Device,
vertex_shader: &wgpu::ShaderModule,
fragment_shader: &wgpu::ShaderModule,
msaa_sample_count: u32,
vertex_buffers_description: &[wgpu::VertexBufferDescriptor<'_>],
sampler_layout: &wgpu::BindGroupLayout,
globals_layout: &wgpu::BindGroupLayout,
mesh_bind_layout: &wgpu::BindGroupLayout,
bitmap_bind_layout: &wgpu::BindGroupLayout,
) -> ShapePipeline {
let pipeline_layout_label = create_debug_label!("Bitmap shape pipeline layout");
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: pipeline_layout_label.as_deref(),
bind_group_layouts: &[
globals_layout,
mesh_bind_layout,
bitmap_bind_layout,
sampler_layout,
],
push_constant_ranges: &[],
});
let mask_pipelines = enum_map! {
MaskState::NoMask => {
let (stencil, write_mask) = mask_render_state(MaskState::NoMask);
device.create_render_pipeline(&create_pipeline_descriptor(
create_debug_label!("Bitmap pipeline no mask").as_deref(),
vertex_shader,
fragment_shader,
&pipeline_layout,
Some(wgpu::DepthStencilStateDescriptor {
format: wgpu::TextureFormat::Depth24PlusStencil8,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Always,
stencil,
}),
&[wgpu::ColorStateDescriptor {
format: wgpu::TextureFormat::Bgra8Unorm,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
write_mask,
}],
vertex_buffers_description,
msaa_sample_count,
))
},
MaskState::DrawMaskStencil => {
let (stencil, write_mask) = mask_render_state(MaskState::DrawMaskStencil);
device.create_render_pipeline(&create_pipeline_descriptor(
create_debug_label!("Bitmap pipeline draw mask stencil").as_deref(),
vertex_shader,
fragment_shader,
&pipeline_layout,
Some(wgpu::DepthStencilStateDescriptor {
format: wgpu::TextureFormat::Depth24PlusStencil8,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Always,
stencil,
}),
&[wgpu::ColorStateDescriptor {
format: wgpu::TextureFormat::Bgra8Unorm,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
write_mask,
}],
vertex_buffers_description,
msaa_sample_count,
))
},
MaskState::DrawMaskedContent => {
let (stencil, write_mask) = mask_render_state(MaskState::DrawMaskedContent);
device.create_render_pipeline(&create_pipeline_descriptor(
create_debug_label!("Bitmap pipeline draw masked content").as_deref(),
vertex_shader,
fragment_shader,
&pipeline_layout,
Some(wgpu::DepthStencilStateDescriptor {
format: wgpu::TextureFormat::Depth24PlusStencil8,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Equal,
stencil,
}),
&[wgpu::ColorStateDescriptor {
format: wgpu::TextureFormat::Bgra8Unorm,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
write_mask,
}],
vertex_buffers_description,
msaa_sample_count,
))
},
MaskState::ClearMaskStencil => {
let (stencil, write_mask) = mask_render_state(MaskState::ClearMaskStencil);
device.create_render_pipeline(&create_pipeline_descriptor(
create_debug_label!("Bitmap pipeline clear mask stencil").as_deref(),
vertex_shader,
fragment_shader,
&pipeline_layout,
Some(wgpu::DepthStencilStateDescriptor {
format: wgpu::TextureFormat::Depth24PlusStencil8,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Always,
stencil,
}),
&[wgpu::ColorStateDescriptor {
format: wgpu::TextureFormat::Bgra8Unorm,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
write_mask,
}],
vertex_buffers_description,
msaa_sample_count,
))
}
};
ShapePipeline { mask_pipelines }
}
#[allow(clippy::too_many_arguments)]
fn create_gradient_pipeline(
device: &wgpu::Device,
vertex_shader: &wgpu::ShaderModule,
fragment_shader: &wgpu::ShaderModule,
msaa_sample_count: u32,
vertex_buffers_description: &[wgpu::VertexBufferDescriptor<'_>],
globals_layout: &wgpu::BindGroupLayout,
mesh_bind_layout: &wgpu::BindGroupLayout,
gradient_bind_layout: &wgpu::BindGroupLayout,
) -> ShapePipeline {
let pipeline_layout_label = create_debug_label!("Gradient shape pipeline layout");
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: pipeline_layout_label.as_deref(),
bind_group_layouts: &[globals_layout, mesh_bind_layout, gradient_bind_layout],
push_constant_ranges: &[],
});
let mask_pipelines = enum_map! {
MaskState::NoMask => {
let (stencil, write_mask) = mask_render_state(MaskState::NoMask);
device.create_render_pipeline(&create_pipeline_descriptor(
create_debug_label!("Gradient pipeline no mask").as_deref(),
vertex_shader,
fragment_shader,
&pipeline_layout,
Some(wgpu::DepthStencilStateDescriptor {
format: wgpu::TextureFormat::Depth24PlusStencil8,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Always,
stencil,
}),
&[wgpu::ColorStateDescriptor {
format: wgpu::TextureFormat::Bgra8Unorm,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
write_mask,
}],
vertex_buffers_description,
msaa_sample_count,
))
},
MaskState::DrawMaskStencil => {
let (stencil, write_mask) = mask_render_state(MaskState::DrawMaskStencil);
device.create_render_pipeline(&create_pipeline_descriptor(
create_debug_label!("Gradient pipeline draw mask stencil").as_deref(),
vertex_shader,
fragment_shader,
&pipeline_layout,
Some(wgpu::DepthStencilStateDescriptor {
format: wgpu::TextureFormat::Depth24PlusStencil8,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Always,
stencil,
}),
&[wgpu::ColorStateDescriptor {
format: wgpu::TextureFormat::Bgra8Unorm,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
write_mask,
}],
vertex_buffers_description,
msaa_sample_count,
))
},
MaskState::DrawMaskedContent => {
let (stencil, write_mask) = mask_render_state(MaskState::DrawMaskedContent);
device.create_render_pipeline(&create_pipeline_descriptor(
create_debug_label!("Gradient pipeline draw masked content").as_deref(),
vertex_shader,
fragment_shader,
&pipeline_layout,
Some(wgpu::DepthStencilStateDescriptor {
format: wgpu::TextureFormat::Depth24PlusStencil8,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Equal,
stencil,
}),
&[wgpu::ColorStateDescriptor {
format: wgpu::TextureFormat::Bgra8Unorm,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
write_mask,
}],
vertex_buffers_description,
msaa_sample_count,
))
},
MaskState::ClearMaskStencil => {
let (stencil, write_mask) = mask_render_state(MaskState::ClearMaskStencil);
device.create_render_pipeline(&create_pipeline_descriptor(
create_debug_label!("Gradient pipeline clear mask stencil").as_deref(),
vertex_shader,
fragment_shader,
&pipeline_layout,
Some(wgpu::DepthStencilStateDescriptor {
format: wgpu::TextureFormat::Depth24PlusStencil8,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Always,
stencil,
}),
&[wgpu::ColorStateDescriptor {
format: wgpu::TextureFormat::Bgra8Unorm,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
write_mask,
}],
vertex_buffers_description,
msaa_sample_count,
))
}
};
ShapePipeline { mask_pipelines }
}
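/// Maps a [`MaskState`] to the stencil-face descriptor and color write mask shared by the
/// pipelines above: the mask-writing states (`DrawMaskStencil`, `ClearMaskStencil`) compare
/// against the current stencil value and increment/decrement it while disabling color writes,
/// whereas `NoMask` and `DrawMaskedContent` leave the stencil untouched and write color
/// normally.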
fn mask_render_state(state: MaskState) -> (wgpu::StencilStateDescriptor, wgpu::ColorWrite) {
let (stencil_state, color_write) = match state {
MaskState::NoMask => (
wgpu::StencilStateFaceDescriptor {
compare: wgpu::CompareFunction::Always,
fail_op: wgpu::StencilOperation::Keep,
depth_fail_op: wgpu::StencilOperation::Keep,
pass_op: wgpu::StencilOperation::Keep,
},
wgpu::ColorWrite::ALL,
),
MaskState::DrawMaskStencil => (
wgpu::StencilStateFaceDescriptor {
compare: wgpu::CompareFunction::Equal,
fail_op: wgpu::StencilOperation::Keep,
depth_fail_op: wgpu::StencilOperation::Keep,
pass_op: wgpu::StencilOperation::IncrementClamp,
},
wgpu::ColorWrite::empty(),
),
MaskState::DrawMaskedContent => (
wgpu::StencilStateFaceDescriptor {
compare: wgpu::CompareFunction::Equal,
fail_op: wgpu::StencilOperation::Keep,
depth_fail_op: wgpu::StencilOperation::Keep,
pass_op: wgpu::StencilOperation::Keep,
},
wgpu::ColorWrite::ALL,
),
MaskState::ClearMaskStencil => (
wgpu::StencilStateFaceDescriptor {
compare: wgpu::CompareFunction::Equal,
fail_op: wgpu::StencilOperation::Keep,
depth_fail_op: wgpu::StencilOperation::Keep,
pass_op: wgpu::StencilOperation::DecrementClamp,
},
wgpu::ColorWrite::empty(),
),
};
(
wgpu::StencilStateDescriptor {
front: stencil_state.clone(),
back: stencil_state,
read_mask: 0xff,
write_mask: 0xff,
},
color_write,
)
}
| 41.823129 | 97 | 0.534938 |
9c0da3a67d74b032dd4af71b389441fea9147756
| 374 |
use super::*;
pub struct ProcExeSymINode(ProcessRef);
impl ProcExeSymINode {
pub fn new(process_ref: &ProcessRef) -> Arc<dyn INode> {
Arc::new(SymLink::new(Self(Arc::clone(process_ref))))
}
}
impl ProcINode for ProcExeSymINode {
fn generate_data_in_bytes(&self) -> vfs::Result<Vec<u8>> {
Ok(self.0.exec_path().to_owned().into_bytes())
}
}
| 23.375 | 62 | 0.660428 |
abb9aa00454a79b61cec9dc647c206dd5897a7f7
| 68,891 |
//! AArch64 ISA: binary code emission.
use crate::binemit::{CodeOffset, Reloc};
use crate::ir::constant::ConstantData;
use crate::ir::types::*;
use crate::ir::TrapCode;
use crate::isa::aarch64::inst::*;
use crate::isa::aarch64::lower::ty_bits;
use regalloc::{Reg, RegClass, Writable};
use core::convert::TryFrom;
use log::debug;
/// Memory label/reference finalization: convert a MemLabel to a PC-relative
/// offset, possibly emitting relocation(s) as necessary.
pub fn memlabel_finalize(_insn_off: CodeOffset, label: &MemLabel) -> i32 {
match label {
&MemLabel::PCRel(rel) => rel,
}
}
/// Memory addressing mode finalization: convert "special" modes (e.g.,
/// generic arbitrary stack offset) into real addressing modes, possibly by
/// emitting some helper instructions that come immediately before the use
/// of this amode.
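///
/// For illustration (a sketch of the observable behaviour, not an exact instruction listing):
/// an `SPOffset` whose offset fits a signed 9-bit or scaled unsigned 12-bit immediate is
/// rewritten in place to an `Unscaled`/`UnsignedOffset` amode with no helper instructions,
/// while a larger offset is first materialized into the spill-temporary register and then
/// added to the base with an extended-register `add`, leaving a plain register amode.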
pub fn mem_finalize(
insn_off: CodeOffset,
mem: &MemArg,
state: &EmitState,
) -> (SmallVec<[Inst; 4]>, MemArg) {
match mem {
&MemArg::RegOffset(_, off, ty)
| &MemArg::SPOffset(off, ty)
| &MemArg::FPOffset(off, ty)
| &MemArg::NominalSPOffset(off, ty) => {
let basereg = match mem {
&MemArg::RegOffset(reg, _, _) => reg,
&MemArg::SPOffset(..) | &MemArg::NominalSPOffset(..) => stack_reg(),
&MemArg::FPOffset(..) => fp_reg(),
_ => unreachable!(),
};
let adj = match mem {
&MemArg::NominalSPOffset(..) => {
debug!(
"mem_finalize: nominal SP offset {} + adj {} -> {}",
off,
state.virtual_sp_offset,
off + state.virtual_sp_offset
);
state.virtual_sp_offset
}
_ => 0,
};
let off = off + adj;
if let Some(simm9) = SImm9::maybe_from_i64(off) {
let mem = MemArg::Unscaled(basereg, simm9);
(smallvec![], mem)
} else if let Some(uimm12s) = UImm12Scaled::maybe_from_i64(off, ty) {
let mem = MemArg::UnsignedOffset(basereg, uimm12s);
(smallvec![], mem)
} else {
let tmp = writable_spilltmp_reg();
let mut const_insts = Inst::load_constant(tmp, off as u64);
// N.B.: we must use AluRRRExtend because AluRRR uses the "shifted register" form
// (AluRRRShift) instead, which interprets register 31 as the zero reg, not SP. SP
// is a valid base (for SPOffset) which we must handle here.
// Also, SP needs to be the first arg, not second.
let add_inst = Inst::AluRRRExtend {
alu_op: ALUOp::Add64,
rd: tmp,
rn: basereg,
rm: tmp.to_reg(),
extendop: ExtendOp::UXTX,
};
const_insts.push(add_inst);
(const_insts, MemArg::reg(tmp.to_reg()))
}
}
&MemArg::Label(ref label) => {
let off = memlabel_finalize(insn_off, label);
(smallvec![], MemArg::Label(MemLabel::PCRel(off)))
}
_ => (smallvec![], mem.clone()),
}
}
/// Helper: get a ConstantData from a u64.
pub fn u64_constant(bits: u64) -> ConstantData {
let data = bits.to_le_bytes();
ConstantData::from(&data[..])
}
//=============================================================================
// Instructions and subcomponents: emission
fn machreg_to_gpr(m: Reg) -> u32 {
assert_eq!(m.get_class(), RegClass::I64);
u32::try_from(m.to_real_reg().get_hw_encoding()).unwrap()
}
fn machreg_to_vec(m: Reg) -> u32 {
assert_eq!(m.get_class(), RegClass::V128);
u32::try_from(m.to_real_reg().get_hw_encoding()).unwrap()
}
fn machreg_to_gpr_or_vec(m: Reg) -> u32 {
u32::try_from(m.to_real_reg().get_hw_encoding()).unwrap()
}
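/// Packs a three-register ALU instruction: `bits_31_21` and `bits_15_10` carry the opcode and
/// shift/extend fields, with `rd`, `rn` and `rm` placed in the usual A64 bit positions.
///
/// For example (an illustrative sketch): a 64-bit `ADD x0, x1, x2` uses `bits_31_21 =
/// 0b10001011_000` and `bits_15_10 = 0`, yielding `0x8B02_0020`, the standard A64 encoding.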
fn enc_arith_rrr(bits_31_21: u32, bits_15_10: u32, rd: Writable<Reg>, rn: Reg, rm: Reg) -> u32 {
(bits_31_21 << 21)
| (bits_15_10 << 10)
| machreg_to_gpr(rd.to_reg())
| (machreg_to_gpr(rn) << 5)
| (machreg_to_gpr(rm) << 16)
}
fn enc_arith_rr_imm12(
bits_31_24: u32,
immshift: u32,
imm12: u32,
rn: Reg,
rd: Writable<Reg>,
) -> u32 {
(bits_31_24 << 24)
| (immshift << 22)
| (imm12 << 10)
| (machreg_to_gpr(rn) << 5)
| machreg_to_gpr(rd.to_reg())
}
fn enc_arith_rr_imml(bits_31_23: u32, imm_bits: u32, rn: Reg, rd: Writable<Reg>) -> u32 {
(bits_31_23 << 23) | (imm_bits << 10) | (machreg_to_gpr(rn) << 5) | machreg_to_gpr(rd.to_reg())
}
fn enc_arith_rrrr(top11: u32, rm: Reg, bit15: u32, ra: Reg, rn: Reg, rd: Writable<Reg>) -> u32 {
(top11 << 21)
| (machreg_to_gpr(rm) << 16)
| (bit15 << 15)
| (machreg_to_gpr(ra) << 10)
| (machreg_to_gpr(rn) << 5)
| machreg_to_gpr(rd.to_reg())
}
fn enc_jump26(op_31_26: u32, off_26_0: u32) -> u32 {
assert!(off_26_0 < (1 << 26));
(op_31_26 << 26) | off_26_0
}
fn enc_cmpbr(op_31_24: u32, off_18_0: u32, reg: Reg) -> u32 {
assert!(off_18_0 < (1 << 19));
(op_31_24 << 24) | (off_18_0 << 5) | machreg_to_gpr(reg)
}
fn enc_cbr(op_31_24: u32, off_18_0: u32, op_4: u32, cond: u32) -> u32 {
assert!(off_18_0 < (1 << 19));
assert!(cond < (1 << 4));
(op_31_24 << 24) | (off_18_0 << 5) | (op_4 << 4) | cond
}
fn enc_conditional_br(taken: BranchTarget, kind: CondBrKind) -> u32 {
match kind {
CondBrKind::Zero(reg) => enc_cmpbr(0b1_011010_0, taken.as_offset19_or_zero(), reg),
CondBrKind::NotZero(reg) => enc_cmpbr(0b1_011010_1, taken.as_offset19_or_zero(), reg),
CondBrKind::Cond(c) => enc_cbr(0b01010100, taken.as_offset19_or_zero(), 0b0, c.bits()),
}
}
const MOVE_WIDE_FIXED: u32 = 0x92800000;
#[repr(u32)]
enum MoveWideOpcode {
MOVN = 0b00,
MOVZ = 0b10,
MOVK = 0b11,
}
fn enc_move_wide(op: MoveWideOpcode, rd: Writable<Reg>, imm: MoveWideConst) -> u32 {
assert!(imm.shift <= 0b11);
MOVE_WIDE_FIXED
| (op as u32) << 29
| u32::from(imm.shift) << 21
| u32::from(imm.bits) << 5
| machreg_to_gpr(rd.to_reg())
}
fn enc_ldst_pair(op_31_22: u32, simm7: SImm7Scaled, rn: Reg, rt: Reg, rt2: Reg) -> u32 {
(op_31_22 << 22)
| (simm7.bits() << 15)
| (machreg_to_gpr(rt2) << 10)
| (machreg_to_gpr(rn) << 5)
| machreg_to_gpr(rt)
}
fn enc_ldst_simm9(op_31_22: u32, simm9: SImm9, op_11_10: u32, rn: Reg, rd: Reg) -> u32 {
(op_31_22 << 22)
| (simm9.bits() << 12)
| (op_11_10 << 10)
| (machreg_to_gpr(rn) << 5)
| machreg_to_gpr_or_vec(rd)
}
fn enc_ldst_uimm12(op_31_22: u32, uimm12: UImm12Scaled, rn: Reg, rd: Reg) -> u32 {
(op_31_22 << 22)
| (0b1 << 24)
| (uimm12.bits() << 10)
| (machreg_to_gpr(rn) << 5)
| machreg_to_gpr_or_vec(rd)
}
fn enc_ldst_reg(
op_31_22: u32,
rn: Reg,
rm: Reg,
s_bit: bool,
extendop: Option<ExtendOp>,
rd: Reg,
) -> u32 {
let s_bit = if s_bit { 1 } else { 0 };
let extend_bits = match extendop {
Some(ExtendOp::UXTW) => 0b010,
Some(ExtendOp::SXTW) => 0b110,
Some(ExtendOp::SXTX) => 0b111,
None => 0b011, // LSL
_ => panic!("bad extend mode for ld/st MemArg"),
};
(op_31_22 << 22)
| (1 << 21)
| (machreg_to_gpr(rm) << 16)
| (extend_bits << 13)
| (s_bit << 12)
| (0b10 << 10)
| (machreg_to_gpr(rn) << 5)
| machreg_to_gpr_or_vec(rd)
}
fn enc_ldst_imm19(op_31_24: u32, imm19: u32, rd: Reg) -> u32 {
(op_31_24 << 24) | (imm19 << 5) | machreg_to_gpr_or_vec(rd)
}
fn enc_extend(top22: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
(top22 << 10) | (machreg_to_gpr(rn) << 5) | machreg_to_gpr(rd.to_reg())
}
fn enc_vec_rrr(top11: u32, rm: Reg, bit15_10: u32, rn: Reg, rd: Writable<Reg>) -> u32 {
(top11 << 21)
| (machreg_to_vec(rm) << 16)
| (bit15_10 << 10)
| (machreg_to_vec(rn) << 5)
| machreg_to_vec(rd.to_reg())
}
fn enc_bit_rr(size: u32, opcode2: u32, opcode1: u32, rn: Reg, rd: Writable<Reg>) -> u32 {
(0b01011010110 << 21)
| size << 31
| opcode2 << 16
| opcode1 << 10
| machreg_to_gpr(rn) << 5
| machreg_to_gpr(rd.to_reg())
}
fn enc_br(rn: Reg) -> u32 {
0b1101011_0000_11111_000000_00000_00000 | (machreg_to_gpr(rn) << 5)
}
fn enc_adr(off: i32, rd: Writable<Reg>) -> u32 {
let off = u32::try_from(off).unwrap();
let immlo = off & 3;
let immhi = (off >> 2) & ((1 << 19) - 1);
(0b00010000 << 24) | (immlo << 29) | (immhi << 5) | machreg_to_gpr(rd.to_reg())
}
fn enc_csel(rd: Writable<Reg>, rn: Reg, rm: Reg, cond: Cond) -> u32 {
0b100_11010100_00000_0000_00_00000_00000
| (machreg_to_gpr(rm) << 16)
| (machreg_to_gpr(rn) << 5)
| machreg_to_gpr(rd.to_reg())
| (cond.bits() << 12)
}
fn enc_fcsel(rd: Writable<Reg>, rn: Reg, rm: Reg, cond: Cond, size: InstSize) -> u32 {
let ty_bit = if size.is32() { 0 } else { 1 };
0b000_11110_00_1_00000_0000_11_00000_00000
| (machreg_to_vec(rm) << 16)
| (machreg_to_vec(rn) << 5)
| machreg_to_vec(rd.to_reg())
| (cond.bits() << 12)
| (ty_bit << 22)
}
fn enc_cset(rd: Writable<Reg>, cond: Cond) -> u32 {
0b100_11010100_11111_0000_01_11111_00000
| machreg_to_gpr(rd.to_reg())
| (cond.invert().bits() << 12)
}
fn enc_ccmp_imm(size: InstSize, rn: Reg, imm: UImm5, nzcv: NZCV, cond: Cond) -> u32 {
0b0_1_1_11010010_00000_0000_10_00000_0_0000
| size.sf_bit() << 31
| imm.bits() << 16
| cond.bits() << 12
| machreg_to_gpr(rn) << 5
| nzcv.bits()
}
fn enc_vecmov(is_16b: bool, rd: Writable<Reg>, rn: Reg) -> u32 {
0b00001110_101_00000_00011_1_00000_00000
| ((is_16b as u32) << 30)
| machreg_to_vec(rd.to_reg())
| (machreg_to_vec(rn) << 16)
| (machreg_to_vec(rn) << 5)
}
fn enc_fpurr(top22: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
(top22 << 10) | (machreg_to_vec(rn) << 5) | machreg_to_vec(rd.to_reg())
}
fn enc_fpurrr(top22: u32, rd: Writable<Reg>, rn: Reg, rm: Reg) -> u32 {
(top22 << 10)
| (machreg_to_vec(rm) << 16)
| (machreg_to_vec(rn) << 5)
| machreg_to_vec(rd.to_reg())
}
fn enc_fpurrrr(top17: u32, rd: Writable<Reg>, rn: Reg, rm: Reg, ra: Reg) -> u32 {
(top17 << 15)
| (machreg_to_vec(rm) << 16)
| (machreg_to_vec(ra) << 10)
| (machreg_to_vec(rn) << 5)
| machreg_to_vec(rd.to_reg())
}
fn enc_fcmp(size: InstSize, rn: Reg, rm: Reg) -> u32 {
let bits = if size.is32() {
0b000_11110_00_1_00000_00_1000_00000_00000
} else {
0b000_11110_01_1_00000_00_1000_00000_00000
};
bits | (machreg_to_vec(rm) << 16) | (machreg_to_vec(rn) << 5)
}
fn enc_fputoint(top16: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
(top16 << 16) | (machreg_to_vec(rn) << 5) | machreg_to_gpr(rd.to_reg())
}
fn enc_inttofpu(top16: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
(top16 << 16) | (machreg_to_gpr(rn) << 5) | machreg_to_vec(rd.to_reg())
}
fn enc_fround(top22: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
(top22 << 10) | (machreg_to_vec(rn) << 5) | machreg_to_vec(rd.to_reg())
}
fn enc_vec_rr_misc(bits_12_16: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
debug_assert_eq!(bits_12_16 & 0b11111, bits_12_16);
let bits = 0b0_1_1_01110_00_10000_00000_10_00000_00000;
bits | bits_12_16 << 12 | machreg_to_vec(rn) << 5 | machreg_to_vec(rd.to_reg())
}
fn enc_vec_lanes(q: u32, u: u32, size: u32, opcode: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
debug_assert_eq!(q & 0b1, q);
debug_assert_eq!(u & 0b1, u);
debug_assert_eq!(size & 0b11, size);
debug_assert_eq!(opcode & 0b11111, opcode);
0b0_0_0_01110_00_11000_0_0000_10_00000_00000
| q << 30
| u << 29
| size << 22
| opcode << 12
| machreg_to_vec(rn) << 5
| machreg_to_vec(rd.to_reg())
}
/// State carried between emissions of a sequence of instructions.
#[derive(Default, Clone, Debug)]
pub struct EmitState {
virtual_sp_offset: i64,
}
impl MachInstEmit for Inst {
type State = EmitState;
fn emit(&self, sink: &mut MachBuffer<Inst>, flags: &settings::Flags, state: &mut EmitState) {
// N.B.: we *must* not exceed the "worst-case size" used to compute
// where to insert islands, except when islands are explicitly triggered
// (with an `EmitIsland`). We check this in debug builds. This is `mut`
// to allow disabling the check for `JTSequence`, which is always
// emitted following an `EmitIsland`.
let mut start_off = sink.cur_offset();
match self {
&Inst::AluRRR { alu_op, rd, rn, rm } => {
let top11 = match alu_op {
ALUOp::Add32 => 0b00001011_000,
ALUOp::Add64 => 0b10001011_000,
ALUOp::Sub32 => 0b01001011_000,
ALUOp::Sub64 => 0b11001011_000,
ALUOp::Orr32 => 0b00101010_000,
ALUOp::Orr64 => 0b10101010_000,
ALUOp::And32 => 0b00001010_000,
ALUOp::And64 => 0b10001010_000,
ALUOp::Eor32 => 0b01001010_000,
ALUOp::Eor64 => 0b11001010_000,
ALUOp::OrrNot32 => 0b00101010_001,
ALUOp::OrrNot64 => 0b10101010_001,
ALUOp::AndNot32 => 0b00001010_001,
ALUOp::AndNot64 => 0b10001010_001,
ALUOp::EorNot32 => 0b01001010_001,
ALUOp::EorNot64 => 0b11001010_001,
ALUOp::AddS32 => 0b00101011_000,
ALUOp::AddS64 => 0b10101011_000,
ALUOp::SubS32 => 0b01101011_000,
ALUOp::SubS64 => 0b11101011_000,
ALUOp::SubS64XR => 0b11101011_001,
ALUOp::SDiv64 => 0b10011010_110,
ALUOp::UDiv64 => 0b10011010_110,
ALUOp::RotR32 | ALUOp::Lsr32 | ALUOp::Asr32 | ALUOp::Lsl32 => 0b00011010_110,
ALUOp::RotR64 | ALUOp::Lsr64 | ALUOp::Asr64 | ALUOp::Lsl64 => 0b10011010_110,
ALUOp::MAdd32
| ALUOp::MAdd64
| ALUOp::MSub32
| ALUOp::MSub64
| ALUOp::SMulH
| ALUOp::UMulH => {
                        // RRRR ops.
panic!("Bad ALUOp {:?} in RRR form!", alu_op);
}
};
let bit15_10 = match alu_op {
ALUOp::SDiv64 => 0b000011,
ALUOp::UDiv64 => 0b000010,
ALUOp::RotR32 | ALUOp::RotR64 => 0b001011,
ALUOp::Lsr32 | ALUOp::Lsr64 => 0b001001,
ALUOp::Asr32 | ALUOp::Asr64 => 0b001010,
ALUOp::Lsl32 | ALUOp::Lsl64 => 0b001000,
ALUOp::SubS64XR => 0b011000,
_ => 0b000000,
};
debug_assert_ne!(writable_stack_reg(), rd);
// The stack pointer is the zero register if this instruction
// doesn't have access to extended registers, so this might be
// an indication that something is wrong.
if alu_op != ALUOp::SubS64XR {
debug_assert_ne!(stack_reg(), rn);
}
debug_assert_ne!(stack_reg(), rm);
sink.put4(enc_arith_rrr(top11, bit15_10, rd, rn, rm));
}
&Inst::AluRRRR {
alu_op,
rd,
rm,
rn,
ra,
} => {
let (top11, bit15) = match alu_op {
ALUOp::MAdd32 => (0b0_00_11011_000, 0),
ALUOp::MSub32 => (0b0_00_11011_000, 1),
ALUOp::MAdd64 => (0b1_00_11011_000, 0),
ALUOp::MSub64 => (0b1_00_11011_000, 1),
ALUOp::SMulH => (0b1_00_11011_010, 0),
ALUOp::UMulH => (0b1_00_11011_110, 0),
_ => unimplemented!("{:?}", alu_op),
};
sink.put4(enc_arith_rrrr(top11, rm, bit15, ra, rn, rd));
}
&Inst::AluRRImm12 {
alu_op,
rd,
rn,
ref imm12,
} => {
let top8 = match alu_op {
ALUOp::Add32 => 0b000_10001,
ALUOp::Add64 => 0b100_10001,
ALUOp::Sub32 => 0b010_10001,
ALUOp::Sub64 => 0b110_10001,
ALUOp::AddS32 => 0b001_10001,
ALUOp::AddS64 => 0b101_10001,
ALUOp::SubS32 => 0b011_10001,
ALUOp::SubS64 => 0b111_10001,
_ => unimplemented!("{:?}", alu_op),
};
sink.put4(enc_arith_rr_imm12(
top8,
imm12.shift_bits(),
imm12.imm_bits(),
rn,
rd,
));
}
&Inst::AluRRImmLogic {
alu_op,
rd,
rn,
ref imml,
} => {
let (top9, inv) = match alu_op {
ALUOp::Orr32 => (0b001_100100, false),
ALUOp::Orr64 => (0b101_100100, false),
ALUOp::And32 => (0b000_100100, false),
ALUOp::And64 => (0b100_100100, false),
ALUOp::Eor32 => (0b010_100100, false),
ALUOp::Eor64 => (0b110_100100, false),
ALUOp::OrrNot32 => (0b001_100100, true),
ALUOp::OrrNot64 => (0b101_100100, true),
ALUOp::AndNot32 => (0b000_100100, true),
ALUOp::AndNot64 => (0b100_100100, true),
ALUOp::EorNot32 => (0b010_100100, true),
ALUOp::EorNot64 => (0b110_100100, true),
_ => unimplemented!("{:?}", alu_op),
};
let imml = if inv { imml.invert() } else { imml.clone() };
sink.put4(enc_arith_rr_imml(top9, imml.enc_bits(), rn, rd));
}
&Inst::AluRRImmShift {
alu_op,
rd,
rn,
ref immshift,
} => {
let amt = immshift.value();
let (top10, immr, imms) = match alu_op {
ALUOp::RotR32 => (0b0001001110, machreg_to_gpr(rn), u32::from(amt)),
ALUOp::RotR64 => (0b1001001111, machreg_to_gpr(rn), u32::from(amt)),
ALUOp::Lsr32 => (0b0101001100, u32::from(amt), 0b011111),
ALUOp::Lsr64 => (0b1101001101, u32::from(amt), 0b111111),
ALUOp::Asr32 => (0b0001001100, u32::from(amt), 0b011111),
ALUOp::Asr64 => (0b1001001101, u32::from(amt), 0b111111),
ALUOp::Lsl32 => (0b0101001100, u32::from(32 - amt), u32::from(31 - amt)),
ALUOp::Lsl64 => (0b1101001101, u32::from(64 - amt), u32::from(63 - amt)),
_ => unimplemented!("{:?}", alu_op),
};
sink.put4(
(top10 << 22)
| (immr << 16)
| (imms << 10)
| (machreg_to_gpr(rn) << 5)
| machreg_to_gpr(rd.to_reg()),
);
}
&Inst::AluRRRShift {
alu_op,
rd,
rn,
rm,
ref shiftop,
} => {
let top11: u32 = match alu_op {
ALUOp::Add32 => 0b000_01011000,
ALUOp::Add64 => 0b100_01011000,
ALUOp::AddS32 => 0b001_01011000,
ALUOp::AddS64 => 0b101_01011000,
ALUOp::Sub32 => 0b010_01011000,
ALUOp::Sub64 => 0b110_01011000,
ALUOp::SubS32 => 0b011_01011000,
ALUOp::SubS64 => 0b111_01011000,
ALUOp::Orr32 => 0b001_01010000,
ALUOp::Orr64 => 0b101_01010000,
ALUOp::And32 => 0b000_01010000,
ALUOp::And64 => 0b100_01010000,
ALUOp::Eor32 => 0b010_01010000,
ALUOp::Eor64 => 0b110_01010000,
ALUOp::OrrNot32 => 0b001_01010001,
ALUOp::OrrNot64 => 0b101_01010001,
ALUOp::EorNot32 => 0b010_01010001,
ALUOp::EorNot64 => 0b110_01010001,
ALUOp::AndNot32 => 0b000_01010001,
ALUOp::AndNot64 => 0b100_01010001,
_ => unimplemented!("{:?}", alu_op),
};
let top11 = top11 | (u32::from(shiftop.op().bits()) << 1);
let bits_15_10 = u32::from(shiftop.amt().value());
sink.put4(enc_arith_rrr(top11, bits_15_10, rd, rn, rm));
}
&Inst::AluRRRExtend {
alu_op,
rd,
rn,
rm,
extendop,
} => {
let top11: u32 = match alu_op {
ALUOp::Add32 => 0b00001011001,
ALUOp::Add64 => 0b10001011001,
ALUOp::Sub32 => 0b01001011001,
ALUOp::Sub64 => 0b11001011001,
ALUOp::AddS32 => 0b00101011001,
ALUOp::AddS64 => 0b10101011001,
ALUOp::SubS32 => 0b01101011001,
ALUOp::SubS64 => 0b11101011001,
_ => unimplemented!("{:?}", alu_op),
};
let bits_15_10 = u32::from(extendop.bits()) << 3;
sink.put4(enc_arith_rrr(top11, bits_15_10, rd, rn, rm));
}
&Inst::BitRR { op, rd, rn, .. } => {
let size = if op.inst_size().is32() { 0b0 } else { 0b1 };
let (op1, op2) = match op {
BitOp::RBit32 | BitOp::RBit64 => (0b00000, 0b000000),
BitOp::Clz32 | BitOp::Clz64 => (0b00000, 0b000100),
BitOp::Cls32 | BitOp::Cls64 => (0b00000, 0b000101),
};
sink.put4(enc_bit_rr(size, op1, op2, rn, rd))
}
&Inst::ULoad8 {
rd,
ref mem,
srcloc,
}
| &Inst::SLoad8 {
rd,
ref mem,
srcloc,
}
| &Inst::ULoad16 {
rd,
ref mem,
srcloc,
}
| &Inst::SLoad16 {
rd,
ref mem,
srcloc,
}
| &Inst::ULoad32 {
rd,
ref mem,
srcloc,
}
| &Inst::SLoad32 {
rd,
ref mem,
srcloc,
}
| &Inst::ULoad64 {
rd,
ref mem,
srcloc,
..
}
| &Inst::FpuLoad32 {
rd,
ref mem,
srcloc,
}
| &Inst::FpuLoad64 {
rd,
ref mem,
srcloc,
}
| &Inst::FpuLoad128 {
rd,
ref mem,
srcloc,
} => {
let (mem_insts, mem) = mem_finalize(sink.cur_offset(), mem, state);
for inst in mem_insts.into_iter() {
inst.emit(sink, flags, state);
}
// ldst encoding helpers take Reg, not Writable<Reg>.
let rd = rd.to_reg();
// This is the base opcode (top 10 bits) for the "unscaled
// immediate" form (Unscaled). Other addressing modes will OR in
// other values for bits 24/25 (bits 1/2 of this constant).
let (op, bits) = match self {
&Inst::ULoad8 { .. } => (0b0011100001, 8),
&Inst::SLoad8 { .. } => (0b0011100010, 8),
&Inst::ULoad16 { .. } => (0b0111100001, 16),
&Inst::SLoad16 { .. } => (0b0111100010, 16),
&Inst::ULoad32 { .. } => (0b1011100001, 32),
&Inst::SLoad32 { .. } => (0b1011100010, 32),
&Inst::ULoad64 { .. } => (0b1111100001, 64),
&Inst::FpuLoad32 { .. } => (0b1011110001, 32),
&Inst::FpuLoad64 { .. } => (0b1111110001, 64),
&Inst::FpuLoad128 { .. } => (0b0011110011, 128),
_ => unreachable!(),
};
if let Some(srcloc) = srcloc {
// Register the offset at which the actual load instruction starts.
sink.add_trap(srcloc, TrapCode::HeapOutOfBounds);
}
match &mem {
&MemArg::Unscaled(reg, simm9) => {
sink.put4(enc_ldst_simm9(op, simm9, 0b00, reg, rd));
}
&MemArg::UnsignedOffset(reg, uimm12scaled) => {
if uimm12scaled.value() != 0 {
assert_eq!(bits, ty_bits(uimm12scaled.scale_ty()));
}
sink.put4(enc_ldst_uimm12(op, uimm12scaled, reg, rd));
}
&MemArg::RegReg(r1, r2) => {
sink.put4(enc_ldst_reg(
op, r1, r2, /* scaled = */ false, /* extendop = */ None, rd,
));
}
&MemArg::RegScaled(r1, r2, ty) | &MemArg::RegScaledExtended(r1, r2, ty, _) => {
assert_eq!(bits, ty_bits(ty));
let extendop = match &mem {
&MemArg::RegScaled(..) => None,
&MemArg::RegScaledExtended(_, _, _, op) => Some(op),
_ => unreachable!(),
};
sink.put4(enc_ldst_reg(
op, r1, r2, /* scaled = */ true, extendop, rd,
));
}
&MemArg::RegExtended(r1, r2, extendop) => {
sink.put4(enc_ldst_reg(
op,
r1,
r2,
/* scaled = */ false,
Some(extendop),
rd,
));
}
&MemArg::Label(ref label) => {
let offset = match label {
// cast i32 to u32 (two's-complement)
&MemLabel::PCRel(off) => off as u32,
} / 4;
assert!(offset < (1 << 19));
match self {
&Inst::ULoad32 { .. } => {
sink.put4(enc_ldst_imm19(0b00011000, offset, rd));
}
&Inst::SLoad32 { .. } => {
sink.put4(enc_ldst_imm19(0b10011000, offset, rd));
}
&Inst::FpuLoad32 { .. } => {
sink.put4(enc_ldst_imm19(0b00011100, offset, rd));
}
&Inst::ULoad64 { .. } => {
sink.put4(enc_ldst_imm19(0b01011000, offset, rd));
}
&Inst::FpuLoad64 { .. } => {
sink.put4(enc_ldst_imm19(0b01011100, offset, rd));
}
&Inst::FpuLoad128 { .. } => {
sink.put4(enc_ldst_imm19(0b10011100, offset, rd));
}
_ => panic!("Unspported size for LDR from constant pool!"),
}
}
&MemArg::PreIndexed(reg, simm9) => {
sink.put4(enc_ldst_simm9(op, simm9, 0b11, reg.to_reg(), rd));
}
&MemArg::PostIndexed(reg, simm9) => {
sink.put4(enc_ldst_simm9(op, simm9, 0b01, reg.to_reg(), rd));
}
// Eliminated by `mem_finalize()` above.
&MemArg::SPOffset(..)
| &MemArg::FPOffset(..)
| &MemArg::NominalSPOffset(..) => panic!("Should not see stack-offset here!"),
&MemArg::RegOffset(..) => panic!("SHould not see generic reg-offset here!"),
}
}
&Inst::Store8 {
rd,
ref mem,
srcloc,
}
| &Inst::Store16 {
rd,
ref mem,
srcloc,
}
| &Inst::Store32 {
rd,
ref mem,
srcloc,
}
| &Inst::Store64 {
rd,
ref mem,
srcloc,
..
}
| &Inst::FpuStore32 {
rd,
ref mem,
srcloc,
}
| &Inst::FpuStore64 {
rd,
ref mem,
srcloc,
}
| &Inst::FpuStore128 {
rd,
ref mem,
srcloc,
} => {
let (mem_insts, mem) = mem_finalize(sink.cur_offset(), mem, state);
for inst in mem_insts.into_iter() {
inst.emit(sink, flags, state);
}
let (op, bits) = match self {
&Inst::Store8 { .. } => (0b0011100000, 8),
&Inst::Store16 { .. } => (0b0111100000, 16),
&Inst::Store32 { .. } => (0b1011100000, 32),
&Inst::Store64 { .. } => (0b1111100000, 64),
&Inst::FpuStore32 { .. } => (0b1011110000, 32),
&Inst::FpuStore64 { .. } => (0b1111110000, 64),
&Inst::FpuStore128 { .. } => (0b0011110010, 128),
_ => unreachable!(),
};
if let Some(srcloc) = srcloc {
                    // Register the offset at which the actual store instruction starts.
sink.add_trap(srcloc, TrapCode::HeapOutOfBounds);
}
match &mem {
&MemArg::Unscaled(reg, simm9) => {
sink.put4(enc_ldst_simm9(op, simm9, 0b00, reg, rd));
}
&MemArg::UnsignedOffset(reg, uimm12scaled) => {
if uimm12scaled.value() != 0 {
assert_eq!(bits, ty_bits(uimm12scaled.scale_ty()));
}
sink.put4(enc_ldst_uimm12(op, uimm12scaled, reg, rd));
}
&MemArg::RegReg(r1, r2) => {
sink.put4(enc_ldst_reg(
op, r1, r2, /* scaled = */ false, /* extendop = */ None, rd,
));
}
&MemArg::RegScaled(r1, r2, _ty)
| &MemArg::RegScaledExtended(r1, r2, _ty, _) => {
let extendop = match &mem {
&MemArg::RegScaled(..) => None,
&MemArg::RegScaledExtended(_, _, _, op) => Some(op),
_ => unreachable!(),
};
sink.put4(enc_ldst_reg(
op, r1, r2, /* scaled = */ true, extendop, rd,
));
}
&MemArg::RegExtended(r1, r2, extendop) => {
sink.put4(enc_ldst_reg(
op,
r1,
r2,
/* scaled = */ false,
Some(extendop),
rd,
));
}
&MemArg::Label(..) => {
panic!("Store to a MemLabel not implemented!");
}
&MemArg::PreIndexed(reg, simm9) => {
sink.put4(enc_ldst_simm9(op, simm9, 0b11, reg.to_reg(), rd));
}
&MemArg::PostIndexed(reg, simm9) => {
sink.put4(enc_ldst_simm9(op, simm9, 0b01, reg.to_reg(), rd));
}
// Eliminated by `mem_finalize()` above.
&MemArg::SPOffset(..)
| &MemArg::FPOffset(..)
| &MemArg::NominalSPOffset(..) => panic!("Should not see stack-offset here!"),
&MemArg::RegOffset(..) => panic!("SHould not see generic reg-offset here!"),
}
}
&Inst::StoreP64 { rt, rt2, ref mem } => match mem {
&PairMemArg::SignedOffset(reg, simm7) => {
assert_eq!(simm7.scale_ty, I64);
sink.put4(enc_ldst_pair(0b1010100100, simm7, reg, rt, rt2));
}
&PairMemArg::PreIndexed(reg, simm7) => {
assert_eq!(simm7.scale_ty, I64);
sink.put4(enc_ldst_pair(0b1010100110, simm7, reg.to_reg(), rt, rt2));
}
&PairMemArg::PostIndexed(reg, simm7) => {
assert_eq!(simm7.scale_ty, I64);
sink.put4(enc_ldst_pair(0b1010100010, simm7, reg.to_reg(), rt, rt2));
}
},
&Inst::LoadP64 { rt, rt2, ref mem } => {
let rt = rt.to_reg();
let rt2 = rt2.to_reg();
match mem {
&PairMemArg::SignedOffset(reg, simm7) => {
assert_eq!(simm7.scale_ty, I64);
sink.put4(enc_ldst_pair(0b1010100101, simm7, reg, rt, rt2));
}
&PairMemArg::PreIndexed(reg, simm7) => {
assert_eq!(simm7.scale_ty, I64);
sink.put4(enc_ldst_pair(0b1010100111, simm7, reg.to_reg(), rt, rt2));
}
&PairMemArg::PostIndexed(reg, simm7) => {
assert_eq!(simm7.scale_ty, I64);
sink.put4(enc_ldst_pair(0b1010100011, simm7, reg.to_reg(), rt, rt2));
}
}
}
&Inst::Mov { rd, rm } => {
assert!(rd.to_reg().get_class() == rm.get_class());
assert!(rm.get_class() == RegClass::I64);
// MOV to SP is interpreted as MOV to XZR instead. And our codegen
// should never MOV to XZR.
assert!(rd.to_reg() != stack_reg());
if rm == stack_reg() {
// We can't use ORR here, so use an `add rd, sp, #0` instead.
let imm12 = Imm12::maybe_from_u64(0).unwrap();
sink.put4(enc_arith_rr_imm12(
0b100_10001,
imm12.shift_bits(),
imm12.imm_bits(),
rm,
rd,
));
} else {
// Encoded as ORR rd, rm, zero.
sink.put4(enc_arith_rrr(0b10101010_000, 0b000_000, rd, zero_reg(), rm));
}
}
&Inst::Mov32 { rd, rm } => {
// MOV to SP is interpreted as MOV to XZR instead. And our codegen
// should never MOV to XZR.
assert!(machreg_to_gpr(rd.to_reg()) != 31);
// Encoded as ORR rd, rm, zero.
sink.put4(enc_arith_rrr(0b00101010_000, 0b000_000, rd, zero_reg(), rm));
}
&Inst::MovZ { rd, imm } => sink.put4(enc_move_wide(MoveWideOpcode::MOVZ, rd, imm)),
&Inst::MovN { rd, imm } => sink.put4(enc_move_wide(MoveWideOpcode::MOVN, rd, imm)),
&Inst::MovK { rd, imm } => sink.put4(enc_move_wide(MoveWideOpcode::MOVK, rd, imm)),
&Inst::CSel { rd, rn, rm, cond } => {
sink.put4(enc_csel(rd, rn, rm, cond));
}
&Inst::CSet { rd, cond } => {
sink.put4(enc_cset(rd, cond));
}
&Inst::CCmpImm {
size,
rn,
imm,
nzcv,
cond,
} => {
sink.put4(enc_ccmp_imm(size, rn, imm, nzcv, cond));
}
&Inst::FpuMove64 { rd, rn } => {
sink.put4(enc_vecmov(/* 16b = */ false, rd, rn));
}
&Inst::FpuMove128 { rd, rn } => {
sink.put4(enc_vecmov(/* 16b = */ true, rd, rn));
}
&Inst::FpuMoveFromVec { rd, rn, idx, ty } => {
let (imm5, shift, mask) = match ty {
F32 => (0b00100, 3, 0b011),
F64 => (0b01000, 4, 0b001),
_ => unimplemented!(),
};
debug_assert_eq!(idx & mask, idx);
let imm5 = imm5 | ((idx as u32) << shift);
sink.put4(
0b010_11110000_00000_000001_00000_00000
| (imm5 << 16)
| (machreg_to_vec(rn) << 5)
| machreg_to_vec(rd.to_reg()),
);
}
&Inst::FpuRR { fpu_op, rd, rn } => {
let top22 = match fpu_op {
FPUOp1::Abs32 => 0b000_11110_00_1_000001_10000,
FPUOp1::Abs64 => 0b000_11110_01_1_000001_10000,
FPUOp1::Neg32 => 0b000_11110_00_1_000010_10000,
FPUOp1::Neg64 => 0b000_11110_01_1_000010_10000,
FPUOp1::Sqrt32 => 0b000_11110_00_1_000011_10000,
FPUOp1::Sqrt64 => 0b000_11110_01_1_000011_10000,
FPUOp1::Cvt32To64 => 0b000_11110_00_1_000101_10000,
FPUOp1::Cvt64To32 => 0b000_11110_01_1_000100_10000,
};
sink.put4(enc_fpurr(top22, rd, rn));
}
&Inst::FpuRRR { fpu_op, rd, rn, rm } => {
let top22 = match fpu_op {
FPUOp2::Add32 => 0b000_11110_00_1_00000_001010,
FPUOp2::Add64 => 0b000_11110_01_1_00000_001010,
FPUOp2::Sub32 => 0b000_11110_00_1_00000_001110,
FPUOp2::Sub64 => 0b000_11110_01_1_00000_001110,
FPUOp2::Mul32 => 0b000_11110_00_1_00000_000010,
FPUOp2::Mul64 => 0b000_11110_01_1_00000_000010,
FPUOp2::Div32 => 0b000_11110_00_1_00000_000110,
FPUOp2::Div64 => 0b000_11110_01_1_00000_000110,
FPUOp2::Max32 => 0b000_11110_00_1_00000_010010,
FPUOp2::Max64 => 0b000_11110_01_1_00000_010010,
FPUOp2::Min32 => 0b000_11110_00_1_00000_010110,
FPUOp2::Min64 => 0b000_11110_01_1_00000_010110,
};
sink.put4(enc_fpurrr(top22, rd, rn, rm));
}
&Inst::FpuRRI { fpu_op, rd, rn } => match fpu_op {
FPUOpRI::UShr32(imm) => {
debug_assert_eq!(32, imm.lane_size_in_bits);
sink.put4(
0b0_0_1_011110_0000000_00_0_0_0_1_00000_00000
| imm.enc() << 16
| machreg_to_vec(rn) << 5
| machreg_to_vec(rd.to_reg()),
)
}
FPUOpRI::UShr64(imm) => {
debug_assert_eq!(64, imm.lane_size_in_bits);
sink.put4(
0b01_1_111110_0000000_00_0_0_0_1_00000_00000
| imm.enc() << 16
| machreg_to_vec(rn) << 5
| machreg_to_vec(rd.to_reg()),
)
}
FPUOpRI::Sli64(imm) => {
debug_assert_eq!(64, imm.lane_size_in_bits);
sink.put4(
0b01_1_111110_0000000_010101_00000_00000
| imm.enc() << 16
| machreg_to_vec(rn) << 5
| machreg_to_vec(rd.to_reg()),
)
}
FPUOpRI::Sli32(imm) => {
debug_assert_eq!(32, imm.lane_size_in_bits);
sink.put4(
0b0_0_1_011110_0000000_010101_00000_00000
| imm.enc() << 16
| machreg_to_vec(rn) << 5
| machreg_to_vec(rd.to_reg()),
)
}
},
&Inst::FpuRRRR {
fpu_op,
rd,
rn,
rm,
ra,
} => {
let top17 = match fpu_op {
FPUOp3::MAdd32 => 0b000_11111_00_0_00000_0,
FPUOp3::MAdd64 => 0b000_11111_01_0_00000_0,
};
sink.put4(enc_fpurrrr(top17, rd, rn, rm, ra));
}
&Inst::VecMisc { op, rd, rn, ty } => {
let bits_12_16 = match op {
VecMisc2::Not => {
debug_assert_eq!(128, ty_bits(ty));
0b00101
}
};
sink.put4(enc_vec_rr_misc(bits_12_16, rd, rn));
}
&Inst::VecLanes { op, rd, rn, ty } => {
let (q, size) = match ty {
I8X16 => (0b1, 0b00),
I16X8 => (0b1, 0b01),
I32X4 => (0b1, 0b10),
_ => unreachable!(),
};
let (u, opcode) = match op {
VecLanesOp::Uminv => (0b1, 0b11010),
};
sink.put4(enc_vec_lanes(q, u, size, opcode, rd, rn));
}
&Inst::FpuCmp32 { rn, rm } => {
sink.put4(enc_fcmp(InstSize::Size32, rn, rm));
}
&Inst::FpuCmp64 { rn, rm } => {
sink.put4(enc_fcmp(InstSize::Size64, rn, rm));
}
&Inst::FpuToInt { op, rd, rn } => {
let top16 = match op {
// FCVTZS (32/32-bit)
FpuToIntOp::F32ToI32 => 0b000_11110_00_1_11_000,
// FCVTZU (32/32-bit)
FpuToIntOp::F32ToU32 => 0b000_11110_00_1_11_001,
// FCVTZS (32/64-bit)
FpuToIntOp::F32ToI64 => 0b100_11110_00_1_11_000,
// FCVTZU (32/64-bit)
FpuToIntOp::F32ToU64 => 0b100_11110_00_1_11_001,
// FCVTZS (64/32-bit)
FpuToIntOp::F64ToI32 => 0b000_11110_01_1_11_000,
// FCVTZU (64/32-bit)
FpuToIntOp::F64ToU32 => 0b000_11110_01_1_11_001,
// FCVTZS (64/64-bit)
FpuToIntOp::F64ToI64 => 0b100_11110_01_1_11_000,
// FCVTZU (64/64-bit)
FpuToIntOp::F64ToU64 => 0b100_11110_01_1_11_001,
};
sink.put4(enc_fputoint(top16, rd, rn));
}
&Inst::IntToFpu { op, rd, rn } => {
let top16 = match op {
// SCVTF (32/32-bit)
IntToFpuOp::I32ToF32 => 0b000_11110_00_1_00_010,
// UCVTF (32/32-bit)
IntToFpuOp::U32ToF32 => 0b000_11110_00_1_00_011,
// SCVTF (64/32-bit)
IntToFpuOp::I64ToF32 => 0b100_11110_00_1_00_010,
// UCVTF (64/32-bit)
IntToFpuOp::U64ToF32 => 0b100_11110_00_1_00_011,
// SCVTF (32/64-bit)
IntToFpuOp::I32ToF64 => 0b000_11110_01_1_00_010,
// UCVTF (32/64-bit)
IntToFpuOp::U32ToF64 => 0b000_11110_01_1_00_011,
// SCVTF (64/64-bit)
IntToFpuOp::I64ToF64 => 0b100_11110_01_1_00_010,
// UCVTF (64/64-bit)
IntToFpuOp::U64ToF64 => 0b100_11110_01_1_00_011,
};
sink.put4(enc_inttofpu(top16, rd, rn));
}
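            // FP constant loads are emitted as a PC-relative literal load followed by a jump over the
            // inline constant data.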
&Inst::LoadFpuConst32 { rd, const_data } => {
let inst = Inst::FpuLoad32 {
rd,
mem: MemArg::Label(MemLabel::PCRel(8)),
srcloc: None,
};
inst.emit(sink, flags, state);
let inst = Inst::Jump {
dest: BranchTarget::ResolvedOffset(8),
};
inst.emit(sink, flags, state);
sink.put4(const_data.to_bits());
}
&Inst::LoadFpuConst64 { rd, const_data } => {
let inst = Inst::FpuLoad64 {
rd,
mem: MemArg::Label(MemLabel::PCRel(8)),
srcloc: None,
};
inst.emit(sink, flags, state);
let inst = Inst::Jump {
dest: BranchTarget::ResolvedOffset(12),
};
inst.emit(sink, flags, state);
sink.put8(const_data.to_bits());
}
&Inst::LoadFpuConst128 { rd, const_data } => {
let inst = Inst::FpuLoad128 {
rd,
mem: MemArg::Label(MemLabel::PCRel(8)),
srcloc: None,
};
inst.emit(sink, flags, state);
let inst = Inst::Jump {
dest: BranchTarget::ResolvedOffset(20),
};
inst.emit(sink, flags, state);
for i in const_data.to_le_bytes().iter() {
sink.put1(*i);
}
}
&Inst::FpuCSel32 { rd, rn, rm, cond } => {
sink.put4(enc_fcsel(rd, rn, rm, cond, InstSize::Size32));
}
&Inst::FpuCSel64 { rd, rn, rm, cond } => {
sink.put4(enc_fcsel(rd, rn, rm, cond, InstSize::Size64));
}
&Inst::FpuRound { op, rd, rn } => {
let top22 = match op {
FpuRoundMode::Minus32 => 0b000_11110_00_1_001_010_10000,
FpuRoundMode::Minus64 => 0b000_11110_01_1_001_010_10000,
FpuRoundMode::Plus32 => 0b000_11110_00_1_001_001_10000,
FpuRoundMode::Plus64 => 0b000_11110_01_1_001_001_10000,
FpuRoundMode::Zero32 => 0b000_11110_00_1_001_011_10000,
FpuRoundMode::Zero64 => 0b000_11110_01_1_001_011_10000,
FpuRoundMode::Nearest32 => 0b000_11110_00_1_001_000_10000,
FpuRoundMode::Nearest64 => 0b000_11110_01_1_001_000_10000,
};
sink.put4(enc_fround(top22, rd, rn));
}
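            // INS Vd.D[0], Xn: move a 64-bit GPR into lane 0 of a vector register.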
&Inst::MovToVec64 { rd, rn } => {
sink.put4(
0b010_01110000_01000_0_0011_1_00000_00000
| (machreg_to_gpr(rn) << 5)
| machreg_to_vec(rd.to_reg()),
);
}
&Inst::MovFromVec { rd, rn, idx, ty } => {
let (q, imm5, shift, mask) = match ty {
I8 => (0b0, 0b00001, 1, 0b1111),
I16 => (0b0, 0b00010, 2, 0b0111),
I32 => (0b0, 0b00100, 3, 0b0011),
I64 => (0b1, 0b01000, 4, 0b0001),
_ => unreachable!(),
};
debug_assert_eq!(idx & mask, idx);
let imm5 = imm5 | ((idx as u32) << shift);
sink.put4(
0b000_01110000_00000_0_0111_1_00000_00000
| (q << 30)
| (imm5 << 16)
| (machreg_to_vec(rn) << 5)
| machreg_to_gpr(rd.to_reg()),
);
}
&Inst::VecDup { rd, rn, ty } => {
let imm5 = match ty {
I8 => 0b00001,
I16 => 0b00010,
I32 => 0b00100,
I64 => 0b01000,
_ => unimplemented!(),
};
sink.put4(
0b010_01110000_00000_000011_00000_00000
| (imm5 << 16)
| (machreg_to_gpr(rn) << 5)
| machreg_to_vec(rd.to_reg()),
);
}
&Inst::VecDupFromFpu { rd, rn, ty } => {
let imm5 = match ty {
F32 => 0b00100,
F64 => 0b01000,
_ => unimplemented!(),
};
sink.put4(
0b010_01110000_00000_000001_00000_00000
| (imm5 << 16)
| (machreg_to_vec(rn) << 5)
| machreg_to_vec(rd.to_reg()),
);
}
&Inst::VecExtend { t, rd, rn } => {
let (u, immh) = match t {
VecExtendOp::Sxtl8 => (0b0, 0b001),
VecExtendOp::Sxtl16 => (0b0, 0b010),
VecExtendOp::Sxtl32 => (0b0, 0b100),
VecExtendOp::Uxtl8 => (0b1, 0b001),
VecExtendOp::Uxtl16 => (0b1, 0b010),
VecExtendOp::Uxtl32 => (0b1, 0b100),
};
sink.put4(
0b000_011110_0000_000_101001_00000_00000
| (u << 29)
| (immh << 19)
| (machreg_to_vec(rn) << 5)
| machreg_to_vec(rd.to_reg()),
);
}
&Inst::VecRRR {
rd,
rn,
rm,
alu_op,
ty,
} => {
let enc_size = match ty {
I8X16 => 0b00,
I16X8 => 0b01,
I32X4 => 0b10,
_ => 0,
};
let enc_size_for_fcmp = match ty {
F32X4 => 0b0,
F64X2 => 0b1,
_ => 0,
};
let (top11, bit15_10) = match alu_op {
VecALUOp::SQAddScalar => {
debug_assert_eq!(I64, ty);
(0b010_11110_11_1, 0b000011)
}
VecALUOp::SQSubScalar => {
debug_assert_eq!(I64, ty);
(0b010_11110_11_1, 0b001011)
}
VecALUOp::UQAddScalar => {
debug_assert_eq!(I64, ty);
(0b011_11110_11_1, 0b000011)
}
VecALUOp::UQSubScalar => {
debug_assert_eq!(I64, ty);
(0b011_11110_11_1, 0b001011)
}
VecALUOp::Cmeq => (0b011_01110_00_1 | enc_size << 1, 0b100011),
VecALUOp::Cmge => (0b010_01110_00_1 | enc_size << 1, 0b001111),
VecALUOp::Cmgt => (0b010_01110_00_1 | enc_size << 1, 0b001101),
VecALUOp::Cmhi => (0b011_01110_00_1 | enc_size << 1, 0b001101),
VecALUOp::Cmhs => (0b011_01110_00_1 | enc_size << 1, 0b001111),
VecALUOp::Fcmeq => (0b010_01110_00_1 | enc_size_for_fcmp << 1, 0b111001),
VecALUOp::Fcmgt => (0b011_01110_10_1 | enc_size_for_fcmp << 1, 0b111001),
VecALUOp::Fcmge => (0b011_01110_00_1 | enc_size_for_fcmp << 1, 0b111001),
// The following logical instructions operate on bytes, so are not encoded differently
// for the different vector types.
VecALUOp::And => {
debug_assert_eq!(128, ty_bits(ty));
(0b010_01110_00_1, 0b000111)
}
VecALUOp::Bic => {
debug_assert_eq!(128, ty_bits(ty));
(0b010_01110_01_1, 0b000111)
}
VecALUOp::Orr => {
debug_assert_eq!(128, ty_bits(ty));
(0b010_01110_10_1, 0b000111)
}
VecALUOp::Eor => {
debug_assert_eq!(128, ty_bits(ty));
(0b011_01110_00_1, 0b000111)
}
VecALUOp::Bsl => {
debug_assert_eq!(128, ty_bits(ty));
(0b011_01110_01_1, 0b000111)
}
VecALUOp::Umaxp => (0b011_01110_00_1 | enc_size << 1, 0b101001),
};
sink.put4(enc_vec_rrr(top11, rm, bit15_10, rn, rd));
}
&Inst::MovToNZCV { rn } => {
sink.put4(0xd51b4200 | machreg_to_gpr(rn));
}
&Inst::MovFromNZCV { rd } => {
sink.put4(0xd53b4200 | machreg_to_gpr(rd.to_reg()));
}
&Inst::CondSet { rd, cond } => {
sink.put4(
0b100_11010100_11111_0000_01_11111_00000
| (cond.invert().bits() << 12)
| machreg_to_gpr(rd.to_reg()),
);
}
&Inst::Extend {
rd,
rn,
signed,
from_bits,
to_bits,
} if from_bits >= 8 => {
let top22 = match (signed, from_bits, to_bits) {
(false, 8, 32) => 0b010_100110_0_000000_000111, // UXTB (32)
(false, 16, 32) => 0b010_100110_0_000000_001111, // UXTH (32)
(true, 8, 32) => 0b000_100110_0_000000_000111, // SXTB (32)
(true, 16, 32) => 0b000_100110_0_000000_001111, // SXTH (32)
// The 64-bit unsigned variants are the same as the 32-bit ones,
// because writes to Wn zero out the top 32 bits of Xn
(false, 8, 64) => 0b010_100110_0_000000_000111, // UXTB (64)
(false, 16, 64) => 0b010_100110_0_000000_001111, // UXTH (64)
(true, 8, 64) => 0b100_100110_1_000000_000111, // SXTB (64)
(true, 16, 64) => 0b100_100110_1_000000_001111, // SXTH (64)
// 32-to-64: the unsigned case is a 'mov' (special-cased below).
(false, 32, 64) => 0, // MOV
(true, 32, 64) => 0b100_100110_1_000000_011111, // SXTW (64)
_ => panic!(
"Unsupported extend combination: signed = {}, from_bits = {}, to_bits = {}",
signed, from_bits, to_bits
),
};
if top22 != 0 {
sink.put4(enc_extend(top22, rd, rn));
} else {
Inst::mov32(rd, rn).emit(sink, flags, state);
}
}
&Inst::Extend {
rd,
rn,
signed,
from_bits,
to_bits,
} if from_bits == 1 && signed => {
assert!(to_bits <= 64);
// Reduce sign-extend-from-1-bit to:
// - and rd, rn, #1
// - sub rd, zr, rd
// We don't have ImmLogic yet, so we just hardcode this. FIXME.
sink.put4(0x92400000 | (machreg_to_gpr(rn) << 5) | machreg_to_gpr(rd.to_reg()));
let sub_inst = Inst::AluRRR {
alu_op: ALUOp::Sub64,
rd,
rn: zero_reg(),
rm: rd.to_reg(),
};
sub_inst.emit(sink, flags, state);
}
&Inst::Extend {
rd,
rn,
signed,
from_bits,
to_bits,
} if from_bits == 1 && !signed => {
assert!(to_bits <= 64);
// Reduce zero-extend-from-1-bit to:
// - and rd, rn, #1
// We don't have ImmLogic yet, so we just hardcode this. FIXME.
sink.put4(0x92400000 | (machreg_to_gpr(rn) << 5) | machreg_to_gpr(rd.to_reg()));
}
&Inst::Extend { .. } => {
panic!("Unsupported extend variant");
}
&Inst::Jump { ref dest } => {
let off = sink.cur_offset();
            // If the jump target is a label, record the label use so that a fixup can occur later.
if let Some(l) = dest.as_label() {
sink.use_label_at_offset(off, l, LabelUse::Branch26);
sink.add_uncond_branch(off, off + 4, l);
}
// Emit the jump itself.
sink.put4(enc_jump26(0b000101, dest.as_offset26_or_zero()));
}
&Inst::Ret => {
sink.put4(0xd65f03c0);
}
&Inst::EpiloguePlaceholder => {
// Noop; this is just a placeholder for epilogues.
}
&Inst::Call { ref info } => {
sink.add_reloc(info.loc, Reloc::Arm64Call, &info.dest, 0);
sink.put4(enc_jump26(0b100101, 0));
if info.opcode.is_call() {
sink.add_call_site(info.loc, info.opcode);
}
}
&Inst::CallInd { ref info } => {
sink.put4(0b1101011_0001_11111_000000_00000_00000 | (machreg_to_gpr(info.rn) << 5));
if info.opcode.is_call() {
sink.add_call_site(info.loc, info.opcode);
}
}
&Inst::CondBr {
taken,
not_taken,
kind,
} => {
// Conditional part first.
let cond_off = sink.cur_offset();
if let Some(l) = taken.as_label() {
sink.use_label_at_offset(cond_off, l, LabelUse::Branch19);
let inverted = enc_conditional_br(taken, kind.invert()).to_le_bytes();
sink.add_cond_branch(cond_off, cond_off + 4, l, &inverted[..]);
}
sink.put4(enc_conditional_br(taken, kind));
// Unconditional part next.
let uncond_off = sink.cur_offset();
if let Some(l) = not_taken.as_label() {
sink.use_label_at_offset(uncond_off, l, LabelUse::Branch26);
sink.add_uncond_branch(uncond_off, uncond_off + 4, l);
}
sink.put4(enc_jump26(0b000101, not_taken.as_offset26_or_zero()));
}
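            // One-sided conditional branch: only the taken edge is encoded; the not-taken case falls through.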
&Inst::OneWayCondBr { target, kind } => {
let off = sink.cur_offset();
if let Some(l) = target.as_label() {
sink.use_label_at_offset(off, l, LabelUse::Branch19);
}
sink.put4(enc_conditional_br(target, kind));
}
&Inst::IndirectBr { rn, .. } => {
sink.put4(enc_br(rn));
}
&Inst::Nop0 => {}
&Inst::Nop4 => {
sink.put4(0xd503201f);
}
&Inst::Brk => {
sink.put4(0xd4200000);
}
&Inst::Udf { trap_info } => {
let (srcloc, code) = trap_info;
sink.add_trap(srcloc, code);
sink.put4(0xd4a00000);
}
&Inst::Adr { rd, off } => {
assert!(off > -(1 << 20));
assert!(off < (1 << 20));
sink.put4(enc_adr(off, rd));
}
&Inst::Word4 { data } => {
sink.put4(data);
}
&Inst::Word8 { data } => {
sink.put8(data);
}
&Inst::JTSequence {
ridx,
rtmp1,
rtmp2,
ref info,
..
} => {
// This sequence is *one* instruction in the vcode, and is expanded only here at
// emission time, because we cannot allow the regalloc to insert spills/reloads in
// the middle; we depend on hardcoded PC-rel addressing below.
// Save index in a tmp (the live range of ridx only goes to start of this
// sequence; rtmp1 or rtmp2 may overwrite it).
let inst = Inst::gen_move(rtmp2, ridx, I64);
inst.emit(sink, flags, state);
// Load address of jump table
let inst = Inst::Adr { rd: rtmp1, off: 16 };
inst.emit(sink, flags, state);
// Load value out of jump table
let inst = Inst::SLoad32 {
rd: rtmp2,
mem: MemArg::reg_plus_reg_scaled_extended(
rtmp1.to_reg(),
rtmp2.to_reg(),
I32,
ExtendOp::UXTW,
),
srcloc: None, // can't cause a user trap.
};
inst.emit(sink, flags, state);
// Add base of jump table to jump-table-sourced block offset
let inst = Inst::AluRRR {
alu_op: ALUOp::Add64,
rd: rtmp1,
rn: rtmp1.to_reg(),
rm: rtmp2.to_reg(),
};
inst.emit(sink, flags, state);
// Branch to computed address. (`targets` here is only used for successor queries
// and is not needed for emission.)
let inst = Inst::IndirectBr {
rn: rtmp1.to_reg(),
targets: vec![],
};
inst.emit(sink, flags, state);
// Emit jump table (table of 32-bit offsets).
let jt_off = sink.cur_offset();
for &target in info.targets.iter() {
let word_off = sink.cur_offset();
let off_into_table = word_off - jt_off;
sink.use_label_at_offset(
word_off,
target.as_label().unwrap(),
LabelUse::PCRel32,
);
sink.put4(off_into_table);
}
// Lowering produces an EmitIsland before using a JTSequence, so we can safely
// disable the worst-case-size check in this case.
start_off = sink.cur_offset();
}
&Inst::LoadConst64 { rd, const_data } => {
let inst = Inst::ULoad64 {
rd,
mem: MemArg::Label(MemLabel::PCRel(8)),
srcloc: None, // can't cause a user trap.
};
inst.emit(sink, flags, state);
let inst = Inst::Jump {
dest: BranchTarget::ResolvedOffset(12),
};
inst.emit(sink, flags, state);
sink.put8(const_data);
}
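            // External-symbol address: load a relocated 8-byte absolute address stored inline, with a jump
            // over the data so it is never executed.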
&Inst::LoadExtName {
rd,
ref name,
offset,
srcloc,
} => {
let inst = Inst::ULoad64 {
rd,
mem: MemArg::Label(MemLabel::PCRel(8)),
srcloc: None, // can't cause a user trap.
};
inst.emit(sink, flags, state);
let inst = Inst::Jump {
dest: BranchTarget::ResolvedOffset(12),
};
inst.emit(sink, flags, state);
sink.add_reloc(srcloc, Reloc::Abs8, name, offset);
if flags.emit_all_ones_funcaddrs() {
sink.put8(u64::max_value());
} else {
sink.put8(0);
}
}
&Inst::LoadAddr { rd, ref mem } => {
let (mem_insts, mem) = mem_finalize(sink.cur_offset(), mem, state);
for inst in mem_insts.into_iter() {
inst.emit(sink, flags, state);
}
let (reg, offset) = match mem {
MemArg::Unscaled(r, simm9) => (r, simm9.value()),
MemArg::UnsignedOffset(r, uimm12scaled) => (r, uimm12scaled.value() as i32),
_ => panic!("Unsupported case for LoadAddr: {:?}", mem),
};
let abs_offset = if offset < 0 {
-offset as u64
} else {
offset as u64
};
let alu_op = if offset < 0 {
ALUOp::Sub64
} else {
ALUOp::Add64
};
if offset == 0 {
let mov = Inst::mov(rd, reg);
mov.emit(sink, flags, state);
} else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) {
let add = Inst::AluRRImm12 {
alu_op,
rd,
rn: reg,
imm12,
};
add.emit(sink, flags, state);
} else {
// Use `tmp2` here: `reg` may be `spilltmp` if the `MemArg` on this instruction
// was initially an `SPOffset`. Assert that `tmp2` is truly free to use. Note
// that no other instructions will be inserted here (we're emitting directly),
// and a live range of `tmp2` should not span this instruction, so this use
// should otherwise be correct.
debug_assert!(rd.to_reg() != tmp2_reg());
debug_assert!(reg != tmp2_reg());
let tmp = writable_tmp2_reg();
for insn in Inst::load_constant(tmp, abs_offset).into_iter() {
insn.emit(sink, flags, state);
}
let add = Inst::AluRRR {
alu_op,
rd,
rn: reg,
rm: tmp.to_reg(),
};
add.emit(sink, flags, state);
}
}
&Inst::VirtualSPOffsetAdj { offset } => {
debug!(
"virtual sp offset adjusted by {} -> {}",
offset,
state.virtual_sp_offset + offset
);
state.virtual_sp_offset += offset;
}
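            // If an island of `needed_space` bytes is needed now, emit it here, jumping around it so
            // execution falls through past the island.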
&Inst::EmitIsland { needed_space } => {
if sink.island_needed(needed_space + 4) {
let jump_around_label = sink.get_label();
let jmp = Inst::Jump {
dest: BranchTarget::Label(jump_around_label),
};
jmp.emit(sink, flags, state);
sink.emit_island();
sink.bind_label(jump_around_label);
}
}
}
let end_off = sink.cur_offset();
debug_assert!((end_off - start_off) <= Inst::worst_case_size());
}
}
| 40.909145 | 106 | 0.434774 |
e8da38bd13af3da72966b57a170e17643c453ba1
| 3,084 |
use core::fmt;
use core::result::Result::Ok;
bitflags! {
pub struct FilePermissions: u32 {
const PF_R = 0o4;
const PF_W = 0o2;
const PF_X = 0o1;
}
}
bitflags! {
pub struct PermissionsMask: u32 {
const S_IRUSR = 0o0400;
const S_IWUSR = 0o0200;
const S_IXUSR = 0o0100;
const S_IRGRP = 0o0040;
const S_IWGRP = 0o0020;
const S_IXGRP = 0o0010;
const S_IROTH = 0o0004;
const S_IWOTH = 0o0002;
const S_IXOTH = 0o0001;
}
}
impl PermissionsMask {
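    /// Extracts one 3-bit permission group (user, group or other) from the mask.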
fn _get_bits(&self, shift: u8) -> FilePermissions {
FilePermissions{bits: (self.bits >> shift) & 0o7}
}
pub fn user(&self) -> FilePermissions { self._get_bits(6) }
pub fn group(&self) -> FilePermissions { self._get_bits(3) }
pub fn other(&self) -> FilePermissions { self._get_bits(0) }
pub fn build(v: u32) -> Self {
Self { bits: v & 0o777 }
}
}
impl fmt::Display for FilePermissions {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let perms = [(FilePermissions::PF_R, "r"), (FilePermissions::PF_W, "w"), (FilePermissions::PF_X, "x")];
let formatted = perms.iter().map(|(p, s)| if self.contains(*p) {s} else {"-"}).collect::<Vec<&str>>().join("");
write!(f, "{}", formatted)
}
}
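// Formats the mask as three `rwx` triples; e.g. 0o754 renders as "rwxr-xr--".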
impl fmt::Display for PermissionsMask{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.user())?;
write!(f, "{}", self.group())?;
write!(f, "{}", self.other())
}
}
#[cfg(test)]
mod tests {
use super::{FilePermissions, PermissionsMask};
#[test]
fn test_file_permissions_values() {
assert_eq!(FilePermissions::PF_R.bits, 4);
assert_eq!(FilePermissions::PF_X.bits, 1);
assert_eq!(FilePermissions::PF_W.bits, 2);
assert_eq!((FilePermissions::PF_W | FilePermissions::PF_X | FilePermissions::PF_R).bits, 7);
}
#[test]
fn test_file_permissions_format() {
assert_eq!(format!("{}", (FilePermissions::PF_R)), "r--");
assert_eq!(format!("{}", (FilePermissions::PF_W)), "-w-");
assert_eq!(format!("{}", (FilePermissions::PF_R | FilePermissions::PF_W)), "rw-");
assert_eq!(format!("{}", (FilePermissions::PF_X)), "--x");
assert_eq!(format!("{}", (FilePermissions::PF_R | FilePermissions::PF_X)), "r-x");
assert_eq!(format!("{}", (FilePermissions::PF_W | FilePermissions::PF_X)), "-wx");
assert_eq!(format!("{}", (FilePermissions::PF_R | FilePermissions::PF_W | FilePermissions::PF_X)), "rwx");
}
#[test]
fn test_permissions_mask_groups() {
for i in 0..0o1000 {
let mask = PermissionsMask::build(i);
assert_eq!(mask.bits, (mask.user().bits << 6 | mask.group().bits << 3 | mask.other().bits));
}
}
#[test]
fn test_permissions_format() {
for i in 0..0o1000 {
let mask = PermissionsMask::build(i);
assert_eq!(format!("{}", mask), format!("{}{}{}", mask.user(), mask.group(), mask.other()));
}
}
}
| 32.808511 | 119 | 0.571012 |
d999e9c32e61b330a20058f3a660ab38a2debba2
| 7,078 |
// Copyright Materialize, Inc. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
use futures::SinkExt;
use sql::ast::{Raw, Statement};
use sql::plan::Params;
use crate::command::{
Command, ExecuteResponse, NoSessionExecuteResponse, Response, StartupMessage,
};
use crate::error::CoordError;
use crate::session::{EndTransactionAction, Session};
/// A client for a [`Coordinator`](crate::Coordinator).
///
/// A client is a simple handle to a communication channel with the coordinator.
/// Clients can be cheaply cloned.
#[derive(Debug, Clone)]
pub struct Client {
cmd_tx: futures::channel::mpsc::UnboundedSender<Command>,
}
impl Client {
/// Constructs a new client.
pub fn new(cmd_tx: futures::channel::mpsc::UnboundedSender<Command>) -> Client {
Client { cmd_tx }
}
/// Binds this client to a session.
pub fn for_session(&self, session: Session) -> SessionClient {
SessionClient {
inner: self.clone(),
session: Some(session),
started_up: false,
}
}
/// Dumps the catalog to a JSON string.
pub async fn dump_catalog(&mut self) -> String {
self.send(|tx| Command::DumpCatalog { tx }).await
}
/// Executes a statement as the system user that is not tied to a session.
///
/// This will execute in a pseudo session that is not able to create any
/// temporary resources that would normally need to be cleaned up by Terminate.
pub async fn execute(
&mut self,
stmt: Statement<Raw>,
params: Params,
) -> Result<NoSessionExecuteResponse, CoordError> {
self.send(|tx| Command::NoSessionExecute { stmt, params, tx })
.await
}
/// Cancel the query currently running on another connection.
pub async fn cancel_request(&mut self, conn_id: u32) {
self.cmd_tx
.send(Command::CancelRequest { conn_id })
.await
.expect("coordinator unexpectedly canceled request")
}
async fn send<T, F>(&mut self, f: F) -> T
where
F: FnOnce(futures::channel::oneshot::Sender<T>) -> Command,
{
let (tx, rx) = futures::channel::oneshot::channel();
self.cmd_tx
.send(f(tx))
.await
.expect("coordinator unexpectedly gone");
rx.await.expect("coordinator unexpectedly canceled request")
}
}
/// A [`Client`] that is bound to a session.
pub struct SessionClient {
inner: Client,
// Invariant: session may only be `None` during a method call. Every public
// method must ensure that `Session` is `Some` before it returns.
session: Option<Session>,
/// Whether the coordinator has been notified of this `SessionClient` via
/// a call to `startup`.
started_up: bool,
}
impl SessionClient {
/// Notifies the coordinator of a new client session.
///
/// Returns a list of messages that are intended to be displayed to the
/// user.
///
/// Once you observe a successful response to this method, you must not call
/// it again. You must observe a successful response to this method before
/// calling any other method on the client, besides
/// [`SessionClient::terminate`].
pub async fn startup(&mut self) -> Result<Vec<StartupMessage>, CoordError> {
assert!(!self.started_up);
match self
.send(|tx, session| Command::Startup { session, tx })
.await
{
Ok(messages) => {
self.started_up = true;
Ok(messages)
}
Err(e) => Err(e),
}
}
/// Saves the specified statement as a prepared statement.
///
/// The prepared statement is saved in the connection's [`sql::Session`]
/// under the specified name.
///
/// You must have observed a successful response to
/// [`SessionClient::startup`] before calling this method.
pub async fn describe(
&mut self,
name: String,
stmt: Option<Statement<Raw>>,
param_types: Vec<Option<pgrepr::Type>>,
) -> Result<(), CoordError> {
self.send(|tx, session| Command::Describe {
name,
stmt,
param_types,
session,
tx,
})
.await
}
/// Binds a statement to a portal.
///
/// You must have observed a successful response to
/// [`SessionClient::startup`] before calling this method.
pub async fn declare(
&mut self,
name: String,
stmt: Statement<Raw>,
param_types: Vec<Option<pgrepr::Type>>,
) -> Result<(), CoordError> {
self.send(|tx, session| Command::Declare {
name,
stmt,
param_types,
session,
tx,
})
.await
}
/// Executes a previously-bound portal.
///
/// You must have observed a successful response to
/// [`SessionClient::startup`] before calling this method.
pub async fn execute(&mut self, portal_name: String) -> Result<ExecuteResponse, CoordError> {
self.send(|tx, session| Command::Execute {
portal_name,
session,
tx,
})
.await
}
/// Ends a transaction.
///
/// You must have observed a successful response to
/// [`SessionClient::startup`] before calling this method.
pub async fn end_transaction(
&mut self,
action: EndTransactionAction,
) -> Result<ExecuteResponse, CoordError> {
self.send(|tx, session| Command::Commit {
action,
session,
tx,
})
.await
}
/// Terminates this client session.
///
/// This consumes this `SessionClient`. If the coordinator was notified of
/// this client session by `startup`, then this method will clean up any
/// state on the coordinator about this session.
pub async fn terminate(mut self) {
let session = self.session.take().expect("session invariant violated");
if self.started_up {
self.inner
.cmd_tx
.send(Command::Terminate { session })
.await
.expect("coordinator unexpectedly gone");
}
}
/// Returns a mutable reference to the session bound to this client.
pub fn session(&mut self) -> &mut Session {
self.session.as_mut().unwrap()
}
async fn send<T, F>(&mut self, f: F) -> Result<T, CoordError>
where
F: FnOnce(futures::channel::oneshot::Sender<Response<T>>, Session) -> Command,
{
let session = self.session.take().expect("session invariant violated");
let res = self.inner.send(|tx| f(tx, session)).await;
self.session = Some(res.session);
res.result
}
}
| 31.882883 | 97 | 0.59805 |
defd059e851c783eca57a1626dd533aa417e6d8d
| 1,898 |
use crate::PodState;
use chrono::Utc;
use k8s_openapi::api::core::v1::ContainerState as KubeContainerState;
use k8s_openapi::api::core::v1::ContainerStateRunning as KubeContainerStateRunning;
use k8s_openapi::api::core::v1::ContainerStatus as KubeContainerStatus;
use k8s_openapi::apimachinery::pkg::apis::meta::v1::Time as KubeTime;
use kubelet::state::prelude::*;
/// The Kubelet is running the Pod.
#[derive(Default, Debug)]
pub struct Running;
#[async_trait::async_trait]
impl State<PodState> for Running {
async fn next(self: Box<Self>, _pod_state: &mut PodState, _pod: &Pod) -> Transition<PodState> {
// Wascc has no notion of exiting so we just sleep.
// I _think_ that periodically awaiting will allow the task to be interrupted.
loop {
tokio::time::delay_for(std::time::Duration::from_secs(10)).await;
}
}
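    // Report a `Running` phase with every container marked ready and started at the current time.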
async fn json_status(
&self,
_pod_state: &mut PodState,
pod: &Pod,
) -> anyhow::Result<serde_json::Value> {
let ts = Utc::now();
let container_statuses: Vec<KubeContainerStatus> = pod
.containers()
.iter()
.map(|container| {
let state = KubeContainerState {
running: Some(KubeContainerStateRunning {
started_at: Some(KubeTime(ts)),
}),
..Default::default()
};
KubeContainerStatus {
name: container.name().to_string(),
ready: true,
started: Some(true),
state: Some(state),
..Default::default()
}
})
.collect();
Ok(make_status_with_containers(
Phase::Running,
"Running",
container_statuses,
vec![],
))
}
}
| 33.892857 | 99 | 0.554268 |
de903e7d47272a3c34450a30a95e77a63110c3b9
| 297 |
//!
//! The Solidity memory constants.
//!
/// The `keccak256` scratch space offset (cells).
pub const OFFSET_SCRATCH_SPACE: usize = 0;
/// The memory pointer offset (cells).
pub const OFFSET_MEMORY_POINTER: usize = 2;
/// The empty slot offset (cells).
pub const OFFSET_EMPTY_SLOT: usize = 3;
| 22.846154 | 49 | 0.710438 |
dbba723f5af92aef07a9b1a271fca24219c0a2dc
| 64 |
pub mod delete;
pub mod insert;
pub mod select;
pub mod update;
| 12.8 | 15 | 0.75 |
87b5d1b0453f1d32a1b4caa7d364880f9c16fbcc
| 9,172 |
use colored::*;
use comfy_table::presets::UTF8_HORIZONTAL_BORDERS_ONLY;
use comfy_table::{Cell, CellAlignment, ContentArrangement, Table};
use directories::UserDirs;
use flexi_logger::LogSpecBuilder;
use log::error;
use crate::{cli::AppConfig, errors::PaperoniError};
pub fn display_summary(
initial_article_count: usize,
    successful_articles_table: Table,
partial_downloads_count: usize,
errors: Vec<PaperoniError>,
) {
let successfully_downloaded_count =
initial_article_count - partial_downloads_count - errors.len();
println!(
"{}",
short_summary(DownloadCount::new(
initial_article_count,
successfully_downloaded_count,
partial_downloads_count,
errors.len()
))
.bold()
);
if successfully_downloaded_count > 0 {
println!("{}", succesful_articles_table);
}
if !errors.is_empty() {
println!("\n{}", "Failed article downloads".bright_red().bold());
let mut table_failed = Table::new();
table_failed
.load_preset(UTF8_HORIZONTAL_BORDERS_ONLY)
.set_header(vec![
Cell::new("Link").set_alignment(CellAlignment::Center),
Cell::new("Reason").set_alignment(CellAlignment::Center),
])
.set_content_arrangement(ContentArrangement::Dynamic);
for error in errors {
let error_source = error
.article_source()
.clone()
.unwrap_or_else(|| "<unknown link>".to_string());
table_failed.add_row(vec![&error_source, &format!("{}", error.kind())]);
error!("{}\n - {}", error, error_source);
}
println!("{}", table_failed);
}
}
/// Returns a string summary of the number of successful, partially failed and failed article downloads
fn short_summary(download_count: DownloadCount) -> String {
// TODO: Refactor this
if download_count.total
!= download_count.successful + download_count.failed + download_count.partial
{
panic!("initial_count must be equal to the sum of failed and successful count")
}
let get_noun = |count: usize| if count == 1 { "article" } else { "articles" };
if download_count.successful == download_count.total && download_count.successful == 1 {
"Article downloaded successfully".green().to_string()
} else if download_count.total == download_count.failed && download_count.failed == 1 {
"Article failed to download".red().to_string()
} else if download_count.total == download_count.partial && download_count.partial == 1 {
"Article partially failed to download".yellow().to_string()
} else if download_count.successful == download_count.total {
"All articles downloaded successfully".green().to_string()
} else if download_count.failed == download_count.total {
"All articles failed to download".red().to_string()
} else if download_count.partial == download_count.total {
"All articles partially failed to download"
.yellow()
.to_string()
} else if download_count.partial == 0 {
format!(
"{} {} downloaded successfully, {} {} failed",
download_count.successful,
get_noun(download_count.successful),
download_count.failed,
get_noun(download_count.failed)
)
.yellow()
.to_string()
} else if download_count.successful == 0
&& download_count.partial > 0
&& download_count.failed > 0
{
format!(
"{} {} partially failed to download, {} {} failed",
download_count.partial,
get_noun(download_count.partial),
download_count.failed,
get_noun(download_count.failed)
)
.yellow()
.to_string()
} else if download_count.failed == 0
&& download_count.successful > 0
&& download_count.partial > 0
{
format!(
"{} {} downloaded successfully, {} {} partially failed to download",
download_count.successful,
get_noun(download_count.successful),
download_count.partial,
get_noun(download_count.partial)
)
.yellow()
.to_string()
} else {
format!(
"{} {} downloaded successfully, {} {} partially failed to download, {} {} failed",
download_count.successful,
get_noun(download_count.successful),
download_count.partial,
get_noun(download_count.partial),
download_count.failed,
get_noun(download_count.failed)
)
.yellow()
.to_string()
}
}
struct DownloadCount {
total: usize,
successful: usize,
partial: usize,
failed: usize,
}
impl DownloadCount {
fn new(total: usize, successful: usize, partial: usize, failed: usize) -> Self {
Self {
total,
successful,
partial,
failed,
}
}
}
pub fn init_logger(app_config: &AppConfig) {
match UserDirs::new() {
Some(user_dirs) => {
let home_dir = user_dirs.home_dir();
let paperoni_dir = home_dir.join(".paperoni");
let log_dir = paperoni_dir.join("logs");
let log_spec = LogSpecBuilder::new()
.module("paperoni", app_config.log_level())
.build();
let formatted_timestamp = app_config.start_time().format("%Y-%m-%d_%H-%M-%S");
let mut logger = flexi_logger::Logger::with(log_spec);
if app_config.is_logging_to_file() && (!paperoni_dir.is_dir() || !log_dir.is_dir()) {
match std::fs::create_dir_all(&log_dir) {
Ok(_) => (),
Err(e) => {
eprintln!("Unable to create paperoni directories on home directory for logging purposes\n{}",e);
std::process::exit(1);
}
};
}
if app_config.is_logging_to_file() {
logger = logger
.directory(log_dir)
.discriminant(formatted_timestamp.to_string())
.suppress_timestamp()
.log_to_file();
}
match logger.start() {
Ok(_) => (),
Err(e) => eprintln!("Unable to start logger!\n{}", e),
}
}
None => eprintln!("Unable to get user directories for logging purposes"),
};
}
#[cfg(test)]
mod tests {
use super::{short_summary, DownloadCount};
use colored::*;
#[test]
fn test_short_summary() {
assert_eq!(
short_summary(DownloadCount::new(1, 1, 0, 0)),
"Article downloaded successfully".green().to_string()
);
assert_eq!(
short_summary(DownloadCount::new(1, 0, 0, 1)),
"Article failed to download".red().to_string()
);
assert_eq!(
short_summary(DownloadCount::new(10, 10, 0, 0)),
"All articles downloaded successfully".green().to_string()
);
assert_eq!(
short_summary(DownloadCount::new(10, 0, 0, 10)),
"All articles failed to download".red().to_string()
);
assert_eq!(
short_summary(DownloadCount::new(10, 8, 0, 2)),
"8 articles downloaded successfully, 2 articles failed"
.yellow()
.to_string()
);
assert_eq!(
short_summary(DownloadCount::new(10, 1, 0, 9)),
"1 article downloaded successfully, 9 articles failed"
.yellow()
.to_string()
);
assert_eq!(
short_summary(DownloadCount::new(7, 6, 0, 1)),
"6 articles downloaded successfully, 1 article failed"
.yellow()
.to_string()
);
assert_eq!(
short_summary(DownloadCount::new(7, 4, 2, 1)),
"4 articles downloaded successfully, 2 articles partially failed to download, 1 article failed"
.yellow()
.to_string()
);
assert_eq!(
short_summary(DownloadCount::new(12, 6, 6, 0)),
"6 articles downloaded successfully, 6 articles partially failed to download"
.yellow()
.to_string()
);
assert_eq!(
short_summary(DownloadCount::new(5, 0, 4, 1)),
"4 articles partially failed to download, 1 article failed"
.yellow()
.to_string()
);
assert_eq!(
short_summary(DownloadCount::new(4, 0, 4, 0)),
"All articles partially failed to download"
.yellow()
.to_string()
);
}
#[test]
#[should_panic(
expected = "initial_count must be equal to the sum of failed and successful count"
)]
fn test_short_summary_panics_on_invalid_input() {
short_summary(DownloadCount::new(0, 12, 0, 43));
}
}
| 35.141762 | 120 | 0.563127 |
1e0bf73fa169ce628539128c93c6e37b759e85ba
| 1,111 |
use serde::Serialize;
use crate::toggl::Project;
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct AlfredText {
copy: String,
large_type: String,
}
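/// One result row in Alfred's Script Filter JSON output.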
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct AlfredItem {
title: String,
// subtitle: Option<String>,
arg: String,
text: AlfredText,
}
impl AlfredItem {
pub fn from_project(project: &Project) -> Self {
Self {
title: project.name.clone(),
// subtitle: None,
arg: project.id.to_string(),
text: AlfredText {
copy: project.name.clone(),
large_type: project.name.clone(),
},
}
}
}
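/// Top-level payload holding the list of items returned to Alfred.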
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct AlfredFormat {
pub items: Vec<AlfredItem>,
}
impl AlfredFormat {
pub fn from_projects(projects: &Vec<Project>) -> Self {
let mut items: Vec<AlfredItem> = vec![];
for project in projects {
items.push(AlfredItem::from_project(&project));
}
Self { items }
}
}
| 21.784314 | 59 | 0.581458 |
eff78e79431469d4bc917b9b78bfc7d6d62509d5
| 2,525 |
use super::*;
use crate::lkm::structs::LoadedModule;
use alloc::string::String;
use alloc::sync::Arc;
use core::alloc::{GlobalAlloc, Layout};
use core::slice::from_raw_parts;
pub fn get_module(this_module: usize) -> &'static mut LoadedModule {
unsafe {
let ptr = this_module as *mut LoadedModule;
&mut (*ptr) as &'static mut LoadedModule
}
}
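/// Reads a NUL-terminated UTF-8 string of at most `max_size` bytes starting at `ptr`.
/// Panics if no terminator is found within `max_size` bytes or the bytes are not valid UTF-8.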
pub unsafe fn cstr_to_str(ptr: *const u8, max_size: usize) -> String {
(0..max_size)
.find(|&i| ptr.offset(i as isize).read() == 0)
.and_then(|len| core::str::from_utf8(core::slice::from_raw_parts(ptr, len)).ok())
.map(|s| String::from(s))
.unwrap()
}
#[no_mangle]
pub extern "C" fn lkm_api_pong() -> usize {
println!("Pong from Kernel Module!");
println!(
"This indicates that a kernel module is successfully loaded into kernel and called a stub."
);
114514
}
#[no_mangle]
pub extern "C" fn lkm_api_debug(this_module: usize) {
let module = get_module(this_module);
module.lock.lock();
println!(
"[LKM] Current module info: name={} version={} api_version={}\nref_count={} dep_count={}",
module.info.name,
module.info.version,
module.info.api_version,
Arc::strong_count(&module.using_counts),
module.used_counts
);
}
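/// Resolves an exported kernel symbol by name, returning its address or 0 if it is unknown.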
#[no_mangle]
pub extern "C" fn lkm_api_query_symbol(symbol: *const u8) -> usize {
manager::ModuleManager::with(|man| {
match man.resolve_symbol(&unsafe { cstr_to_str(symbol, 256) }) {
Some(x) => x,
None => 0,
}
})
}
#[no_mangle]
pub extern "C" fn lkm_api_kmalloc(size: usize) -> usize {
unsafe { crate::HEAP_ALLOCATOR.alloc(Layout::from_size_align(size, 8).unwrap()) as usize }
}
#[no_mangle]
pub extern "C" fn lkm_api_kfree(ptr: usize, size: usize) {
unsafe {
crate::HEAP_ALLOCATOR.dealloc(ptr as *mut u8, Layout::from_size_align(size, 8).unwrap());
}
}
#[no_mangle]
pub extern "C" fn lkm_api_info(ptr: *const u8) {
let text = unsafe { cstr_to_str(ptr, 1024) };
info!("{}", text);
}
#[no_mangle]
pub extern "C" fn lkm_api_add_kernel_symbols(start: usize, end: usize) {
use crate::lkm::manager::LKM_MANAGER;
let length = end - start;
use core::str::from_utf8;
let symbols = unsafe { from_utf8(from_raw_parts(start as *const u8, length)) }.unwrap();
let global_lkmm = &LKM_MANAGER;
let mut locked_lkmm = global_lkmm.lock();
let lkmm = locked_lkmm.as_mut().unwrap();
lkmm.init_kernel_symbols(symbols);
}
| 29.705882 | 99 | 0.639208 |
01a37ad1a39f29100ecf42ec986825d8bebdf648
| 857 |
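/// Writes a temporary `game.cfg` under `../target/Config` with the given HUD `GlobalScale`;
/// the file is removed again when the value is dropped.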
pub struct LolConfig {}
impl LolConfig {
pub fn create_with_hud_scale(global_scale: &str) -> Self {
use std::io::Write;
std::fs::create_dir_all("../target/Config").expect("Unable to create config dir");
let mut file = std::fs::File::create("../target/Config/game.cfg")
.expect("Unable to create config game.cfg");
file.write_all(
format!(
r#"
[General]
Colors=32
Height=720
Width=1280
CfgVersion=9.24.4052
[HUD]
ShowAllChannelChat=0
GlobalScale={}
MinimapScale=1.0000"#,
global_scale
)
.as_bytes(),
)
.expect("Unable to write to game.cfg");
Self {}
}
}
impl Drop for LolConfig {
fn drop(&mut self) {
std::fs::remove_file("../target/Config/game.cfg").expect("game.cfg cannot be removed");
}
}
| 22.552632 | 95 | 0.576429 |
b98c975ba43008077c6492c22e7ef4ddbc57f40d
| 902 |
use crate::linter::{Rule, RuleResult};
use sv_parser::{AlwaysKeyword, NodeEvent, RefNode, SyntaxTree};
#[derive(Default)]
pub struct LegacyAlways;
impl Rule for LegacyAlways {
fn check(&mut self, _syntax_tree: &SyntaxTree, event: &NodeEvent) -> RuleResult {
let node = match event {
NodeEvent::Enter(x) => x,
NodeEvent::Leave(_) => {
return RuleResult::Pass;
}
};
match node {
RefNode::AlwaysKeyword(AlwaysKeyword::Always(_)) => RuleResult::Fail,
_ => RuleResult::Pass,
}
}
fn name(&self) -> String {
String::from("legacy_always")
}
fn hint(&self) -> String {
String::from("`always_comb`/`always_ff`/`always_latch` must be used")
}
fn reason(&self) -> String {
String::from("`always` can't detect blocking/non-blocking mistake")
}
}
| 27.333333 | 85 | 0.575388 |
2671b5682ebd235ad665e74090e2592c6ecef0ab
| 6,870 |
#[doc = "Reader of register FSEC"]
pub type R = crate::R<u8, super::FSEC>;
#[doc = "Flash Security\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SEC_A {
#[doc = "0: no description available"]
SEC_0,
#[doc = "1: no description available"]
SEC_1,
#[doc = "2: MCU security status is unsecure (The standard shipping condition of the flash module is unsecure.)"]
SEC_2,
#[doc = "3: no description available"]
SEC_3,
}
impl From<SEC_A> for u8 {
#[inline(always)]
fn from(variant: SEC_A) -> Self {
match variant {
SEC_A::SEC_0 => 0,
SEC_A::SEC_1 => 1,
SEC_A::SEC_2 => 2,
SEC_A::SEC_3 => 3,
}
}
}
#[doc = "Reader of field `SEC`"]
pub type SEC_R = crate::R<u8, SEC_A>;
impl SEC_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> SEC_A {
match self.bits {
0 => SEC_A::SEC_0,
1 => SEC_A::SEC_1,
2 => SEC_A::SEC_2,
3 => SEC_A::SEC_3,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `SEC_0`"]
#[inline(always)]
pub fn is_sec_0(&self) -> bool {
*self == SEC_A::SEC_0
}
#[doc = "Checks if the value of the field is `SEC_1`"]
#[inline(always)]
pub fn is_sec_1(&self) -> bool {
*self == SEC_A::SEC_1
}
#[doc = "Checks if the value of the field is `SEC_2`"]
#[inline(always)]
pub fn is_sec_2(&self) -> bool {
*self == SEC_A::SEC_2
}
#[doc = "Checks if the value of the field is `SEC_3`"]
#[inline(always)]
pub fn is_sec_3(&self) -> bool {
*self == SEC_A::SEC_3
}
}
#[doc = "Factory Security Level Access Code\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FSLACC_A {
#[doc = "0: Factory access granted"]
FSLACC_0,
#[doc = "1: Factory access denied"]
FSLACC_1,
#[doc = "2: Factory access denied"]
FSLACC_2,
#[doc = "3: Factory access granted"]
FSLACC_3,
}
impl From<FSLACC_A> for u8 {
#[inline(always)]
fn from(variant: FSLACC_A) -> Self {
match variant {
FSLACC_A::FSLACC_0 => 0,
FSLACC_A::FSLACC_1 => 1,
FSLACC_A::FSLACC_2 => 2,
FSLACC_A::FSLACC_3 => 3,
}
}
}
#[doc = "Reader of field `FSLACC`"]
pub type FSLACC_R = crate::R<u8, FSLACC_A>;
impl FSLACC_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> FSLACC_A {
match self.bits {
0 => FSLACC_A::FSLACC_0,
1 => FSLACC_A::FSLACC_1,
2 => FSLACC_A::FSLACC_2,
3 => FSLACC_A::FSLACC_3,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `FSLACC_0`"]
#[inline(always)]
pub fn is_fslacc_0(&self) -> bool {
*self == FSLACC_A::FSLACC_0
}
#[doc = "Checks if the value of the field is `FSLACC_1`"]
#[inline(always)]
pub fn is_fslacc_1(&self) -> bool {
*self == FSLACC_A::FSLACC_1
}
#[doc = "Checks if the value of the field is `FSLACC_2`"]
#[inline(always)]
pub fn is_fslacc_2(&self) -> bool {
*self == FSLACC_A::FSLACC_2
}
#[doc = "Checks if the value of the field is `FSLACC_3`"]
#[inline(always)]
pub fn is_fslacc_3(&self) -> bool {
*self == FSLACC_A::FSLACC_3
}
}
#[doc = "Mass Erase Enable Bits\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum MEEN_A {
#[doc = "0: Mass erase is enabled"]
MEEN_0,
#[doc = "1: Mass erase is enabled"]
MEEN_1,
#[doc = "2: no description available"]
MEEN_2,
#[doc = "3: Mass erase is enabled"]
MEEN_3,
}
impl From<MEEN_A> for u8 {
#[inline(always)]
fn from(variant: MEEN_A) -> Self {
match variant {
MEEN_A::MEEN_0 => 0,
MEEN_A::MEEN_1 => 1,
MEEN_A::MEEN_2 => 2,
MEEN_A::MEEN_3 => 3,
}
}
}
#[doc = "Reader of field `MEEN`"]
pub type MEEN_R = crate::R<u8, MEEN_A>;
impl MEEN_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> MEEN_A {
match self.bits {
0 => MEEN_A::MEEN_0,
1 => MEEN_A::MEEN_1,
2 => MEEN_A::MEEN_2,
3 => MEEN_A::MEEN_3,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `MEEN_0`"]
#[inline(always)]
pub fn is_meen_0(&self) -> bool {
*self == MEEN_A::MEEN_0
}
#[doc = "Checks if the value of the field is `MEEN_1`"]
#[inline(always)]
pub fn is_meen_1(&self) -> bool {
*self == MEEN_A::MEEN_1
}
#[doc = "Checks if the value of the field is `MEEN_2`"]
#[inline(always)]
pub fn is_meen_2(&self) -> bool {
*self == MEEN_A::MEEN_2
}
#[doc = "Checks if the value of the field is `MEEN_3`"]
#[inline(always)]
pub fn is_meen_3(&self) -> bool {
*self == MEEN_A::MEEN_3
}
}
#[doc = "Backdoor Key Security Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum KEYEN_A {
#[doc = "0: Backdoor key access disabled"]
KEYEN_0,
#[doc = "1: Backdoor key access disabled (preferred KEYEN state to disable backdoor key access)"]
KEYEN_1,
#[doc = "2: Backdoor key access enabled"]
KEYEN_2,
#[doc = "3: Backdoor key access disabled"]
KEYEN_3,
}
impl From<KEYEN_A> for u8 {
#[inline(always)]
fn from(variant: KEYEN_A) -> Self {
match variant {
KEYEN_A::KEYEN_0 => 0,
KEYEN_A::KEYEN_1 => 1,
KEYEN_A::KEYEN_2 => 2,
KEYEN_A::KEYEN_3 => 3,
}
}
}
#[doc = "Reader of field `KEYEN`"]
pub type KEYEN_R = crate::R<u8, KEYEN_A>;
impl KEYEN_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> KEYEN_A {
match self.bits {
0 => KEYEN_A::KEYEN_0,
1 => KEYEN_A::KEYEN_1,
2 => KEYEN_A::KEYEN_2,
3 => KEYEN_A::KEYEN_3,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `KEYEN_0`"]
#[inline(always)]
pub fn is_keyen_0(&self) -> bool {
*self == KEYEN_A::KEYEN_0
}
#[doc = "Checks if the value of the field is `KEYEN_1`"]
#[inline(always)]
pub fn is_keyen_1(&self) -> bool {
*self == KEYEN_A::KEYEN_1
}
#[doc = "Checks if the value of the field is `KEYEN_2`"]
#[inline(always)]
pub fn is_keyen_2(&self) -> bool {
*self == KEYEN_A::KEYEN_2
}
#[doc = "Checks if the value of the field is `KEYEN_3`"]
#[inline(always)]
pub fn is_keyen_3(&self) -> bool {
*self == KEYEN_A::KEYEN_3
}
}
impl R {
#[doc = "Bits 0:1 - Flash Security"]
#[inline(always)]
pub fn sec(&self) -> SEC_R {
SEC_R::new((self.bits & 0x03) as u8)
}
#[doc = "Bits 2:3 - Factory Security Level Access Code"]
#[inline(always)]
pub fn fslacc(&self) -> FSLACC_R {
FSLACC_R::new(((self.bits >> 2) & 0x03) as u8)
}
#[doc = "Bits 4:5 - Mass Erase Enable Bits"]
#[inline(always)]
pub fn meen(&self) -> MEEN_R {
MEEN_R::new(((self.bits >> 4) & 0x03) as u8)
}
#[doc = "Bits 6:7 - Backdoor Key Security Enable"]
#[inline(always)]
pub fn keyen(&self) -> KEYEN_R {
KEYEN_R::new(((self.bits >> 6) & 0x03) as u8)
}
}
| 26.731518 | 114 | 0.594614 |
018d4d701d37f3479f120ca75527175c7e859ebf
| 111 |
use crate::model::parser::Parser;
pub trait Cache {
fn get_parser_token<P: Parser>() -> Option<String>;
}
| 18.5 | 55 | 0.675676 |
7592b50cc17e0287946ac4353dd4268d1bbd1aa6
| 3,190 |
use crate::z3::ast;
use ast::Ast;
use std::convert::TryInto;
use std::ffi::CStr;
use std::fmt;
use z3_sys::*;
use crate::z3::{Context, FuncDecl, Sort, Symbol, Z3_MUTEX};
impl<'ctx> FuncDecl<'ctx> {
pub fn new<S: Into<Symbol>>(
ctx: &'ctx Context,
name: S,
domain: &[&Sort<'ctx>],
range: &Sort<'ctx>,
) -> Self {
assert!(domain.iter().all(|s| s.ctx.z3_ctx == ctx.z3_ctx));
assert_eq!(ctx.z3_ctx, range.ctx.z3_ctx);
let domain: Vec<_> = domain.iter().map(|s| s.z3_sort).collect();
unsafe {
Self::from_raw(
ctx,
Z3_mk_func_decl(
ctx.z3_ctx,
name.into().as_z3_symbol(ctx),
domain.len().try_into().unwrap(),
domain.as_ptr(),
range.z3_sort,
),
)
}
}
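    /// Wraps a raw `Z3_func_decl`, incrementing its reference count for the lifetime of this value.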
pub unsafe fn from_raw(ctx: &'ctx Context, z3_func_decl: Z3_func_decl) -> Self {
let guard = Z3_MUTEX.lock().unwrap();
Z3_inc_ref(ctx.z3_ctx, Z3_func_decl_to_ast(ctx.z3_ctx, z3_func_decl));
Self { ctx, z3_func_decl }
}
/// Return the number of arguments of a function declaration.
///
/// If the function declaration is a constant, then the arity is `0`.
///
/// ```
/// # use z3::{Config, Context, FuncDecl, Solver, Sort, Symbol};
/// # let cfg = Config::new();
/// # let ctx = Context::new(&cfg);
/// let f = FuncDecl::new(
/// &ctx,
/// "f",
/// &[&Sort::int(&ctx), &Sort::real(&ctx)],
/// &Sort::int(&ctx));
/// assert_eq!(f.arity(), 2);
/// ```
pub fn arity(&self) -> usize {
unsafe { Z3_get_arity(self.ctx.z3_ctx, self.z3_func_decl) as usize }
}
/// Create a constant (if `args` has length 0) or function application (otherwise).
///
/// Note that `args` should have the types corresponding to the `domain` of the `FuncDecl`.
pub fn apply(&self, args: &[&ast::Dynamic<'ctx>]) -> ast::Dynamic<'ctx> {
assert!(args.iter().all(|s| s.get_ctx().z3_ctx == self.ctx.z3_ctx));
let args: Vec<_> = args.iter().map(|a| a.get_z3_ast()).collect();
ast::Dynamic::new(self.ctx, unsafe {
let guard = Z3_MUTEX.lock().unwrap();
Z3_mk_app(
self.ctx.z3_ctx,
self.z3_func_decl,
args.len().try_into().unwrap(),
args.as_ptr(),
)
})
}
}
impl<'ctx> fmt::Display for FuncDecl<'ctx> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let p = unsafe { Z3_func_decl_to_string(self.ctx.z3_ctx, self.z3_func_decl) };
if p.is_null() {
return Result::Err(fmt::Error);
}
match unsafe { CStr::from_ptr(p) }.to_str() {
Ok(s) => write!(f, "{}", s),
Err(_) => Result::Err(fmt::Error),
}
}
}
impl<'ctx> Drop for FuncDecl<'ctx> {
fn drop(&mut self) {
unsafe {
Z3_dec_ref(
self.ctx.z3_ctx,
Z3_func_decl_to_ast(self.ctx.z3_ctx, self.z3_func_decl),
);
}
}
}
| 30.380952 | 95 | 0.509091 |
2f7a6ffc63fad0e6ed62f40acb6572cafd7c2884
| 4,241 |
use weasel::actor::{Actor, ActorRules};
use weasel::battle::{Battle, BattleController, BattleRules};
use weasel::character::{Character, CharacterRules};
use weasel::entropy::{Entropy, ResetEntropy};
use weasel::event::EventTrigger;
use weasel::metric::WriteMetrics;
use weasel::rules::ability::SimpleAbility;
use weasel::rules::entropy::UniformDistribution;
use weasel::rules::statistic::SimpleStatistic;
use weasel::server::Server;
use weasel::{battle_rules, rules::empty::*};
#[cfg(feature = "serialization")]
mod helper;
const SEED: u64 = 1_204_678_643_940_597_513;
const TEAM_1_ID: u32 = 1;
const CREATURE_1_ID: u32 = 1;
const STAT_ID: u32 = 1;
const STAT_VALUE_MIN: i32 = 1;
const STAT_VALUE_MAX: i32 = 1000;
const STAT_VALUE: i32 = 820;
const ABILITY_ID: u32 = 1;
const ABILITY_POWER_MIN: i32 = 1;
const ABILITY_POWER_MAX: i32 = 1000;
const ABILITY_POWER: i32 = 33;
#[derive(Default)]
pub struct CustomCharacterRules {}
impl CharacterRules<CustomRules> for CustomCharacterRules {
type CreatureId = u32;
type ObjectId = ();
type Statistic = SimpleStatistic<u32, i32>;
type StatisticsSeed = ();
type StatisticsAlteration = ();
type Status = EmptyStatus;
type StatusesAlteration = ();
fn generate_statistics(
&self,
_seed: &Option<Self::StatisticsSeed>,
entropy: &mut Entropy<CustomRules>,
_metrics: &mut WriteMetrics<CustomRules>,
) -> Box<dyn Iterator<Item = Self::Statistic>> {
let value = entropy.generate(STAT_VALUE_MIN, STAT_VALUE_MAX);
let v = vec![SimpleStatistic::new(STAT_ID, value)];
Box::new(v.into_iter())
}
}
#[derive(Default)]
pub struct CustomActorRules {}
impl ActorRules<CustomRules> for CustomActorRules {
type Ability = SimpleAbility<u32, i32>;
type AbilitiesSeed = ();
type Activation = i32;
type AbilitiesAlteration = ();
fn generate_abilities(
&self,
_: &Option<Self::AbilitiesSeed>,
entropy: &mut Entropy<CustomRules>,
_metrics: &mut WriteMetrics<CustomRules>,
) -> Box<dyn Iterator<Item = Self::Ability>> {
let power = entropy.generate(ABILITY_POWER_MIN, ABILITY_POWER_MAX);
let v = vec![SimpleAbility::new(ABILITY_ID, power)];
Box::new(v.into_iter())
}
}
battle_rules! {
EmptyTeamRules,
CustomCharacterRules,
CustomActorRules,
EmptyFightRules,
EmptyUserRules,
EmptySpaceRules,
EmptyRoundsRules,
UniformDistribution<i32>
}
/// Creates a scenario with a custom entropy model, one team and a creature.
macro_rules! scenario {
() => {{
// Create the battle.
let battle = Battle::builder(CustomRules::new()).build();
let mut server = Server::builder(battle).build();
assert_eq!(
ResetEntropy::trigger(&mut server).seed(SEED).fire().err(),
None
);
// Create a team.
util::team(&mut server, TEAM_1_ID);
// Create a creature.
util::creature(&mut server, CREATURE_1_ID, TEAM_1_ID, ());
server
}};
}
/// Checks that statistics and abilities have been randomized as predicted.
macro_rules! stat_abi_randomness_check {
($server: expr) => {{
let creature = $server.battle().entities().creature(&CREATURE_1_ID);
assert!(creature.is_some());
let creature = creature.unwrap();
assert_eq!(creature.statistic(&STAT_ID).unwrap().value(), STAT_VALUE);
assert_eq!(
creature.ability(&ABILITY_ID).unwrap().power(),
ABILITY_POWER
);
}};
}
#[test]
fn use_entropy() {
let server = scenario!();
// Check that statistics and abilities have been randomized.
stat_abi_randomness_check!(server);
}
#[cfg(feature = "serialization")]
#[test]
fn entropy_reload() {
let server = scenario!();
// Check that statistics and abilities have been randomized.
stat_abi_randomness_check!(server);
// Save the battle.
let history_json = helper::history_as_json(server.battle());
// Restore the battle.
let mut server = util::server(CustomRules::new());
helper::load_json_history(&mut server, history_json);
// Verify that randomization is the same.
stat_abi_randomness_check!(server);
}
| 30.956204 | 78 | 0.668474 |
eda27337e34cb731b1fc7282a356ad2b64555f14
| 1,535 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
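// Deserialization targets for the BLS12-381 hash-to-curve (random-oracle suite) JSON test vectors.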
#[derive(Deserialize)]
pub struct Field {
pub m: String,
pub p: String,
}
#[derive(Deserialize)]
pub struct Map {
pub name: String,
}
#[derive(Deserialize)]
pub struct Point {
pub x: String,
pub y: String,
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct Bls12381Ro {
pub L: String,
pub Z: String,
pub ciphersuite: String,
pub curve: String,
pub dst: String,
pub expand: String,
pub field: Field,
pub hash: String,
pub k: String,
pub map: Map,
pub randomOracle: bool,
pub vectors: Vec<Bls12381RoVectors>,
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct Bls12381RoVectors {
pub P: Point,
pub Q0: Point,
pub Q1: Point,
pub msg: String,
pub u: Vec<String>,
}
| 24.365079 | 60 | 0.714658 |
4b0fbc5e15b423d16f068925a07371278d282ae2
| 3,535 |
use crate::utils::{is_direct_expn_of, is_expn_of, match_def_path, paths, resolve_node, span_lint};
use if_chain::if_chain;
use rustc::hir::ptr::P;
use rustc::hir::*;
use rustc::lint::{LateContext, LateLintPass, LintArray, LintPass};
use rustc::{declare_lint_pass, declare_tool_lint};
use syntax::ast::LitKind;
use syntax_pos::Span;
declare_clippy_lint! {
/// **What it does:** Checks for missing parameters in `panic!`.
///
/// **Why is this bad?** Contrary to the `format!` family of macros, there are
/// two forms of `panic!`: if there are no parameters given, the first argument
/// is not a format string and used literally. So while `format!("{}")` will
/// fail to compile, `panic!("{}")` will not.
///
/// **Known problems:** None.
///
/// **Example:**
/// ```no_run
/// panic!("This `panic!` is probably missing a parameter there: {}");
/// ```
pub PANIC_PARAMS,
style,
"missing parameters in `panic!` calls"
}
declare_clippy_lint! {
/// **What it does:** Checks for usage of `unimplemented!`.
///
/// **Why is this bad?** This macro should not be present in production code
///
/// **Known problems:** None.
///
/// **Example:**
/// ```no_run
/// unimplemented!();
/// ```
pub UNIMPLEMENTED,
restriction,
"`unimplemented!` should not be present in production code"
}
declare_lint_pass!(PanicUnimplemented => [PANIC_PARAMS, UNIMPLEMENTED]);
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for PanicUnimplemented {
fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
if_chain! {
if let ExprKind::Block(ref block, _) = expr.kind;
if let Some(ref ex) = block.expr;
if let ExprKind::Call(ref fun, ref params) = ex.kind;
if let ExprKind::Path(ref qpath) = fun.kind;
if let Some(fun_def_id) = resolve_node(cx, qpath, fun.hir_id).opt_def_id();
if match_def_path(cx, fun_def_id, &paths::BEGIN_PANIC);
if params.len() == 2;
then {
if is_expn_of(expr.span, "unimplemented").is_some() {
let span = get_outer_span(expr);
span_lint(cx, UNIMPLEMENTED, span,
"`unimplemented` should not be present in production code");
} else {
match_panic(params, expr, cx);
}
}
}
}
}
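/// Returns the call site two macro-expansion levels up (the user-facing macro invocation),
/// falling back to the expression's own span.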
fn get_outer_span(expr: &Expr) -> Span {
if_chain! {
if expr.span.from_expansion();
let first = expr.span.ctxt().outer_expn_data();
if first.call_site.from_expansion();
let second = first.call_site.ctxt().outer_expn_data();
then {
second.call_site
} else {
expr.span
}
}
}
fn match_panic(params: &P<[Expr]>, expr: &Expr, cx: &LateContext<'_, '_>) {
if_chain! {
if let ExprKind::Lit(ref lit) = params[0].kind;
if is_direct_expn_of(expr.span, "panic").is_some();
if let LitKind::Str(ref string, _) = lit.node;
let string = string.as_str().replace("{{", "").replace("}}", "");
if let Some(par) = string.find('{');
if string[par..].contains('}');
if params[0].span.source_callee().is_none();
if params[0].span.lo() != params[0].span.hi();
then {
span_lint(cx, PANIC_PARAMS, params[0].span,
"you probably are missing some parameter in your format string");
}
}
}
| 35.35 | 98 | 0.570014 |
910c2e30365c66b883c51723b5d7120e33ddedb2
| 5,263 |
use apng;
use apng::Encoder;
use apng::{Frame, PNGImage};
use std::cell::RefCell;
use std::io::BufWriter;
use std::rc::Rc;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;
use web_sys::{
window, Blob, BlobPropertyBag, Document, Element, Event, HtmlButtonElement, HtmlInputElement,
Url,
};
use crate::state::State;
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(js_namespace = console)]
fn log(msg: &str);
fn alert(s: &str);
}
macro_rules! console_log {
($($t:tt)*) => (log(&format_args!($($t)*).to_string()))
}
pub fn init_generate(state: &Rc<RefCell<State>>) -> Result<(), JsValue> {
let document = window()
.unwrap()
.document()
.expect("Could not find `document`");
let generate = document.get_element_by_id("generate").unwrap();
let slider = create_frame_speed_slider(&document, state)?;
generate.append_child(&slider)?;
let button = create_generate_button(&document, state)?;
generate.append_child(&button)?;
Ok(())
}
fn create_frame_speed_slider(
document: &Document,
state: &Rc<RefCell<State>>,
) -> Result<Element, JsValue> {
let div = document.create_element("div")?;
div.set_attribute("style", "width: 80%; text-align: center;")?;
div.set_inner_html("frame speed: ");
let element = document.create_element("div")?;
element.set_attribute("class", "slider-frame-speed")?;
element.set_attribute(
"style",
"display: flex; flex-direction: row; align-items: center;",
)?;
let slider = document
.create_element("input")?
.dyn_into::<HtmlInputElement>()?;
let val = document.create_element("strong")?;
val.set_inner_html(format!("{}", state.borrow().get_frame_speed()).as_str());
// set slider attribute
slider.set_attribute("class", "slider")?;
slider.set_attribute("type", "range")?;
slider.set_attribute("min", "0.01")?;
slider.set_attribute("max", "1")?;
slider.set_attribute("step", "0.01")?;
slider.set_attribute(
"value",
format!("{}", state.borrow().get_frame_speed()).as_str(),
)?;
let state = state.clone();
let val_clone = val.clone();
let handle_input = Closure::wrap(Box::new(move |e: Event| {
let target = e.target().unwrap().dyn_into::<HtmlInputElement>().unwrap();
let frame_speed = target.value();
val_clone.set_inner_html(&frame_speed);
let frame_speed: f64 = frame_speed.parse().unwrap();
state.borrow_mut().set_frame_speed(frame_speed);
}) as Box<dyn FnMut(_)>);
slider.add_event_listener_with_callback("input", handle_input.as_ref().unchecked_ref())?;
handle_input.forget();
element.append_child(&slider)?;
div.append_child(&val)?;
div.append_child(&element)?;
Ok(div)
}
fn create_generate_button(
document: &Document,
state: &Rc<RefCell<State>>,
) -> Result<HtmlButtonElement, JsValue> {
let button = document
.create_element("button")?
.dyn_into::<HtmlButtonElement>()?;
button.set_attribute("class", "btn")?;
button.set_inner_html("APNG generate");
let state = state.clone();
let handle_click = Closure::wrap(Box::new(move || {
if state.borrow().get_preview_image_len() == 0 {
alert("not added image");
return;
}
let preview_images = state.borrow().get_preview_image();
let mut png_images: Vec<PNGImage> = Vec::new();
for data in preview_images {
let v = data.to_string().replace("data:image/png;base64,", "");
let buffer = base64::decode(&v).unwrap();
let img =
image::load_from_memory_with_format(&buffer, image::ImageFormat::PNG).unwrap();
png_images.push(apng::load_dynamic_image(img).unwrap());
}
let mut buf = Vec::new();
{
let mut buf_writer = BufWriter::new(&mut buf);
let config = apng::create_config(&png_images, None).unwrap();
let mut encoder = Encoder::new(&mut buf_writer, config).unwrap();
// calculate frame speed
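            // (APNG encodes each frame's delay as the fraction delay_num / delay_den
            // seconds, so a slider value of 0.5 becomes 50/100 below.)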
let frame_speed = state.borrow().get_frame_speed();
let d_num = frame_speed * (100 as f64);
let d_den = 100;
let frame = Frame {
delay_num: Some(d_num as u16),
delay_den: Some(d_den),
..Default::default()
};
match encoder.encode_all(png_images, Some(&frame)) {
Ok(_n) => log("success apng encode!!!"),
Err(err) => console_log!("{}", err),
}
}
let b = js_sys::Uint8Array::new(&unsafe { js_sys::Uint8Array::view(&buf) }.into());
let array = js_sys::Array::new();
array.push(&b.buffer());
let blob = Blob::new_with_u8_array_sequence_and_options(
&array,
BlobPropertyBag::new().type_("image/png"),
)
.unwrap();
let url = Url::create_object_url_with_blob(&blob).unwrap();
let window = window().unwrap();
window.open_with_url(&url).unwrap();
}) as Box<dyn FnMut()>);
button.set_onclick(Some(handle_click.as_ref().unchecked_ref()));
handle_click.forget();
Ok(button)
}
| 31.51497 | 97 | 0.601938 |
5d3c33be3f6479b1dcf82f0188ada9a16adbb399
| 1,095 |
use super::*;
extern crate elias_fano_rust;
pub fn bench_ef_function() {
let v = test_vector();
let start_memory = measure_mem();
let ef = elias_fano_rust::EliasFano::from_vec(&v).unwrap();
let end_memory = measure_mem();
let memory = end_memory - start_memory;
let mut rank_total_cycles = 0.0;
let mut seed = 0xdeadbeef;
for _ in 0..TIME_TRIALS {
seed = xorshift(seed);
let index = seed % SIZE;
let start = rdtsc() as f64;
ef.rank(index);
let end = rdtsc() as f64;
rank_total_cycles += end - start;
}
rank_total_cycles /= TIME_TRIALS as f64;
let mut select_total_cycles = 0.0;
let mut seed = 0xdeadbeef;
for _ in 0..TIME_TRIALS {
seed = xorshift(seed);
let index = seed % SIZE;
let start = rdtsc() as f64;
ef.select(index).unwrap();
let end = rdtsc() as f64;
select_total_cycles += end - start;
}
select_total_cycles /= TIME_TRIALS as f64;
println!("ef,{},{},{}", memory, rank_total_cycles, select_total_cycles);
}
| 24.886364 | 76 | 0.6 |
56ecf31435bfbbcc223347cd14b42d3647e1304b
| 849 |
use ::safer_ffi::prelude::*;
use crate::{decrypt_barcode, parse_decrypted};
#[ffi_export]
fn free_buf(vec: repr_c::Vec<u8>) {
drop(vec); // And that's it!
}
#[ffi_export]
fn c_decrypt_and_parse(barcode_data: repr_c::Vec<u8>) -> repr_c::Vec<u8> {
let returnval;
match decrypt_barcode(barcode_data.to_vec()) {
Ok(decrypted) => match parse_decrypted(decrypted) {
Ok(data) => returnval = serde_json::to_string(&data).unwrap(),
Err(_) => returnval = "{error: true}".to_string(),
},
Err(_) => returnval = "{error: true}".to_string(),
};
returnval.as_bytes().to_vec().into()
}
#[::safer_ffi::cfg_headers]
#[test]
fn generate_headers() -> ::std::io::Result<()> {
::safer_ffi::headers::builder()
.to_file("librusty_sadl.h")?
.generate()
}
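// Usage note (illustrative): `generate_headers` writes the C declarations for
// `free_buf` and `c_decrypt_and_parse` into `librusty_sadl.h` when safer-ffi's
// header-generation feature is enabled, and a C caller is expected to release
// the byte vector returned by `c_decrypt_and_parse` with `free_buf`.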
| 29.275862 | 75 | 0.590106 |
cce452027c4f965c16279e484c172e6af0c68b0f
| 3,659 |
use rustup::dist::dist::TargetTriple;
use rustup::dist::manifest::Manifest;
use rustup::ErrorKind;
// Example manifest from https://public.etherpad-mozilla.org/p/Rust-infra-work-week
static EXAMPLE: &str = include_str!("channel-rust-nightly-example.toml");
// From brson's live build-rust-manifest.py script
static EXAMPLE2: &str = include_str!("channel-rust-nightly-example2.toml");
#[test]
fn parse_smoke_test() {
let x86_64_unknown_linux_gnu = TargetTriple::new("x86_64-unknown-linux-gnu");
let x86_64_unknown_linux_musl = TargetTriple::new("x86_64-unknown-linux-musl");
let pkg = Manifest::parse(EXAMPLE).unwrap();
pkg.get_package("rust").unwrap();
pkg.get_package("rustc").unwrap();
pkg.get_package("cargo").unwrap();
pkg.get_package("rust-std").unwrap();
pkg.get_package("rust-docs").unwrap();
let rust_pkg = pkg.get_package("rust").unwrap();
assert!(rust_pkg.version.contains("1.3.0"));
let rust_target_pkg = rust_pkg
.get_target(Some(&x86_64_unknown_linux_gnu))
.unwrap();
assert_eq!(rust_target_pkg.available(), true);
assert_eq!(rust_target_pkg.bins.clone().unwrap().url, "example.com");
assert_eq!(rust_target_pkg.bins.clone().unwrap().hash, "...");
let component = &rust_target_pkg.components[0];
assert_eq!(component.short_name_in_manifest(), "rustc");
assert_eq!(component.target.as_ref(), Some(&x86_64_unknown_linux_gnu));
let component = &rust_target_pkg.components[4];
assert_eq!(component.short_name_in_manifest(), "rust-std");
assert_eq!(component.target.as_ref(), Some(&x86_64_unknown_linux_musl));
let docs_pkg = pkg.get_package("rust-docs").unwrap();
let docs_target_pkg = docs_pkg
.get_target(Some(&x86_64_unknown_linux_gnu))
.unwrap();
assert_eq!(docs_target_pkg.bins.clone().unwrap().url, "example.com");
}
#[test]
fn renames() {
let manifest = Manifest::parse(EXAMPLE2).unwrap();
assert_eq!(1, manifest.renames.len());
assert_eq!(manifest.renames["cargo-old"], "cargo");
assert_eq!(1, manifest.reverse_renames.len());
assert_eq!(manifest.reverse_renames["cargo"], "cargo-old");
}
#[test]
fn parse_round_trip() {
let original = Manifest::parse(EXAMPLE).unwrap();
let serialized = original.clone().stringify();
let new = Manifest::parse(&serialized).unwrap();
assert_eq!(original, new);
let original = Manifest::parse(EXAMPLE2).unwrap();
let serialized = original.clone().stringify();
let new = Manifest::parse(&serialized).unwrap();
assert_eq!(original, new);
}
#[test]
fn validate_components_have_corresponding_packages() {
let manifest = r#"
manifest-version = "2"
date = "2015-10-10"
[pkg.rust]
version = "rustc 1.3.0 (9a92aaf19 2015-09-15)"
[pkg.rust.target.x86_64-unknown-linux-gnu]
available = true
url = "example.com"
hash = "..."
[[pkg.rust.target.x86_64-unknown-linux-gnu.components]]
pkg = "rustc"
target = "x86_64-unknown-linux-gnu"
[[pkg.rust.target.x86_64-unknown-linux-gnu.extensions]]
pkg = "rust-std"
target = "x86_64-unknown-linux-musl"
[pkg.rustc]
version = "rustc 1.3.0 (9a92aaf19 2015-09-15)"
[pkg.rustc.target.x86_64-unknown-linux-gnu]
available = true
url = "example.com"
hash = "..."
"#;
let err = Manifest::parse(manifest).unwrap_err();
match *err.kind() {
ErrorKind::MissingPackageForComponent(_) => {}
_ => panic!(),
}
}
// #248
#[test]
fn manifest_can_contain_unknown_targets() {
let manifest = EXAMPLE.replace("x86_64-unknown-linux-gnu", "mycpu-myvendor-myos");
assert!(Manifest::parse(&manifest).is_ok());
}
| 33.263636 | 86 | 0.679967 |
f7e48907e0141257af96fe784e082cb3d81637eb
| 87,183 |
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(in_band_lifetimes)]
#![feature(nll)]
#![recursion_limit = "256"]
#[macro_use]
extern crate syntax;
use rustc::bug;
use rustc::hir::def::{DefKind, Res};
use rustc::hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc::hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc::hir::itemlikevisit::DeepVisitor;
use rustc::hir::{self, AssocItemKind, Node, PatKind};
use rustc::lint;
use rustc::middle::privacy::{AccessLevel, AccessLevels};
use rustc::ty::fold::TypeVisitor;
use rustc::ty::query::Providers;
use rustc::ty::subst::InternalSubsts;
use rustc::ty::{self, GenericParamDefKind, TraitRef, Ty, TyCtxt, TypeFoldable};
use rustc::util::nodemap::HirIdSet;
use rustc_data_structures::fx::FxHashSet;
use syntax::ast::Ident;
use syntax::attr;
use syntax::symbol::{kw, sym};
use syntax_pos::hygiene::Transparency;
use syntax_pos::Span;
use std::marker::PhantomData;
use std::{cmp, fmt, mem};
use rustc_error_codes::*;
////////////////////////////////////////////////////////////////////////////////
/// Generic infrastructure used to implement specific visitors below.
////////////////////////////////////////////////////////////////////////////////
/// Implemented to visit all `DefId`s in a type.
/// Visiting `DefId`s is useful because visibilities and reachabilities are attached to them.
/// The idea is to visit "all components of a type", as documented in
/// https://github.com/rust-lang/rfcs/blob/master/text/2145-type-privacy.md#how-to-determine-visibility-of-a-type.
/// The default type visitor (`TypeVisitor`) does most of the job, but it has some shortcomings.
/// First, it doesn't have overridable `fn visit_trait_ref`, so we have to catch trait `DefId`s
/// manually. Second, it doesn't visit some type components like signatures of fn types, or traits
/// in `impl Trait`, see individual comments in `DefIdVisitorSkeleton::visit_ty`.
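/// As a rough illustration: for a signature such as `fn(Vec<foo::Bar>) -> Box<dyn Baz>`
/// (made-up names), a visitor is expected to reach the `DefId`s of `Vec`, `foo::Bar`,
/// `Box`, and the trait `Baz`, so that privacy and reachability checks see every
/// named component of the type.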
trait DefIdVisitor<'tcx> {
fn tcx(&self) -> TyCtxt<'tcx>;
fn shallow(&self) -> bool {
false
}
fn skip_assoc_tys(&self) -> bool {
false
}
fn visit_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool;
/// Not overridden, but used to actually visit types and traits.
fn skeleton(&mut self) -> DefIdVisitorSkeleton<'_, 'tcx, Self> {
DefIdVisitorSkeleton {
def_id_visitor: self,
visited_opaque_tys: Default::default(),
dummy: Default::default(),
}
}
fn visit(&mut self, ty_fragment: impl TypeFoldable<'tcx>) -> bool {
ty_fragment.visit_with(&mut self.skeleton())
}
fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> bool {
self.skeleton().visit_trait(trait_ref)
}
fn visit_predicates(&mut self, predicates: ty::GenericPredicates<'tcx>) -> bool {
self.skeleton().visit_predicates(predicates)
}
}
struct DefIdVisitorSkeleton<'v, 'tcx, V>
where
V: DefIdVisitor<'tcx> + ?Sized,
{
def_id_visitor: &'v mut V,
visited_opaque_tys: FxHashSet<DefId>,
dummy: PhantomData<TyCtxt<'tcx>>,
}
impl<'tcx, V> DefIdVisitorSkeleton<'_, 'tcx, V>
where
V: DefIdVisitor<'tcx> + ?Sized,
{
fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> bool {
let TraitRef { def_id, substs } = trait_ref;
self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref.print_only_trait_path())
|| (!self.def_id_visitor.shallow() && substs.visit_with(self))
}
fn visit_predicates(&mut self, predicates: ty::GenericPredicates<'tcx>) -> bool {
let ty::GenericPredicates { parent: _, predicates } = predicates;
for (predicate, _span) in predicates {
match predicate {
ty::Predicate::Trait(poly_predicate) => {
let ty::TraitPredicate { trait_ref } = *poly_predicate.skip_binder();
if self.visit_trait(trait_ref) {
return true;
}
}
ty::Predicate::Projection(poly_predicate) => {
let ty::ProjectionPredicate { projection_ty, ty } =
*poly_predicate.skip_binder();
if ty.visit_with(self) {
return true;
}
if self.visit_trait(projection_ty.trait_ref(self.def_id_visitor.tcx())) {
return true;
}
}
ty::Predicate::TypeOutlives(poly_predicate) => {
let ty::OutlivesPredicate(ty, _region) = *poly_predicate.skip_binder();
if ty.visit_with(self) {
return true;
}
}
ty::Predicate::RegionOutlives(..) => {}
_ => bug!("unexpected predicate: {:?}", predicate),
}
}
false
}
}
impl<'tcx, V> TypeVisitor<'tcx> for DefIdVisitorSkeleton<'_, 'tcx, V>
where
V: DefIdVisitor<'tcx> + ?Sized,
{
fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
let tcx = self.def_id_visitor.tcx();
// InternalSubsts are not visited here because they are visited below in `super_visit_with`.
match ty.kind {
ty::Adt(&ty::AdtDef { did: def_id, .. }, ..)
| ty::Foreign(def_id)
| ty::FnDef(def_id, ..)
| ty::Closure(def_id, ..)
| ty::Generator(def_id, ..) => {
if self.def_id_visitor.visit_def_id(def_id, "type", &ty) {
return true;
}
if self.def_id_visitor.shallow() {
return false;
}
// Default type visitor doesn't visit signatures of fn types.
// Something like `fn() -> Priv {my_func}` is considered a private type even if
// `my_func` is public, so we need to visit signatures.
if let ty::FnDef(..) = ty.kind {
if tcx.fn_sig(def_id).visit_with(self) {
return true;
}
}
// Inherent static methods don't have self type in substs.
// Something like `fn() {my_method}` type of the method
// `impl Pub<Priv> { pub fn my_method() {} }` is considered a private type,
// so we need to visit the self type additionally.
if let Some(assoc_item) = tcx.opt_associated_item(def_id) {
if let ty::ImplContainer(impl_def_id) = assoc_item.container {
if tcx.type_of(impl_def_id).visit_with(self) {
return true;
}
}
}
}
ty::Projection(proj) | ty::UnnormalizedProjection(proj) => {
if self.def_id_visitor.skip_assoc_tys() {
// Visitors searching for minimal visibility/reachability want to
// conservatively approximate associated types like `<Type as Trait>::Alias`
// as visible/reachable even if both `Type` and `Trait` are private.
// Ideally, associated types should be substituted in the same way as
// free type aliases, but this isn't done yet.
return false;
}
// This will also visit substs if necessary, so we don't need to recurse.
return self.visit_trait(proj.trait_ref(tcx));
}
ty::Dynamic(predicates, ..) => {
// All traits in the list are considered the "primary" part of the type
// and are visited by shallow visitors.
for predicate in *predicates.skip_binder() {
let trait_ref = match *predicate {
ty::ExistentialPredicate::Trait(trait_ref) => trait_ref,
ty::ExistentialPredicate::Projection(proj) => proj.trait_ref(tcx),
ty::ExistentialPredicate::AutoTrait(def_id) => {
ty::ExistentialTraitRef { def_id, substs: InternalSubsts::empty() }
}
};
let ty::ExistentialTraitRef { def_id, substs: _ } = trait_ref;
if self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref) {
return true;
}
}
}
ty::Opaque(def_id, ..) => {
// Skip repeated `Opaque`s to avoid infinite recursion.
if self.visited_opaque_tys.insert(def_id) {
// The intent is to treat `impl Trait1 + Trait2` identically to
// `dyn Trait1 + Trait2`. Therefore we ignore def-id of the opaque type itself
// (it either has no visibility, or its visibility is insignificant, like
// visibilities of type aliases) and recurse into predicates instead to go
// through the trait list (default type visitor doesn't visit those traits).
// All traits in the list are considered the "primary" part of the type
// and are visited by shallow visitors.
if self.visit_predicates(tcx.predicates_of(def_id)) {
return true;
}
}
}
// These types don't have their own def-ids (but may have subcomponents
// with def-ids that should be visited recursively).
ty::Bool
| ty::Char
| ty::Int(..)
| ty::Uint(..)
| ty::Float(..)
| ty::Str
| ty::Never
| ty::Array(..)
| ty::Slice(..)
| ty::Tuple(..)
| ty::RawPtr(..)
| ty::Ref(..)
| ty::FnPtr(..)
| ty::Param(..)
| ty::Error
| ty::GeneratorWitness(..) => {}
ty::Bound(..) | ty::Placeholder(..) | ty::Infer(..) => {
bug!("unexpected type: {:?}", ty)
}
}
!self.def_id_visitor.shallow() && ty.super_visit_with(self)
}
}
fn def_id_visibility<'tcx>(
tcx: TyCtxt<'tcx>,
def_id: DefId,
) -> (ty::Visibility, Span, &'static str) {
match tcx.hir().as_local_hir_id(def_id) {
Some(hir_id) => {
let vis = match tcx.hir().get(hir_id) {
Node::Item(item) => &item.vis,
Node::ForeignItem(foreign_item) => &foreign_item.vis,
Node::MacroDef(macro_def) => {
                    if attr::contains_name(&macro_def.attrs, sym::macro_export) {
return (ty::Visibility::Public, macro_def.span, "public");
} else {
                        &macro_def.vis
}
}
Node::TraitItem(..) | Node::Variant(..) => {
return def_id_visibility(tcx, tcx.hir().get_parent_did(hir_id));
}
Node::ImplItem(impl_item) => {
match tcx.hir().get(tcx.hir().get_parent_item(hir_id)) {
Node::Item(item) => match &item.kind {
hir::ItemKind::Impl(.., None, _, _) => &impl_item.vis,
hir::ItemKind::Impl(.., Some(trait_ref), _, _) => {
return def_id_visibility(tcx, trait_ref.path.res.def_id());
}
kind => bug!("unexpected item kind: {:?}", kind),
},
node => bug!("unexpected node kind: {:?}", node),
}
}
Node::Ctor(vdata) => {
let parent_hir_id = tcx.hir().get_parent_node(hir_id);
match tcx.hir().get(parent_hir_id) {
Node::Variant(..) => {
let parent_did = tcx.hir().local_def_id(parent_hir_id);
let (mut ctor_vis, mut span, mut descr) =
def_id_visibility(tcx, parent_did);
let adt_def = tcx.adt_def(tcx.hir().get_parent_did(hir_id));
let ctor_did = tcx.hir().local_def_id(vdata.ctor_hir_id().unwrap());
let variant = adt_def.variant_with_ctor_id(ctor_did);
if variant.is_field_list_non_exhaustive()
&& ctor_vis == ty::Visibility::Public
{
ctor_vis =
ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX));
let attrs = tcx.get_attrs(variant.def_id);
span =
attr::find_by_name(&attrs, sym::non_exhaustive).unwrap().span;
descr = "crate-visible";
}
return (ctor_vis, span, descr);
}
Node::Item(..) => {
let item = match tcx.hir().get(parent_hir_id) {
Node::Item(item) => item,
node => bug!("unexpected node kind: {:?}", node),
};
let (mut ctor_vis, mut span, mut descr) = (
ty::Visibility::from_hir(&item.vis, parent_hir_id, tcx),
item.vis.span,
item.vis.node.descr(),
);
for field in vdata.fields() {
let field_vis = ty::Visibility::from_hir(&field.vis, hir_id, tcx);
if ctor_vis.is_at_least(field_vis, tcx) {
ctor_vis = field_vis;
span = field.vis.span;
descr = field.vis.node.descr();
}
}
// If the structure is marked as non_exhaustive then lower the
// visibility to within the crate.
if ctor_vis == ty::Visibility::Public {
let adt_def = tcx.adt_def(tcx.hir().get_parent_did(hir_id));
if adt_def.non_enum_variant().is_field_list_non_exhaustive() {
ctor_vis =
ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX));
span = attr::find_by_name(&item.attrs, sym::non_exhaustive)
.unwrap()
.span;
descr = "crate-visible";
}
}
return (ctor_vis, span, descr);
}
node => bug!("unexpected node kind: {:?}", node),
}
}
Node::Expr(expr) => {
return (
ty::Visibility::Restricted(tcx.hir().get_module_parent(expr.hir_id)),
expr.span,
"private",
);
}
node => bug!("unexpected node kind: {:?}", node),
};
(ty::Visibility::from_hir(vis, hir_id, tcx), vis.span, vis.node.descr())
}
None => {
let vis = tcx.visibility(def_id);
let descr = if vis == ty::Visibility::Public { "public" } else { "private" };
(vis, tcx.def_span(def_id), descr)
}
}
}
// Set the correct `TypeckTables` for the given `item_id` (or an empty table if
// there is no `TypeckTables` for the item).
fn item_tables<'a, 'tcx>(
tcx: TyCtxt<'tcx>,
hir_id: hir::HirId,
empty_tables: &'a ty::TypeckTables<'tcx>,
) -> &'a ty::TypeckTables<'tcx> {
let def_id = tcx.hir().local_def_id(hir_id);
if tcx.has_typeck_tables(def_id) { tcx.typeck_tables_of(def_id) } else { empty_tables }
}
fn min(vis1: ty::Visibility, vis2: ty::Visibility, tcx: TyCtxt<'_>) -> ty::Visibility {
if vis1.is_at_least(vis2, tcx) { vis2 } else { vis1 }
}
////////////////////////////////////////////////////////////////////////////////
/// Visitor used to determine if pub(restricted) is used anywhere in the crate.
///
/// This is done so that `private_in_public` warnings can be turned into hard errors
/// in crates that have been updated to use pub(restricted).
////////////////////////////////////////////////////////////////////////////////
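// For example, `pub(crate) fn f() {}` or `pub(in some::path) struct S;` sets
// `has_pub_restricted`, while plain `pub` and private items do not.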
struct PubRestrictedVisitor<'tcx> {
tcx: TyCtxt<'tcx>,
has_pub_restricted: bool,
}
impl Visitor<'tcx> for PubRestrictedVisitor<'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::All(&self.tcx.hir())
}
fn visit_vis(&mut self, vis: &'tcx hir::Visibility) {
self.has_pub_restricted = self.has_pub_restricted || vis.node.is_pub_restricted();
}
}
////////////////////////////////////////////////////////////////////////////////
/// Visitor used to determine impl visibility and reachability.
////////////////////////////////////////////////////////////////////////////////
struct FindMin<'a, 'tcx, VL: VisibilityLike> {
tcx: TyCtxt<'tcx>,
access_levels: &'a AccessLevels,
min: VL,
}
impl<'a, 'tcx, VL: VisibilityLike> DefIdVisitor<'tcx> for FindMin<'a, 'tcx, VL> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn shallow(&self) -> bool {
VL::SHALLOW
}
fn skip_assoc_tys(&self) -> bool {
true
}
fn visit_def_id(&mut self, def_id: DefId, _kind: &str, _descr: &dyn fmt::Display) -> bool {
self.min = VL::new_min(self, def_id);
false
}
}
trait VisibilityLike: Sized {
const MAX: Self;
const SHALLOW: bool = false;
fn new_min(find: &FindMin<'_, '_, Self>, def_id: DefId) -> Self;
// Returns an over-approximation (`skip_assoc_tys` = true) of visibility due to
// associated types for which we can't determine visibility precisely.
fn of_impl(hir_id: hir::HirId, tcx: TyCtxt<'_>, access_levels: &AccessLevels) -> Self {
let mut find = FindMin { tcx, access_levels, min: Self::MAX };
let def_id = tcx.hir().local_def_id(hir_id);
find.visit(tcx.type_of(def_id));
if let Some(trait_ref) = tcx.impl_trait_ref(def_id) {
find.visit_trait(trait_ref);
}
find.min
}
}
impl VisibilityLike for ty::Visibility {
const MAX: Self = ty::Visibility::Public;
fn new_min(find: &FindMin<'_, '_, Self>, def_id: DefId) -> Self {
min(def_id_visibility(find.tcx, def_id).0, find.min, find.tcx)
}
}
impl VisibilityLike for Option<AccessLevel> {
const MAX: Self = Some(AccessLevel::Public);
// Type inference is very smart sometimes.
// It can make an impl reachable even some components of its type or trait are unreachable.
// E.g. methods of `impl ReachableTrait<UnreachableTy> for ReachableTy<UnreachableTy> { ... }`
// can be usable from other crates (#57264). So we skip substs when calculating reachability
// and consider an impl reachable if its "shallow" type and trait are reachable.
//
// The assumption we make here is that type-inference won't let you use an impl without knowing
// both "shallow" version of its self type and "shallow" version of its trait if it exists
// (which require reaching the `DefId`s in them).
const SHALLOW: bool = true;
fn new_min(find: &FindMin<'_, '_, Self>, def_id: DefId) -> Self {
cmp::min(
if let Some(hir_id) = find.tcx.hir().as_local_hir_id(def_id) {
find.access_levels.map.get(&hir_id).cloned()
} else {
Self::MAX
},
find.min,
)
}
}
////////////////////////////////////////////////////////////////////////////////
/// The embargo visitor, used to determine the exports of the AST.
////////////////////////////////////////////////////////////////////////////////
struct EmbargoVisitor<'tcx> {
tcx: TyCtxt<'tcx>,
/// Accessibility levels for reachable nodes.
access_levels: AccessLevels,
/// A set of pairs corresponding to modules, where the first module is
/// reachable via a macro that's defined in the second module. This cannot
/// be represented as reachable because it can't handle the following case:
///
/// pub mod n { // Should be `Public`
/// pub(crate) mod p { // Should *not* be accessible
/// pub fn f() -> i32 { 12 } // Must be `Reachable`
/// }
/// }
/// pub macro m() {
/// n::p::f()
/// }
macro_reachable: FxHashSet<(hir::HirId, DefId)>,
/// Previous accessibility level; `None` means unreachable.
prev_level: Option<AccessLevel>,
/// Has something changed in the level map?
changed: bool,
}
struct ReachEverythingInTheInterfaceVisitor<'a, 'tcx> {
access_level: Option<AccessLevel>,
item_def_id: DefId,
ev: &'a mut EmbargoVisitor<'tcx>,
}
impl EmbargoVisitor<'tcx> {
fn get(&self, id: hir::HirId) -> Option<AccessLevel> {
self.access_levels.map.get(&id).cloned()
}
/// Updates node level and returns the updated level.
fn update(&mut self, id: hir::HirId, level: Option<AccessLevel>) -> Option<AccessLevel> {
let old_level = self.get(id);
// Accessibility levels can only grow.
if level > old_level {
self.access_levels.map.insert(id, level.unwrap());
self.changed = true;
level
} else {
old_level
}
}
fn reach(
&mut self,
item_id: hir::HirId,
access_level: Option<AccessLevel>,
) -> ReachEverythingInTheInterfaceVisitor<'_, 'tcx> {
ReachEverythingInTheInterfaceVisitor {
access_level: cmp::min(access_level, Some(AccessLevel::Reachable)),
item_def_id: self.tcx.hir().local_def_id(item_id),
ev: self,
}
}
/// Updates the item as being reachable through a macro defined in the given
/// module. Returns `true` if the level has changed.
fn update_macro_reachable(&mut self, reachable_mod: hir::HirId, defining_mod: DefId) -> bool {
if self.macro_reachable.insert((reachable_mod, defining_mod)) {
self.update_macro_reachable_mod(reachable_mod, defining_mod);
true
} else {
false
}
}
fn update_macro_reachable_mod(&mut self, reachable_mod: hir::HirId, defining_mod: DefId) {
let module_def_id = self.tcx.hir().local_def_id(reachable_mod);
let module = self.tcx.hir().get_module(module_def_id).0;
for item_id in module.item_ids {
let hir_id = item_id.id;
let item_def_id = self.tcx.hir().local_def_id(hir_id);
if let Some(def_kind) = self.tcx.def_kind(item_def_id) {
let item = self.tcx.hir().expect_item(hir_id);
let vis = ty::Visibility::from_hir(&item.vis, hir_id, self.tcx);
self.update_macro_reachable_def(hir_id, def_kind, vis, defining_mod);
}
}
if let Some(exports) = self.tcx.module_exports(module_def_id) {
for export in exports {
if export.vis.is_accessible_from(defining_mod, self.tcx) {
if let Res::Def(def_kind, def_id) = export.res {
let vis = def_id_visibility(self.tcx, def_id).0;
if let Some(hir_id) = self.tcx.hir().as_local_hir_id(def_id) {
self.update_macro_reachable_def(hir_id, def_kind, vis, defining_mod);
}
}
}
}
}
}
fn update_macro_reachable_def(
&mut self,
hir_id: hir::HirId,
def_kind: DefKind,
vis: ty::Visibility,
module: DefId,
) {
let level = Some(AccessLevel::Reachable);
if let ty::Visibility::Public = vis {
self.update(hir_id, level);
}
match def_kind {
// No type privacy, so can be directly marked as reachable.
DefKind::Const
| DefKind::Macro(_)
| DefKind::Static
| DefKind::TraitAlias
| DefKind::TyAlias => {
if vis.is_accessible_from(module, self.tcx) {
self.update(hir_id, level);
}
}
// We can't use a module name as the final segment of a path, except
// in use statements. Since re-export checking doesn't consider
// hygiene these don't need to be marked reachable. The contents of
// the module, however may be reachable.
DefKind::Mod => {
if vis.is_accessible_from(module, self.tcx) {
self.update_macro_reachable(hir_id, module);
}
}
DefKind::Struct | DefKind::Union => {
// While structs and unions have type privacy, their fields do
// not.
if let ty::Visibility::Public = vis {
let item = self.tcx.hir().expect_item(hir_id);
if let hir::ItemKind::Struct(ref struct_def, _)
| hir::ItemKind::Union(ref struct_def, _) = item.kind
{
for field in struct_def.fields() {
let field_vis =
ty::Visibility::from_hir(&field.vis, field.hir_id, self.tcx);
if field_vis.is_accessible_from(module, self.tcx) {
self.reach(field.hir_id, level).ty();
}
}
} else {
bug!("item {:?} with DefKind {:?}", item, def_kind);
}
}
}
// These have type privacy, so are not reachable unless they're
// public
DefKind::AssocConst
| DefKind::AssocTy
| DefKind::AssocOpaqueTy
| DefKind::ConstParam
| DefKind::Ctor(_, _)
| DefKind::Enum
| DefKind::ForeignTy
| DefKind::Fn
| DefKind::OpaqueTy
| DefKind::Method
| DefKind::Trait
| DefKind::TyParam
| DefKind::Variant => (),
}
}
/// Given the path segments of a `ItemKind::Use`, then we need
/// to update the visibility of the intermediate use so that it isn't linted
/// by `unreachable_pub`.
///
/// This isn't trivial as `path.res` has the `DefId` of the eventual target
/// of the use statement not of the next intermediate use statement.
///
/// To do this, consider the last two segments of the path to our intermediate
/// use statement. We expect the penultimate segment to be a module and the
/// last segment to be the name of the item we are exporting. We can then
/// look at the items contained in the module for the use statement with that
/// name and update that item's visibility.
///
/// FIXME: This solution won't work with glob imports and doesn't respect
/// namespaces. See <https://github.com/rust-lang/rust/pull/57922#discussion_r251234202>.
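    /// For example (illustrative), given `pub use m::item;` where `item` is itself a
    /// `use` declaration inside module `m`, the two segments inspected are `m` and
    /// `item`, and that inner `use` item is the one promoted to `Exported`.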
fn update_visibility_of_intermediate_use_statements(&mut self, segments: &[hir::PathSegment]) {
if let Some([module, segment]) = segments.rchunks_exact(2).next() {
if let Some(item) = module
.res
.and_then(|res| res.mod_def_id())
.and_then(|def_id| self.tcx.hir().as_local_hir_id(def_id))
.map(|module_hir_id| self.tcx.hir().expect_item(module_hir_id))
{
if let hir::ItemKind::Mod(m) = &item.kind {
for item_id in m.item_ids.as_ref() {
let item = self.tcx.hir().expect_item(item_id.id);
let def_id = self.tcx.hir().local_def_id(item_id.id);
if !self.tcx.hygienic_eq(segment.ident, item.ident, def_id) {
continue;
}
if let hir::ItemKind::Use(..) = item.kind {
self.update(item.hir_id, Some(AccessLevel::Exported));
}
}
}
}
}
}
}
impl Visitor<'tcx> for EmbargoVisitor<'tcx> {
/// We want to visit items in the context of their containing
/// module and so forth, so supply a crate for doing a deep walk.
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::All(&self.tcx.hir())
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
let inherited_item_level = match item.kind {
hir::ItemKind::Impl(..) => {
Option::<AccessLevel>::of_impl(item.hir_id, self.tcx, &self.access_levels)
}
// Foreign modules inherit level from parents.
hir::ItemKind::ForeignMod(..) => self.prev_level,
// Other `pub` items inherit levels from parents.
hir::ItemKind::Const(..)
| hir::ItemKind::Enum(..)
| hir::ItemKind::ExternCrate(..)
| hir::ItemKind::GlobalAsm(..)
| hir::ItemKind::Fn(..)
| hir::ItemKind::Mod(..)
| hir::ItemKind::Static(..)
| hir::ItemKind::Struct(..)
| hir::ItemKind::Trait(..)
| hir::ItemKind::TraitAlias(..)
| hir::ItemKind::OpaqueTy(..)
| hir::ItemKind::TyAlias(..)
| hir::ItemKind::Union(..)
| hir::ItemKind::Use(..) => {
if item.vis.node.is_pub() {
self.prev_level
} else {
None
}
}
};
// Update level of the item itself.
let item_level = self.update(item.hir_id, inherited_item_level);
// Update levels of nested things.
match item.kind {
hir::ItemKind::Enum(ref def, _) => {
for variant in def.variants {
let variant_level = self.update(variant.id, item_level);
if let Some(ctor_hir_id) = variant.data.ctor_hir_id() {
self.update(ctor_hir_id, item_level);
}
for field in variant.data.fields() {
self.update(field.hir_id, variant_level);
}
}
}
hir::ItemKind::Impl(.., ref trait_ref, _, impl_item_refs) => {
for impl_item_ref in impl_item_refs {
if trait_ref.is_some() || impl_item_ref.vis.node.is_pub() {
self.update(impl_item_ref.id.hir_id, item_level);
}
}
}
hir::ItemKind::Trait(.., trait_item_refs) => {
for trait_item_ref in trait_item_refs {
self.update(trait_item_ref.id.hir_id, item_level);
}
}
hir::ItemKind::Struct(ref def, _) | hir::ItemKind::Union(ref def, _) => {
if let Some(ctor_hir_id) = def.ctor_hir_id() {
self.update(ctor_hir_id, item_level);
}
for field in def.fields() {
if field.vis.node.is_pub() {
self.update(field.hir_id, item_level);
}
}
}
hir::ItemKind::ForeignMod(ref foreign_mod) => {
for foreign_item in foreign_mod.items {
if foreign_item.vis.node.is_pub() {
self.update(foreign_item.hir_id, item_level);
}
}
}
hir::ItemKind::OpaqueTy(..)
| hir::ItemKind::Use(..)
| hir::ItemKind::Static(..)
| hir::ItemKind::Const(..)
| hir::ItemKind::GlobalAsm(..)
| hir::ItemKind::TyAlias(..)
| hir::ItemKind::Mod(..)
| hir::ItemKind::TraitAlias(..)
| hir::ItemKind::Fn(..)
| hir::ItemKind::ExternCrate(..) => {}
}
// Mark all items in interfaces of reachable items as reachable.
match item.kind {
// The interface is empty.
hir::ItemKind::ExternCrate(..) => {}
// All nested items are checked by `visit_item`.
hir::ItemKind::Mod(..) => {}
// Re-exports are handled in `visit_mod`. However, in order to avoid looping over
// all of the items of a mod in `visit_mod` looking for use statements, we handle
// making sure that intermediate use statements have their visibilities updated here.
hir::ItemKind::Use(ref path, _) => {
if item_level.is_some() {
self.update_visibility_of_intermediate_use_statements(path.segments.as_ref());
}
}
// The interface is empty.
hir::ItemKind::GlobalAsm(..) => {}
hir::ItemKind::OpaqueTy(..) => {
// FIXME: This is some serious pessimization intended to workaround deficiencies
// in the reachability pass (`middle/reachable.rs`). Types are marked as link-time
// reachable if they are returned via `impl Trait`, even from private functions.
let exist_level = cmp::max(item_level, Some(AccessLevel::ReachableFromImplTrait));
self.reach(item.hir_id, exist_level).generics().predicates().ty();
}
// Visit everything.
hir::ItemKind::Const(..)
| hir::ItemKind::Static(..)
| hir::ItemKind::Fn(..)
| hir::ItemKind::TyAlias(..) => {
if item_level.is_some() {
self.reach(item.hir_id, item_level).generics().predicates().ty();
}
}
hir::ItemKind::Trait(.., trait_item_refs) => {
if item_level.is_some() {
self.reach(item.hir_id, item_level).generics().predicates();
for trait_item_ref in trait_item_refs {
let mut reach = self.reach(trait_item_ref.id.hir_id, item_level);
reach.generics().predicates();
if trait_item_ref.kind == AssocItemKind::Type
&& !trait_item_ref.defaultness.has_value()
{
// No type to visit.
} else {
reach.ty();
}
}
}
}
hir::ItemKind::TraitAlias(..) => {
if item_level.is_some() {
self.reach(item.hir_id, item_level).generics().predicates();
}
}
// Visit everything except for private impl items.
hir::ItemKind::Impl(.., impl_item_refs) => {
if item_level.is_some() {
self.reach(item.hir_id, item_level).generics().predicates().ty().trait_ref();
for impl_item_ref in impl_item_refs {
let impl_item_level = self.get(impl_item_ref.id.hir_id);
if impl_item_level.is_some() {
self.reach(impl_item_ref.id.hir_id, impl_item_level)
.generics()
.predicates()
.ty();
}
}
}
}
// Visit everything, but enum variants have their own levels.
hir::ItemKind::Enum(ref def, _) => {
if item_level.is_some() {
self.reach(item.hir_id, item_level).generics().predicates();
}
for variant in def.variants {
let variant_level = self.get(variant.id);
if variant_level.is_some() {
for field in variant.data.fields() {
self.reach(field.hir_id, variant_level).ty();
}
// Corner case: if the variant is reachable, but its
// enum is not, make the enum reachable as well.
self.update(item.hir_id, variant_level);
}
}
}
// Visit everything, but foreign items have their own levels.
hir::ItemKind::ForeignMod(ref foreign_mod) => {
for foreign_item in foreign_mod.items {
let foreign_item_level = self.get(foreign_item.hir_id);
if foreign_item_level.is_some() {
self.reach(foreign_item.hir_id, foreign_item_level)
.generics()
.predicates()
.ty();
}
}
}
// Visit everything except for private fields.
hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
if item_level.is_some() {
self.reach(item.hir_id, item_level).generics().predicates();
for field in struct_def.fields() {
let field_level = self.get(field.hir_id);
if field_level.is_some() {
self.reach(field.hir_id, field_level).ty();
}
}
}
}
}
let orig_level = mem::replace(&mut self.prev_level, item_level);
intravisit::walk_item(self, item);
self.prev_level = orig_level;
}
fn visit_block(&mut self, b: &'tcx hir::Block) {
// Blocks can have public items, for example impls, but they always
// start as completely private regardless of publicity of a function,
// constant, type, field, etc., in which this block resides.
let orig_level = mem::replace(&mut self.prev_level, None);
intravisit::walk_block(self, b);
self.prev_level = orig_level;
}
fn visit_mod(&mut self, m: &'tcx hir::Mod<'tcx>, _sp: Span, id: hir::HirId) {
// This code is here instead of in visit_item so that the
// crate module gets processed as well.
if self.prev_level.is_some() {
let def_id = self.tcx.hir().local_def_id(id);
if let Some(exports) = self.tcx.module_exports(def_id) {
for export in exports.iter() {
if export.vis == ty::Visibility::Public {
if let Some(def_id) = export.res.opt_def_id() {
if let Some(hir_id) = self.tcx.hir().as_local_hir_id(def_id) {
self.update(hir_id, Some(AccessLevel::Exported));
}
}
}
}
}
}
intravisit::walk_mod(self, m, id);
}
fn visit_macro_def(&mut self, md: &'tcx hir::MacroDef<'tcx>) {
if attr::find_transparency(&md.attrs, md.legacy).0 != Transparency::Opaque {
self.update(md.hir_id, Some(AccessLevel::Public));
return;
}
let macro_module_def_id =
ty::DefIdTree::parent(self.tcx, self.tcx.hir().local_def_id(md.hir_id)).unwrap();
let mut module_id = match self.tcx.hir().as_local_hir_id(macro_module_def_id) {
Some(module_id) if self.tcx.hir().is_hir_id_module(module_id) => module_id,
// `module_id` doesn't correspond to a `mod`, return early (#63164, #65252).
_ => return,
};
let level = if md.vis.node.is_pub() { self.get(module_id) } else { None };
let new_level = self.update(md.hir_id, level);
if new_level.is_none() {
return;
}
loop {
let changed_reachability = self.update_macro_reachable(module_id, macro_module_def_id);
if changed_reachability || module_id == hir::CRATE_HIR_ID {
break;
}
module_id = self.tcx.hir().get_parent_node(module_id);
}
}
}
impl ReachEverythingInTheInterfaceVisitor<'_, 'tcx> {
fn generics(&mut self) -> &mut Self {
for param in &self.ev.tcx.generics_of(self.item_def_id).params {
match param.kind {
GenericParamDefKind::Lifetime => {}
GenericParamDefKind::Type { has_default, .. } => {
if has_default {
self.visit(self.ev.tcx.type_of(param.def_id));
}
}
GenericParamDefKind::Const => {
self.visit(self.ev.tcx.type_of(param.def_id));
}
}
}
self
}
fn predicates(&mut self) -> &mut Self {
self.visit_predicates(self.ev.tcx.predicates_of(self.item_def_id));
self
}
fn ty(&mut self) -> &mut Self {
self.visit(self.ev.tcx.type_of(self.item_def_id));
self
}
fn trait_ref(&mut self) -> &mut Self {
if let Some(trait_ref) = self.ev.tcx.impl_trait_ref(self.item_def_id) {
self.visit_trait(trait_ref);
}
self
}
}
impl DefIdVisitor<'tcx> for ReachEverythingInTheInterfaceVisitor<'_, 'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.ev.tcx
}
fn visit_def_id(&mut self, def_id: DefId, _kind: &str, _descr: &dyn fmt::Display) -> bool {
if let Some(hir_id) = self.ev.tcx.hir().as_local_hir_id(def_id) {
if let ((ty::Visibility::Public, ..), _)
| (_, Some(AccessLevel::ReachableFromImplTrait)) =
(def_id_visibility(self.tcx(), def_id), self.access_level)
{
self.ev.update(hir_id, self.access_level);
}
}
false
}
}
//////////////////////////////////////////////////////////////////////////////////////
/// Name privacy visitor, checks privacy and reports violations.
/// Most of name privacy checks are performed during the main resolution phase,
/// or later in type checking when field accesses and associated items are resolved.
/// This pass performs remaining checks for fields in struct expressions and patterns.
//////////////////////////////////////////////////////////////////////////////////////
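// For example (illustrative), `other_mod::S { secret: 0 }` in an expression, or the
// pattern `other_mod::S { secret }`, is rejected here with E0451 when `secret` is a
// private field, even though name resolution itself succeeded.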
struct NamePrivacyVisitor<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
tables: &'a ty::TypeckTables<'tcx>,
current_item: hir::HirId,
empty_tables: &'a ty::TypeckTables<'tcx>,
}
impl<'a, 'tcx> NamePrivacyVisitor<'a, 'tcx> {
// Checks that a field in a struct constructor (expression or pattern) is accessible.
fn check_field(
&mut self,
use_ctxt: Span, // syntax context of the field name at the use site
span: Span, // span of the field pattern, e.g., `x: 0`
def: &'tcx ty::AdtDef, // definition of the struct or enum
field: &'tcx ty::FieldDef,
) {
// definition of the field
let ident = Ident::new(kw::Invalid, use_ctxt);
let current_hir = self.current_item;
let def_id = self.tcx.adjust_ident_and_get_scope(ident, def.did, current_hir).1;
if !def.is_enum() && !field.vis.is_accessible_from(def_id, self.tcx) {
struct_span_err!(
self.tcx.sess,
span,
E0451,
"field `{}` of {} `{}` is private",
field.ident,
def.variant_descr(),
self.tcx.def_path_str(def.did)
)
.span_label(span, format!("field `{}` is private", field.ident))
.emit();
}
}
}
impl<'a, 'tcx> Visitor<'tcx> for NamePrivacyVisitor<'a, 'tcx> {
/// We want to visit items in the context of their containing
/// module and so forth, so supply a crate for doing a deep walk.
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::All(&self.tcx.hir())
}
fn visit_mod(&mut self, _m: &'tcx hir::Mod<'tcx>, _s: Span, _n: hir::HirId) {
// Don't visit nested modules, since we run a separate visitor walk
// for each module in `privacy_access_levels`
}
fn visit_nested_body(&mut self, body: hir::BodyId) {
let orig_tables = mem::replace(&mut self.tables, self.tcx.body_tables(body));
let body = self.tcx.hir().body(body);
self.visit_body(body);
self.tables = orig_tables;
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
let orig_current_item = mem::replace(&mut self.current_item, item.hir_id);
let orig_tables =
mem::replace(&mut self.tables, item_tables(self.tcx, item.hir_id, self.empty_tables));
intravisit::walk_item(self, item);
self.current_item = orig_current_item;
self.tables = orig_tables;
}
fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem<'tcx>) {
let orig_tables =
mem::replace(&mut self.tables, item_tables(self.tcx, ti.hir_id, self.empty_tables));
intravisit::walk_trait_item(self, ti);
self.tables = orig_tables;
}
fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem<'tcx>) {
let orig_tables =
mem::replace(&mut self.tables, item_tables(self.tcx, ii.hir_id, self.empty_tables));
intravisit::walk_impl_item(self, ii);
self.tables = orig_tables;
}
fn visit_expr(&mut self, expr: &'tcx hir::Expr) {
match expr.kind {
hir::ExprKind::Struct(ref qpath, ref fields, ref base) => {
let res = self.tables.qpath_res(qpath, expr.hir_id);
let adt = self.tables.expr_ty(expr).ty_adt_def().unwrap();
let variant = adt.variant_of_res(res);
if let Some(ref base) = *base {
// If the expression uses FRU we need to make sure all the unmentioned fields
// are checked for privacy (RFC 736). Rather than computing the set of
// unmentioned fields, just check them all.
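                    // (FRU is the functional record update syntax, e.g. `S { x: 0, ..base }`.)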
for (vf_index, variant_field) in variant.fields.iter().enumerate() {
let field = fields
.iter()
.find(|f| self.tcx.field_index(f.hir_id, self.tables) == vf_index);
let (use_ctxt, span) = match field {
Some(field) => (field.ident.span, field.span),
None => (base.span, base.span),
};
self.check_field(use_ctxt, span, adt, variant_field);
}
} else {
for field in fields {
let use_ctxt = field.ident.span;
let index = self.tcx.field_index(field.hir_id, self.tables);
self.check_field(use_ctxt, field.span, adt, &variant.fields[index]);
}
}
}
_ => {}
}
intravisit::walk_expr(self, expr);
}
fn visit_pat(&mut self, pat: &'tcx hir::Pat) {
match pat.kind {
PatKind::Struct(ref qpath, ref fields, _) => {
let res = self.tables.qpath_res(qpath, pat.hir_id);
let adt = self.tables.pat_ty(pat).ty_adt_def().unwrap();
let variant = adt.variant_of_res(res);
for field in fields {
let use_ctxt = field.ident.span;
let index = self.tcx.field_index(field.hir_id, self.tables);
self.check_field(use_ctxt, field.span, adt, &variant.fields[index]);
}
}
_ => {}
}
intravisit::walk_pat(self, pat);
}
}
////////////////////////////////////////////////////////////////////////////////////////////
/// Type privacy visitor, checks types for privacy and reports violations.
/// Both explicitly written types and inferred types of expressions and patters are checked.
/// Checks are performed on "semantic" types regardless of names and their hygiene.
////////////////////////////////////////////////////////////////////////////////////////////
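// For example (illustrative), `let x = helper();` is reported here when `helper`
// returns a private type from another module, because the inferred type of `x`
// is checked even though no private name appears at the use site.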
struct TypePrivacyVisitor<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
tables: &'a ty::TypeckTables<'tcx>,
current_item: DefId,
in_body: bool,
span: Span,
empty_tables: &'a ty::TypeckTables<'tcx>,
}
impl<'a, 'tcx> TypePrivacyVisitor<'a, 'tcx> {
fn item_is_accessible(&self, did: DefId) -> bool {
def_id_visibility(self.tcx, did).0.is_accessible_from(self.current_item, self.tcx)
}
// Take node-id of an expression or pattern and check its type for privacy.
fn check_expr_pat_type(&mut self, id: hir::HirId, span: Span) -> bool {
self.span = span;
if self.visit(self.tables.node_type(id)) || self.visit(self.tables.node_substs(id)) {
return true;
}
if let Some(adjustments) = self.tables.adjustments().get(id) {
for adjustment in adjustments {
if self.visit(adjustment.target) {
return true;
}
}
}
false
}
fn check_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool {
let is_error = !self.item_is_accessible(def_id);
if is_error {
self.tcx.sess.span_err(self.span, &format!("{} `{}` is private", kind, descr));
}
is_error
}
}
impl<'a, 'tcx> Visitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> {
/// We want to visit items in the context of their containing
/// module and so forth, so supply a crate for doing a deep walk.
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::All(&self.tcx.hir())
}
fn visit_mod(&mut self, _m: &'tcx hir::Mod<'tcx>, _s: Span, _n: hir::HirId) {
// Don't visit nested modules, since we run a separate visitor walk
// for each module in `privacy_access_levels`
}
fn visit_nested_body(&mut self, body: hir::BodyId) {
let orig_tables = mem::replace(&mut self.tables, self.tcx.body_tables(body));
let orig_in_body = mem::replace(&mut self.in_body, true);
let body = self.tcx.hir().body(body);
self.visit_body(body);
self.tables = orig_tables;
self.in_body = orig_in_body;
}
fn visit_ty(&mut self, hir_ty: &'tcx hir::Ty) {
self.span = hir_ty.span;
if self.in_body {
// Types in bodies.
if self.visit(self.tables.node_type(hir_ty.hir_id)) {
return;
}
} else {
// Types in signatures.
// FIXME: This is very ineffective. Ideally each HIR type should be converted
// into a semantic type only once and the result should be cached somehow.
if self.visit(rustc_typeck::hir_ty_to_ty(self.tcx, hir_ty)) {
return;
}
}
intravisit::walk_ty(self, hir_ty);
}
fn visit_trait_ref(&mut self, trait_ref: &'tcx hir::TraitRef) {
self.span = trait_ref.path.span;
if !self.in_body {
// Avoid calling `hir_trait_to_predicates` in bodies, it will ICE.
// The traits' privacy in bodies is already checked as a part of trait object types.
let bounds = rustc_typeck::hir_trait_to_predicates(self.tcx, trait_ref);
for (trait_predicate, _) in bounds.trait_bounds {
if self.visit_trait(*trait_predicate.skip_binder()) {
return;
}
}
for (poly_predicate, _) in bounds.projection_bounds {
let tcx = self.tcx;
if self.visit(poly_predicate.skip_binder().ty)
|| self.visit_trait(poly_predicate.skip_binder().projection_ty.trait_ref(tcx))
{
return;
}
}
}
intravisit::walk_trait_ref(self, trait_ref);
}
// Check types of expressions
fn visit_expr(&mut self, expr: &'tcx hir::Expr) {
if self.check_expr_pat_type(expr.hir_id, expr.span) {
// Do not check nested expressions if the error already happened.
return;
}
match expr.kind {
hir::ExprKind::Assign(.., ref rhs) | hir::ExprKind::Match(ref rhs, ..) => {
// Do not report duplicate errors for `x = y` and `match x { ... }`.
if self.check_expr_pat_type(rhs.hir_id, rhs.span) {
return;
}
}
hir::ExprKind::MethodCall(_, span, _) => {
// Method calls have to be checked specially.
self.span = span;
if let Some(def_id) = self.tables.type_dependent_def_id(expr.hir_id) {
if self.visit(self.tcx.type_of(def_id)) {
return;
}
} else {
self.tcx
.sess
.delay_span_bug(expr.span, "no type-dependent def for method call");
}
}
_ => {}
}
intravisit::walk_expr(self, expr);
}
// Prohibit access to associated items with insufficient nominal visibility.
//
// Additionally, until better reachability analysis for macros 2.0 is available,
// we prohibit access to private statics from other crates, this allows to give
// more code internal visibility at link time. (Access to private functions
// is already prohibited by type privacy for function types.)
fn visit_qpath(&mut self, qpath: &'tcx hir::QPath, id: hir::HirId, span: Span) {
let def = match self.tables.qpath_res(qpath, id) {
Res::Def(kind, def_id) => Some((kind, def_id)),
_ => None,
};
let def = def.filter(|(kind, _)| match kind {
DefKind::Method
| DefKind::AssocConst
| DefKind::AssocTy
| DefKind::AssocOpaqueTy
| DefKind::Static => true,
_ => false,
});
if let Some((kind, def_id)) = def {
let is_local_static =
if let DefKind::Static = kind { def_id.is_local() } else { false };
if !self.item_is_accessible(def_id) && !is_local_static {
let name = match *qpath {
hir::QPath::Resolved(_, ref path) => path.to_string(),
hir::QPath::TypeRelative(_, ref segment) => segment.ident.to_string(),
};
let msg = format!("{} `{}` is private", kind.descr(def_id), name);
self.tcx.sess.span_err(span, &msg);
return;
}
}
intravisit::walk_qpath(self, qpath, id, span);
}
// Check types of patterns.
fn visit_pat(&mut self, pattern: &'tcx hir::Pat) {
if self.check_expr_pat_type(pattern.hir_id, pattern.span) {
// Do not check nested patterns if the error already happened.
return;
}
intravisit::walk_pat(self, pattern);
}
fn visit_local(&mut self, local: &'tcx hir::Local) {
if let Some(ref init) = local.init {
if self.check_expr_pat_type(init.hir_id, init.span) {
// Do not report duplicate errors for `let x = y`.
return;
}
}
intravisit::walk_local(self, local);
}
// Check types in item interfaces.
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
let orig_current_item =
mem::replace(&mut self.current_item, self.tcx.hir().local_def_id(item.hir_id));
let orig_in_body = mem::replace(&mut self.in_body, false);
let orig_tables =
mem::replace(&mut self.tables, item_tables(self.tcx, item.hir_id, self.empty_tables));
intravisit::walk_item(self, item);
self.tables = orig_tables;
self.in_body = orig_in_body;
self.current_item = orig_current_item;
}
fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem<'tcx>) {
let orig_tables =
mem::replace(&mut self.tables, item_tables(self.tcx, ti.hir_id, self.empty_tables));
intravisit::walk_trait_item(self, ti);
self.tables = orig_tables;
}
fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem<'tcx>) {
let orig_tables =
mem::replace(&mut self.tables, item_tables(self.tcx, ii.hir_id, self.empty_tables));
intravisit::walk_impl_item(self, ii);
self.tables = orig_tables;
}
}
impl DefIdVisitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn visit_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool {
self.check_def_id(def_id, kind, descr)
}
}
///////////////////////////////////////////////////////////////////////////////
/// Obsolete visitors for checking for private items in public interfaces.
/// These visitors are supposed to be kept in frozen state and produce an
/// "old error node set". For backward compatibility the new visitor reports
/// warnings instead of hard errors when the erroneous node is not in this old set.
///////////////////////////////////////////////////////////////////////////////
struct ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
access_levels: &'a AccessLevels,
in_variant: bool,
// Set of errors produced by this obsolete visitor.
old_error_set: HirIdSet,
}
struct ObsoleteCheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> {
inner: &'a ObsoleteVisiblePrivateTypesVisitor<'b, 'tcx>,
/// Whether the type refers to private types.
contains_private: bool,
/// Whether we've recurred at all (i.e., if we're pointing at the
/// first type on which `visit_ty` was called).
at_outer_type: bool,
/// Whether that first type is a public path.
outer_type_is_public_path: bool,
}
impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
fn path_is_private_type(&self, path: &hir::Path) -> bool {
let did = match path.res {
Res::PrimTy(..) | Res::SelfTy(..) | Res::Err => return false,
res => res.def_id(),
};
// A path can only be private if:
// it's in this crate...
if let Some(hir_id) = self.tcx.hir().as_local_hir_id(did) {
// .. and it corresponds to a private type in the AST (this returns
// `None` for type parameters).
match self.tcx.hir().find(hir_id) {
Some(Node::Item(ref item)) => !item.vis.node.is_pub(),
Some(_) | None => false,
}
} else {
return false;
}
}
fn trait_is_public(&self, trait_id: hir::HirId) -> bool {
// FIXME: this would preferably be using `exported_items`, but all
// traits are exported currently (see `EmbargoVisitor.exported_trait`).
self.access_levels.is_public(trait_id)
}
fn check_generic_bound(&mut self, bound: &hir::GenericBound) {
if let hir::GenericBound::Trait(ref trait_ref, _) = *bound {
if self.path_is_private_type(&trait_ref.trait_ref.path) {
self.old_error_set.insert(trait_ref.trait_ref.hir_ref_id);
}
}
}
fn item_is_public(&self, id: &hir::HirId, vis: &hir::Visibility) -> bool {
self.access_levels.is_reachable(*id) || vis.node.is_pub()
}
}
impl<'a, 'b, 'tcx, 'v> Visitor<'v> for ObsoleteCheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> {
NestedVisitorMap::None
}
fn visit_ty(&mut self, ty: &hir::Ty) {
if let hir::TyKind::Path(hir::QPath::Resolved(_, ref path)) = ty.kind {
if self.inner.path_is_private_type(path) {
self.contains_private = true;
// Found what we're looking for, so let's stop working.
return;
}
}
if let hir::TyKind::Path(_) = ty.kind {
if self.at_outer_type {
self.outer_type_is_public_path = true;
}
}
self.at_outer_type = false;
intravisit::walk_ty(self, ty)
}
// Don't want to recurse into `[, .. expr]`.
fn visit_expr(&mut self, _: &hir::Expr) {}
}
impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
/// We want to visit items in the context of their containing
/// module and so forth, so supply a crate for doing a deep walk.
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::All(&self.tcx.hir())
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
match item.kind {
// Contents of a private mod can be re-exported, so we need
// to check internals.
hir::ItemKind::Mod(_) => {}
// An `extern {}` doesn't introduce a new privacy
// namespace (the contents have their own privacies).
hir::ItemKind::ForeignMod(_) => {}
hir::ItemKind::Trait(.., ref bounds, _) => {
if !self.trait_is_public(item.hir_id) {
return;
}
for bound in bounds.iter() {
self.check_generic_bound(bound)
}
}
// Impls need some special handling to try to offer useful
// error messages without (too many) false positives
// (i.e., we could just return here to not check them at
// all, or some worse estimation of whether an impl is
// publicly visible).
hir::ItemKind::Impl(.., ref g, ref trait_ref, ref self_, impl_item_refs) => {
// `impl [... for] Private` is never visible.
let self_contains_private;
// `impl [... for] Public<...>`, but not `impl [... for]
// Vec<Public>` or `(Public,)`, etc.
let self_is_public_path;
// Check the properties of the `Self` type:
{
let mut visitor = ObsoleteCheckTypeForPrivatenessVisitor {
inner: self,
contains_private: false,
at_outer_type: true,
outer_type_is_public_path: false,
};
visitor.visit_ty(&self_);
self_contains_private = visitor.contains_private;
self_is_public_path = visitor.outer_type_is_public_path;
}
// Miscellaneous info about the impl:
// `true` iff this is `impl Private for ...`.
let not_private_trait = trait_ref.as_ref().map_or(
true, // no trait counts as public trait
|tr| {
let did = tr.path.res.def_id();
if let Some(hir_id) = self.tcx.hir().as_local_hir_id(did) {
self.trait_is_public(hir_id)
} else {
true // external traits must be public
}
},
);
// `true` iff this is a trait impl or at least one method is public.
//
// `impl Public { $( fn ...() {} )* }` is not visible.
//
// This is required over just using the methods' privacy
// directly because we might have `impl<T: Foo<Private>> ...`,
// and we shouldn't warn about the generics if all the methods
// are private (because `T` won't be visible externally).
let trait_or_some_public_method = trait_ref.is_some()
|| impl_item_refs.iter().any(|impl_item_ref| {
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
match impl_item.kind {
hir::ImplItemKind::Const(..) | hir::ImplItemKind::Method(..) => {
self.access_levels.is_reachable(impl_item_ref.id.hir_id)
}
hir::ImplItemKind::OpaqueTy(..) | hir::ImplItemKind::TyAlias(_) => {
false
}
}
});
if !self_contains_private && not_private_trait && trait_or_some_public_method {
intravisit::walk_generics(self, g);
match *trait_ref {
None => {
for impl_item_ref in impl_item_refs {
// This is where we choose whether to walk down
// further into the impl to check its items. We
// should only walk into public items so that we
// don't erroneously report errors for private
// types in private items.
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
match impl_item.kind {
hir::ImplItemKind::Const(..)
| hir::ImplItemKind::Method(..)
if self
.item_is_public(&impl_item.hir_id, &impl_item.vis) =>
{
intravisit::walk_impl_item(self, impl_item)
}
hir::ImplItemKind::TyAlias(..) => {
intravisit::walk_impl_item(self, impl_item)
}
_ => {}
}
}
}
Some(ref tr) => {
// Any private types in a trait impl fall into three
// categories.
// 1. mentioned in the trait definition
// 2. mentioned in the type params/generics
// 3. mentioned in the associated types of the impl
//
// Those in 1. can only occur if the trait is in
// this crate and will've been warned about on the
// trait definition (there's no need to warn twice
// so we don't check the methods).
//
// Those in 2. are warned via walk_generics and this
// call here.
intravisit::walk_path(self, &tr.path);
// Those in 3. are warned with this call.
for impl_item_ref in impl_item_refs {
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
if let hir::ImplItemKind::TyAlias(ref ty) = impl_item.kind {
self.visit_ty(ty);
}
}
}
}
} else if trait_ref.is_none() && self_is_public_path {
// `impl Public<Private> { ... }`. Any public static
// methods will be visible as `Public::foo`.
let mut found_pub_static = false;
for impl_item_ref in impl_item_refs {
if self.item_is_public(&impl_item_ref.id.hir_id, &impl_item_ref.vis) {
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
match impl_item_ref.kind {
AssocItemKind::Const => {
found_pub_static = true;
intravisit::walk_impl_item(self, impl_item);
}
AssocItemKind::Method { has_self: false } => {
found_pub_static = true;
intravisit::walk_impl_item(self, impl_item);
}
_ => {}
}
}
}
if found_pub_static {
intravisit::walk_generics(self, g)
}
}
return;
}
// `type ... = ...;` can contain private types, because
// we're introducing a new name.
hir::ItemKind::TyAlias(..) => return,
// Not at all public, so we don't care.
_ if !self.item_is_public(&item.hir_id, &item.vis) => {
return;
}
_ => {}
}
// We've carefully constructed it so that if we're here, then
// any `visit_ty`'s will be called on things that are in
// public signatures, i.e., things that we're interested in for
// this visitor.
intravisit::walk_item(self, item);
}
fn visit_generics(&mut self, generics: &'tcx hir::Generics) {
for param in &generics.params {
for bound in ¶m.bounds {
self.check_generic_bound(bound);
}
}
for predicate in &generics.where_clause.predicates {
match predicate {
hir::WherePredicate::BoundPredicate(bound_pred) => {
for bound in bound_pred.bounds.iter() {
self.check_generic_bound(bound)
}
}
hir::WherePredicate::RegionPredicate(_) => {}
hir::WherePredicate::EqPredicate(eq_pred) => {
self.visit_ty(&eq_pred.rhs_ty);
}
}
}
}
fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
if self.access_levels.is_reachable(item.hir_id) {
intravisit::walk_foreign_item(self, item)
}
}
fn visit_ty(&mut self, t: &'tcx hir::Ty) {
if let hir::TyKind::Path(hir::QPath::Resolved(_, ref path)) = t.kind {
if self.path_is_private_type(path) {
self.old_error_set.insert(t.hir_id);
}
}
intravisit::walk_ty(self, t)
}
fn visit_variant(
&mut self,
v: &'tcx hir::Variant<'tcx>,
g: &'tcx hir::Generics,
item_id: hir::HirId,
) {
if self.access_levels.is_reachable(v.id) {
self.in_variant = true;
intravisit::walk_variant(self, v, g, item_id);
self.in_variant = false;
}
}
fn visit_struct_field(&mut self, s: &'tcx hir::StructField<'tcx>) {
if s.vis.node.is_pub() || self.in_variant {
intravisit::walk_struct_field(self, s);
}
}
// We don't need to introspect into these at all: an
// expression/block context can't possibly contain exported things.
// (Making them no-ops stops us from traversing the whole AST without
// having to be super careful about our `walk_...` calls above.)
fn visit_block(&mut self, _: &'tcx hir::Block) {}
fn visit_expr(&mut self, _: &'tcx hir::Expr) {}
}
///////////////////////////////////////////////////////////////////////////////
/// SearchInterfaceForPrivateItemsVisitor traverses an item's interface and
/// finds any private components in it.
/// PrivateItemsInPublicInterfacesVisitor ensures there are no private types
/// and traits in public interfaces.
///////////////////////////////////////////////////////////////////////////////
struct SearchInterfaceForPrivateItemsVisitor<'tcx> {
tcx: TyCtxt<'tcx>,
item_id: hir::HirId,
item_def_id: DefId,
span: Span,
/// The visitor checks that each component type is at least this visible.
required_visibility: ty::Visibility,
has_pub_restricted: bool,
has_old_errors: bool,
in_assoc_ty: bool,
}
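// Illustrative example, kept as a comment so it is not compiled as part of
// this module (the item names are hypothetical): the kind of source this
// pass rejects is a private type escaping through a public signature, e.g.
//
//     struct Priv;
//     pub fn leak() -> Priv { Priv }
//     //               ^^^^ error[E0446]: private type `Priv` in public interface
//
// The choice between E0445 (private traits) and E0446 (everything else) is
// made in `check_def_id` below.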
impl SearchInterfaceForPrivateItemsVisitor<'tcx> {
fn generics(&mut self) -> &mut Self {
for param in &self.tcx.generics_of(self.item_def_id).params {
match param.kind {
GenericParamDefKind::Lifetime => {}
GenericParamDefKind::Type { has_default, .. } => {
if has_default {
self.visit(self.tcx.type_of(param.def_id));
}
}
GenericParamDefKind::Const => {
self.visit(self.tcx.type_of(param.def_id));
}
}
}
self
}
fn predicates(&mut self) -> &mut Self {
// N.B., we use `explicit_predicates_of` and not `predicates_of`
// because we don't want to report privacy errors due to where
// clauses that the compiler inferred. We only want to
// consider the ones that the user wrote. This is important
// for the inferred outlives rules; see
// `src/test/ui/rfc-2093-infer-outlives/privacy.rs`.
self.visit_predicates(self.tcx.explicit_predicates_of(self.item_def_id));
self
}
fn ty(&mut self) -> &mut Self {
self.visit(self.tcx.type_of(self.item_def_id));
self
}
fn check_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool {
if self.leaks_private_dep(def_id) {
self.tcx.lint_hir(
lint::builtin::EXPORTED_PRIVATE_DEPENDENCIES,
self.item_id,
self.span,
&format!(
"{} `{}` from private dependency '{}' in public \
interface",
kind,
descr,
self.tcx.crate_name(def_id.krate)
),
);
}
let hir_id = match self.tcx.hir().as_local_hir_id(def_id) {
Some(hir_id) => hir_id,
None => return false,
};
let (vis, vis_span, vis_descr) = def_id_visibility(self.tcx, def_id);
if !vis.is_at_least(self.required_visibility, self.tcx) {
let msg = format!("{} {} `{}` in public interface", vis_descr, kind, descr);
if self.has_pub_restricted || self.has_old_errors || self.in_assoc_ty {
let mut err = if kind == "trait" {
struct_span_err!(self.tcx.sess, self.span, E0445, "{}", msg)
} else {
struct_span_err!(self.tcx.sess, self.span, E0446, "{}", msg)
};
err.span_label(self.span, format!("can't leak {} {}", vis_descr, kind));
err.span_label(vis_span, format!("`{}` declared as {}", descr, vis_descr));
err.emit();
} else {
let err_code = if kind == "trait" { "E0445" } else { "E0446" };
self.tcx.lint_hir(
lint::builtin::PRIVATE_IN_PUBLIC,
hir_id,
self.span,
&format!("{} (error {})", msg, err_code),
);
}
}
false
}
/// An item is 'leaked' from a private dependency if all
/// of the following are true:
/// 1. It's contained within a public type
/// 2. It comes from a private crate
fn leaks_private_dep(&self, item_id: DefId) -> bool {
let ret = self.required_visibility == ty::Visibility::Public
&& self.tcx.is_private_dep(item_id.krate);
log::debug!("leaks_private_dep(item_id={:?})={}", item_id, ret);
return ret;
}
}
impl DefIdVisitor<'tcx> for SearchInterfaceForPrivateItemsVisitor<'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn visit_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool {
self.check_def_id(def_id, kind, descr)
}
}
struct PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
has_pub_restricted: bool,
old_error_set: &'a HirIdSet,
}
impl<'a, 'tcx> PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> {
fn check(
&self,
item_id: hir::HirId,
required_visibility: ty::Visibility,
) -> SearchInterfaceForPrivateItemsVisitor<'tcx> {
let mut has_old_errors = false;
        // Slow path, taken only if there are any errors in the crate.
for &id in self.old_error_set {
// Walk up the nodes until we find `item_id` (or we hit a root).
let mut id = id;
loop {
if id == item_id {
has_old_errors = true;
break;
}
let parent = self.tcx.hir().get_parent_node(id);
if parent == id {
break;
}
id = parent;
}
if has_old_errors {
break;
}
}
SearchInterfaceForPrivateItemsVisitor {
tcx: self.tcx,
item_id,
item_def_id: self.tcx.hir().local_def_id(item_id),
span: self.tcx.hir().span(item_id),
required_visibility,
has_pub_restricted: self.has_pub_restricted,
has_old_errors,
in_assoc_ty: false,
}
}
fn check_assoc_item(
&self,
hir_id: hir::HirId,
assoc_item_kind: AssocItemKind,
defaultness: hir::Defaultness,
vis: ty::Visibility,
) {
let mut check = self.check(hir_id, vis);
let (check_ty, is_assoc_ty) = match assoc_item_kind {
AssocItemKind::Const | AssocItemKind::Method { .. } => (true, false),
AssocItemKind::Type => (defaultness.has_value(), true),
// `ty()` for opaque types is the underlying type,
// it's not a part of interface, so we skip it.
AssocItemKind::OpaqueTy => (false, true),
};
check.in_assoc_ty = is_assoc_ty;
check.generics().predicates();
if check_ty {
check.ty();
}
}
}
impl<'a, 'tcx> Visitor<'tcx> for PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::OnlyBodies(&self.tcx.hir())
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
let tcx = self.tcx;
let item_visibility = ty::Visibility::from_hir(&item.vis, item.hir_id, tcx);
match item.kind {
// Crates are always public.
hir::ItemKind::ExternCrate(..) => {}
// All nested items are checked by `visit_item`.
hir::ItemKind::Mod(..) => {}
// Checked in resolve.
hir::ItemKind::Use(..) => {}
// No subitems.
hir::ItemKind::GlobalAsm(..) => {}
// Subitems of these items have inherited publicity.
hir::ItemKind::Const(..)
| hir::ItemKind::Static(..)
| hir::ItemKind::Fn(..)
| hir::ItemKind::TyAlias(..) => {
self.check(item.hir_id, item_visibility).generics().predicates().ty();
}
hir::ItemKind::OpaqueTy(..) => {
// `ty()` for opaque types is the underlying type,
// it's not a part of interface, so we skip it.
self.check(item.hir_id, item_visibility).generics().predicates();
}
hir::ItemKind::Trait(.., trait_item_refs) => {
self.check(item.hir_id, item_visibility).generics().predicates();
for trait_item_ref in trait_item_refs {
self.check_assoc_item(
trait_item_ref.id.hir_id,
trait_item_ref.kind,
trait_item_ref.defaultness,
item_visibility,
);
}
}
hir::ItemKind::TraitAlias(..) => {
self.check(item.hir_id, item_visibility).generics().predicates();
}
hir::ItemKind::Enum(ref def, _) => {
self.check(item.hir_id, item_visibility).generics().predicates();
for variant in def.variants {
for field in variant.data.fields() {
self.check(field.hir_id, item_visibility).ty();
}
}
}
// Subitems of foreign modules have their own publicity.
hir::ItemKind::ForeignMod(ref foreign_mod) => {
for foreign_item in foreign_mod.items {
let vis = ty::Visibility::from_hir(&foreign_item.vis, item.hir_id, tcx);
self.check(foreign_item.hir_id, vis).generics().predicates().ty();
}
}
// Subitems of structs and unions have their own publicity.
hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
self.check(item.hir_id, item_visibility).generics().predicates();
for field in struct_def.fields() {
let field_visibility = ty::Visibility::from_hir(&field.vis, item.hir_id, tcx);
self.check(field.hir_id, min(item_visibility, field_visibility, tcx)).ty();
}
}
// An inherent impl is public when its type is public
// Subitems of inherent impls have their own publicity.
// A trait impl is public when both its type and its trait are public
// Subitems of trait impls have inherited publicity.
hir::ItemKind::Impl(.., ref trait_ref, _, impl_item_refs) => {
let impl_vis = ty::Visibility::of_impl(item.hir_id, tcx, &Default::default());
self.check(item.hir_id, impl_vis).generics().predicates();
for impl_item_ref in impl_item_refs {
let impl_item = tcx.hir().impl_item(impl_item_ref.id);
let impl_item_vis = if trait_ref.is_none() {
min(
ty::Visibility::from_hir(&impl_item.vis, item.hir_id, tcx),
impl_vis,
tcx,
)
} else {
impl_vis
};
self.check_assoc_item(
impl_item_ref.id.hir_id,
impl_item_ref.kind,
impl_item_ref.defaultness,
impl_item_vis,
);
}
}
}
}
}
pub fn provide(providers: &mut Providers<'_>) {
*providers = Providers {
privacy_access_levels,
check_private_in_public,
check_mod_privacy,
..*providers
};
}
fn check_mod_privacy(tcx: TyCtxt<'_>, module_def_id: DefId) {
let empty_tables = ty::TypeckTables::empty(None);
// Check privacy of names not checked in previous compilation stages.
let mut visitor = NamePrivacyVisitor {
tcx,
tables: &empty_tables,
current_item: hir::DUMMY_HIR_ID,
empty_tables: &empty_tables,
};
let (module, span, hir_id) = tcx.hir().get_module(module_def_id);
intravisit::walk_mod(&mut visitor, module, hir_id);
// Check privacy of explicitly written types and traits as well as
// inferred types of expressions and patterns.
let mut visitor = TypePrivacyVisitor {
tcx,
tables: &empty_tables,
current_item: module_def_id,
in_body: false,
span,
empty_tables: &empty_tables,
};
intravisit::walk_mod(&mut visitor, module, hir_id);
}
fn privacy_access_levels(tcx: TyCtxt<'_>, krate: CrateNum) -> &AccessLevels {
assert_eq!(krate, LOCAL_CRATE);
// Build up a set of all exported items in the AST. This is a set of all
// items which are reachable from external crates based on visibility.
let mut visitor = EmbargoVisitor {
tcx,
access_levels: Default::default(),
macro_reachable: Default::default(),
prev_level: Some(AccessLevel::Public),
changed: false,
};
loop {
intravisit::walk_crate(&mut visitor, tcx.hir().krate());
if visitor.changed {
visitor.changed = false;
} else {
break;
}
}
visitor.update(hir::CRATE_HIR_ID, Some(AccessLevel::Public));
tcx.arena.alloc(visitor.access_levels)
}
fn check_private_in_public(tcx: TyCtxt<'_>, krate: CrateNum) {
assert_eq!(krate, LOCAL_CRATE);
let access_levels = tcx.privacy_access_levels(LOCAL_CRATE);
let krate = tcx.hir().krate();
let mut visitor = ObsoleteVisiblePrivateTypesVisitor {
tcx,
access_levels: &access_levels,
in_variant: false,
old_error_set: Default::default(),
};
intravisit::walk_crate(&mut visitor, krate);
let has_pub_restricted = {
let mut pub_restricted_visitor = PubRestrictedVisitor { tcx, has_pub_restricted: false };
intravisit::walk_crate(&mut pub_restricted_visitor, krate);
pub_restricted_visitor.has_pub_restricted
};
// Check for private types and traits in public interfaces.
let mut visitor = PrivateItemsInPublicInterfacesVisitor {
tcx,
has_pub_restricted,
old_error_set: &visitor.old_error_set,
};
krate.visit_all_item_likes(&mut DeepVisitor::new(&mut visitor));
}
| 41.555291 | 114 | 0.517177 |
4aa28cc03d6c94ea404e8e34ca9e8c8ccbee341b
| 7,853 |
use crate::*;
// κ and ε parameters used in the conversion between XYZ and L*a*b*. See
// http://www.brucelindbloom.com/LContinuity.html for an explanation of why
// these differ from the values provided by the CIE standard.
const KAPPA: f32 = 24389.0 / 27.0;
const EPSILON: f32 = 216.0 / 24389.0;
const CBRT_EPSILON: f32 = 0.20689655172413796;
//
// CIE to CIE
//
impl From<CIELabColor> for CIEXYZColor {
fn from(f: CIELabColor) -> Self {
let fy = (f.l + 16.0) / 116.0;
let fx = (f.a / 500.0) + fy;
let fz = fy - (f.b / 200.0);
let xr = if fx > CBRT_EPSILON {
fx.powi(3)
} else {
((fx * 116.0) - 16.0) / KAPPA
};
let yr = if f.l > EPSILON * KAPPA {
fy.powi(3)
} else {
f.l / KAPPA
};
let zr = if fz > CBRT_EPSILON {
fz.powi(3)
} else {
((fz * 116.0) - 16.0) / KAPPA
};
        CIEXYZColor {
            x: xr * 0.95047,
            y: yr,
            z: zr * 1.08883,
        }
}
}
impl From<CIEXYZColor> for CIELabColor {
fn from(f: CIEXYZColor) -> Self {
let x = xyz_to_lab_map(f.x / 0.95047);
let y = xyz_to_lab_map(f.y);
let z = xyz_to_lab_map(f.z / 1.08883);
CIELabColor {
l: (116.0 * y) - 16.0,
a: 500.0 * (x - y),
b: 200.0 * (y - z),
}
}
}
#[inline]
fn xyz_to_lab_map(c: f32) -> f32 {
if c > EPSILON {
c.powf(1.0 / 3.0)
} else {
(KAPPA * c + 16.0) / 116.0
}
}
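// A minimal round-trip sanity check, added as an illustration and not part of
// the original source. It assumes the `CIELabColor`/`CIEXYZColor` structs are
// constructible with struct literals inside this crate (as they are elsewhere
// in this file): since the two `From` impls above are inverses, a mid-gray
// L*a*b* value should survive a Lab -> XYZ -> Lab trip.
#[cfg(test)]
mod lab_xyz_round_trip {
    use super::*;

    #[test]
    fn mid_gray_round_trips() {
        let lab = CIELabColor { l: 50.0, a: 0.0, b: 0.0 };
        let xyz = CIEXYZColor::from(lab);
        let back = CIELabColor::from(xyz);
        assert!((back.l - 50.0).abs() < 1e-3);
        assert!(back.a.abs() < 1e-3 && back.b.abs() < 1e-3);
    }
}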
//
// Gray to CIE
//
impl From<DigitalGrayscaleColor> for CIELabColor {
fn from(f: DigitalGrayscaleColor) -> Self {
CIELabColor {
l: (f.v as f32) / 255.0 * 100.0,
a: 0.0,
b: 0.0
}
}
}
impl From<NormalizedGrayscaleColor> for CIELabColor {
fn from(f: NormalizedGrayscaleColor) -> Self {
CIELabColor {
l: f.v * 100.0,
a: 0.0,
b: 0.0
}
}
}
impl From<DigitalGrayscaleColor> for CIEXYZColor {
fn from(f: DigitalGrayscaleColor) -> Self {
CIEXYZColor::from(CIELabColor::from(f))
}
}
impl From<NormalizedGrayscaleColor> for CIEXYZColor {
fn from(f: NormalizedGrayscaleColor) -> Self {
CIEXYZColor::from(CIELabColor::from(f))
}
}
//
// RGB to CIE
//
/// using [sRGB conversion matrix](http://www.brucelindbloom.com/index.html?Calc.html)
impl From<DigitalRGBColor> for CIEXYZColor {
fn from(f: DigitalRGBColor) -> Self {
let r = rgb_to_xyz_map(f.r);
let g = rgb_to_xyz_map(f.g);
let b = rgb_to_xyz_map(f.b);
        CIEXYZColor {
            x: r * 0.4124564390896921 + g * 0.357576077643909 + b * 0.18043748326639894,
            y: r * 0.21267285140562248 + g * 0.715152155287818 + b * 0.07217499330655958,
            z: r * 0.019333895582329317 + g * 0.119192025881303 + b * 0.9503040785363677,
        }
}
}
/// using [sRGB conversion matrix](http://www.brucelindbloom.com/index.html?Calc.html)
impl From<NormalizedRGBColor> for CIEXYZColor {
fn from(f: NormalizedRGBColor) -> Self {
CIEXYZColor::from(DigitalRGBColor::from(f))
}
}
/// using [sRGB conversion matrix](http://www.brucelindbloom.com/index.html?Calc.html)
impl From<DigitalRGBAColor> for CIEXYZColor {
fn from(f: DigitalRGBAColor) -> Self {
CIEXYZColor::from(DigitalRGBColor::from(f))
}
}
/// using [sRGB conversion matrix](http://www.brucelindbloom.com/index.html?Calc.html)
impl From<NormalizedRGBAColor> for CIEXYZColor {
fn from(f: NormalizedRGBAColor) -> Self {
CIEXYZColor::from(DigitalRGBColor::from(f))
}
}
/// using [sRGB conversion matrix](http://www.brucelindbloom.com/index.html?Calc.html)
impl From<DigitalRGBColor> for CIELabColor {
fn from(f: DigitalRGBColor) -> Self {
CIELabColor::from(CIEXYZColor::from(f))
}
}
/// using [sRGB conversion matrix](http://www.brucelindbloom.com/index.html?Calc.html)
impl From<NormalizedRGBColor> for CIELabColor {
fn from(f: NormalizedRGBColor) -> Self {
CIELabColor::from(CIEXYZColor::from(f))
}
}
/// using [sRGB conversion matrix](http://www.brucelindbloom.com/index.html?Calc.html)
impl From<DigitalRGBAColor> for CIELabColor {
fn from(f: DigitalRGBAColor) -> Self {
CIELabColor::from(CIEXYZColor::from(f))
}
}
/// using [sRGB conversion matrix](http://www.brucelindbloom.com/index.html?Calc.html)
impl From<NormalizedRGBAColor> for CIELabColor {
fn from(f: NormalizedRGBAColor) -> Self {
CIELabColor::from(CIEXYZColor::from(f))
}
}
#[inline]
fn rgb_to_xyz_map(c: u8) -> f32 {
if c > 10 {
const A: f32 = 0.055 * 255.0;
const D: f32 = 1.055 * 255.0;
((c as f32 + A) / D).powf(2.4)
} else {
const D: f32 = 12.92 * 255.0;
c as f32 / D
}
}
//
// YCbCr to CIE
//
/// using [JFIF/JPEG conversion](https://www.w3.org/Graphics/JPEG/jfif3.pdf) for conversion to RGB
/// and the [sRGB conversion matrix](http://www.brucelindbloom.com/index.html?Calc.html) for the conversion to CIE
impl From<NormalizedYCbCrColor> for CIEXYZColor {
fn from(f: NormalizedYCbCrColor) -> Self {
CIEXYZColor::from(DigitalRGBColor::from(f))
}
}
/// using [JFIF/JPEG conversion](https://www.w3.org/Graphics/JPEG/jfif3.pdf) for conversion to RGB
/// and the [sRGB conversion matrix](http://www.brucelindbloom.com/index.html?Calc.html) for the conversion to CIE
impl From<DigitalYCbCrColor> for CIEXYZColor {
fn from(f: DigitalYCbCrColor) -> Self {
CIEXYZColor::from(DigitalRGBColor::from(f))
}
}
/// using [JFIF/JPEG conversion](https://www.w3.org/Graphics/JPEG/jfif3.pdf) for conversion to RGB
/// and the [sRGB conversion matrix](http://www.brucelindbloom.com/index.html?Calc.html) for the conversion to CIE
impl From<NormalizedYCbCrColor> for CIELabColor {
fn from(f: NormalizedYCbCrColor) -> Self {
CIELabColor::from(CIEXYZColor::from(f))
}
}
/// using [JFIF/JPEG conversion](https://www.w3.org/Graphics/JPEG/jfif3.pdf) for conversion to RGB
/// and the [sRGB conversion matrix](http://www.brucelindbloom.com/index.html?Calc.html) for the conversion to CIE
impl From<DigitalYCbCrColor> for CIELabColor {
fn from(f: DigitalYCbCrColor) -> Self {
CIELabColor::from(CIEXYZColor::from(f))
}
}
//
// Color conversion traits
//
impl YCbCrConvertible for CIELabColor {
fn convert_vec_ycbcr(items: Vec<Self>) -> Vec<DigitalYCbCrColor> {
items.into_iter().map(|x| DigitalYCbCrColor::from(x)).collect()
}
}
impl YCbCrConvertible for CIEXYZColor {
fn convert_vec_ycbcr(items: Vec<Self>) -> Vec<DigitalYCbCrColor> {
items.into_iter().map(|x| DigitalYCbCrColor::from(x)).collect()
}
}
impl RGBConvertible for CIELabColor {
fn convert_vec_rgb(items: Vec<Self>) -> Vec<DigitalRGBColor> {
items.into_iter().map(|x| DigitalRGBColor::from(x)).collect()
}
fn convert_iter_rgb(items: Box<dyn Iterator<Item = Self>>) -> Box<dyn Iterator<Item = DigitalRGBColor>> {
Box::new(items.map(|x| DigitalRGBColor::from(x)))
}
}
impl RGBConvertible for CIEXYZColor {
fn convert_vec_rgb(items: Vec<Self>) -> Vec<DigitalRGBColor> {
items.into_iter().map(|x| DigitalRGBColor::from(x)).collect()
}
fn convert_iter_rgb(items: Box<dyn Iterator<Item = Self>>) -> Box<dyn Iterator<Item = DigitalRGBColor>> {
Box::new(items.map(|x| DigitalRGBColor::from(x)))
}
}
impl RGBAConvertible for CIELabColor {
fn convert_vec_rgba(items: Vec<Self>) -> Vec<DigitalRGBAColor> {
items.into_iter().map(|x| DigitalRGBAColor::from(x)).collect()
}
}
impl RGBAConvertible for CIEXYZColor {
fn convert_vec_rgba(items: Vec<Self>) -> Vec<DigitalRGBAColor> {
items.into_iter().map(|x| DigitalRGBAColor::from(x)).collect()
}
}
| 29.633962 | 110 | 0.624602 |
9ce60217c7237e05f5b722bfd04e5622968379d5
| 31,148 |
use std::error::Error;
use std::fmt;
use std::io;
use std::io::Read;
use std::mem;
use std::ops::{Deref, DerefMut};
use buffer::{ImageBuffer, Pixel};
use color;
use color::ColorType;
use animation::Frames;
#[cfg(feature = "pnm")]
use pnm::PNMSubtype;
/// An enumeration of Image errors
#[derive(Debug)]
pub enum ImageError {
/// The Image is not formatted properly
FormatError(String),
/// The Image's dimensions are either too small or too large
DimensionError,
/// The Decoder does not support this image format
UnsupportedError(String),
/// The Decoder does not support this color type
UnsupportedColor(ColorType),
/// Not enough data was provided to the Decoder
/// to decode the image
NotEnoughData,
/// An I/O Error occurred while decoding the image
IoError(io::Error),
/// The end of the image has been reached
ImageEnd,
/// There is not enough memory to complete the given operation
InsufficientMemory,
}
impl fmt::Display for ImageError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
ImageError::FormatError(ref e) => write!(fmt, "Format error: {}", e),
ImageError::DimensionError => write!(
fmt,
"The Image's dimensions are either too \
small or too large"
),
ImageError::UnsupportedError(ref f) => write!(
fmt,
"The Decoder does not support the \
image format `{}`",
f
),
ImageError::UnsupportedColor(ref c) => write!(
fmt,
"The decoder does not support \
the color type `{:?}`",
c
),
ImageError::NotEnoughData => write!(
fmt,
"Not enough data was provided to the \
Decoder to decode the image"
),
ImageError::IoError(ref e) => e.fmt(fmt),
ImageError::ImageEnd => write!(fmt, "The end of the image has been reached"),
ImageError::InsufficientMemory => write!(fmt, "Insufficient memory"),
}
}
}
impl Error for ImageError {
fn description(&self) -> &str {
match *self {
ImageError::FormatError(..) => "Format error",
ImageError::DimensionError => "Dimension error",
ImageError::UnsupportedError(..) => "Unsupported error",
ImageError::UnsupportedColor(..) => "Unsupported color",
ImageError::NotEnoughData => "Not enough data",
ImageError::IoError(..) => "IO error",
ImageError::ImageEnd => "Image end",
ImageError::InsufficientMemory => "Insufficient memory",
}
}
fn cause(&self) -> Option<&Error> {
match *self {
ImageError::IoError(ref e) => Some(e),
_ => None,
}
}
}
impl From<io::Error> for ImageError {
fn from(err: io::Error) -> ImageError {
ImageError::IoError(err)
}
}
/// Result of an image decoding/encoding process
pub type ImageResult<T> = Result<T, ImageError>;
/// An enumeration of supported image formats.
/// Not all formats support both encoding and decoding.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum ImageFormat {
/// An Image in PNG Format
PNG,
/// An Image in JPEG Format
JPEG,
/// An Image in GIF Format
GIF,
/// An Image in WEBP Format
WEBP,
/// An Image in general PNM Format
PNM,
/// An Image in TIFF Format
TIFF,
/// An Image in TGA Format
TGA,
/// An Image in BMP Format
BMP,
/// An Image in ICO Format
ICO,
/// An Image in Radiance HDR Format
HDR,
}
/// An enumeration of supported image formats for encoding.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum ImageOutputFormat {
#[cfg(feature = "png_codec")]
/// An Image in PNG Format
PNG,
#[cfg(feature = "jpeg")]
/// An Image in JPEG Format with specified quality
JPEG(u8),
#[cfg(feature = "pnm")]
/// An Image in one of the PNM Formats
PNM(PNMSubtype),
#[cfg(feature = "gif_codec")]
/// An Image in GIF Format
GIF,
#[cfg(feature = "ico")]
/// An Image in ICO Format
ICO,
#[cfg(feature = "bmp")]
/// An Image in BMP Format
BMP,
/// A value for signalling an error: An unsupported format was requested
// Note: When TryFrom is stabilized, this value should not be needed, and
// a TryInto<ImageOutputFormat> should be used instead of an Into<ImageOutputFormat>.
Unsupported(String),
}
impl From<ImageFormat> for ImageOutputFormat {
fn from(fmt: ImageFormat) -> Self {
match fmt {
#[cfg(feature = "png_codec")]
ImageFormat::PNG => ImageOutputFormat::PNG,
#[cfg(feature = "jpeg")]
ImageFormat::JPEG => ImageOutputFormat::JPEG(75),
#[cfg(feature = "pnm")]
ImageFormat::PNM => ImageOutputFormat::PNM(PNMSubtype::ArbitraryMap),
#[cfg(feature = "gif_codec")]
ImageFormat::GIF => ImageOutputFormat::GIF,
#[cfg(feature = "ico")]
ImageFormat::ICO => ImageOutputFormat::ICO,
#[cfg(feature = "bmp")]
ImageFormat::BMP => ImageOutputFormat::BMP,
f => ImageOutputFormat::Unsupported(format!(
"Image format {:?} not supported for encoding.",
f
)),
}
}
}
// This struct manages the buffering associated with implementing `Read` and `Seek` on decoders
// that can only decode whole ranges of bytes (e.g. scanlines) at a time.
pub(crate) struct ImageReadBuffer {
scanline_bytes: usize,
buffer: Vec<u8>,
consumed: usize,
total_bytes: usize,
offset: usize,
}
impl ImageReadBuffer {
pub fn new(scanline_bytes: usize, total_bytes: usize) -> Self {
Self {
scanline_bytes,
buffer: Vec::new(),
consumed: 0,
total_bytes,
offset: 0,
}
}
pub fn read<F>(&mut self, buf: &mut [u8], mut read_scanline: F) -> io::Result<usize>
where
F: FnMut(&mut [u8]) -> io::Result<usize>,
{
if self.buffer.len() == self.consumed {
if self.offset == self.total_bytes {
return Ok(0);
} else if buf.len() >= self.scanline_bytes {
// If there is nothing buffered and the user requested a full scanline worth of
// data, skip buffering.
let bytes_read = read_scanline(&mut buf[..self.scanline_bytes])?;
self.offset += bytes_read;
return Ok(bytes_read);
} else {
// Lazily allocate buffer the first time that read is called with a buffer smaller
// than the scanline size.
if self.buffer.is_empty() {
self.buffer.resize(self.scanline_bytes, 0);
}
self.consumed = 0;
let bytes_read = read_scanline(&mut self.buffer[..])?;
self.buffer.resize(bytes_read, 0);
self.offset += bytes_read;
assert!(bytes_read == self.scanline_bytes || self.offset == self.total_bytes);
}
}
// Finally, copy bytes into output buffer.
let bytes_buffered = self.buffer.len() - self.consumed;
if bytes_buffered > buf.len() {
::copy_memory(&self.buffer[self.consumed..][..buf.len()], &mut buf[..]);
self.consumed += buf.len();
Ok(buf.len())
} else {
::copy_memory(&self.buffer[self.consumed..], &mut buf[..bytes_buffered]);
self.consumed = self.buffer.len();
Ok(bytes_buffered)
}
}
}
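// Illustrative in-crate sketch (not part of the original file): when `read` is
// called with a buffer smaller than one scanline, `ImageReadBuffer` decodes a
// whole scanline into its internal buffer and hands it out piecewise, so the
// caller still receives every byte exactly once and in order. The counter
// closure below stands in for a real `read_scanline` implementation.
#[cfg(test)]
mod image_read_buffer_sketch {
    use super::ImageReadBuffer;

    #[test]
    fn small_reads_are_buffered() {
        // One "image" of 8 bytes, decoded 4 bytes (one scanline) at a time.
        let mut next = 0u8;
        let mut read_buf = ImageReadBuffer::new(4, 8);
        let mut out = Vec::new();
        let mut chunk = [0u8; 3]; // deliberately smaller than a scanline
        loop {
            let n = read_buf
                .read(&mut chunk, |dst| {
                    for b in dst.iter_mut() {
                        *b = next;
                        next += 1;
                    }
                    Ok(dst.len())
                })
                .unwrap();
            if n == 0 {
                break;
            }
            out.extend_from_slice(&chunk[..n]);
        }
        assert_eq!(out, vec![0, 1, 2, 3, 4, 5, 6, 7]);
    }
}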
/// Decodes a specific region of the image, represented by the rectangle
/// starting from (`x`, `y`) and having the given `width` and `height`.
pub(crate) fn load_rect<D, F, F1, F2>(x: u64, y: u64, width: u64, height: u64, buf: &mut [u8],
progress_callback: F,
decoder: &mut D,
mut seek_scanline: F1,
mut read_scanline: F2) -> ImageResult<()>
where D: ImageDecoder,
F: Fn(Progress),
F1: FnMut(&mut D, u64) -> io::Result<()>,
F2: FnMut(&mut D, &mut [u8]) -> io::Result<usize>
{
let dimensions = decoder.dimensions();
let row_bytes = decoder.row_bytes();
let scanline_bytes = decoder.scanline_bytes();
let bits_per_pixel = color::bits_per_pixel(decoder.colortype()) as u64;
let total_bits = width * height * bits_per_pixel;
let mut bits_read = 0u64;
let mut current_scanline = 0;
let mut tmp = Vec::new();
{
// Read a range of the image starting from bit number `start` and continuing until bit
        // number `end`. Updates `current_scanline` and `bits_read` appropriately.
let mut read_image_range = |start: u64, end: u64| -> ImageResult<()> {
let target_scanline = start / (scanline_bytes * 8);
if target_scanline != current_scanline {
seek_scanline(decoder, target_scanline)?;
current_scanline = target_scanline;
}
let mut position = current_scanline * scanline_bytes * 8;
while position < end {
if position >= start && end - position >= scanline_bytes * 8 && bits_read % 8 == 0 {
read_scanline(decoder, &mut buf[((bits_read/8) as usize)..]
[..(scanline_bytes as usize)])?;
bits_read += scanline_bytes * 8;
} else {
tmp.resize(scanline_bytes as usize, 0u8);
read_scanline(decoder, &mut tmp)?;
let offset = start.saturating_sub(position);
let len = (end - start)
.min(scanline_bytes * 8 - offset)
.min(end - position);
if bits_read % 8 == 0 && offset % 8 == 0 && len % 8 == 0 {
let o = (offset / 8) as usize;
let l = (len / 8) as usize;
buf[((bits_read/8) as usize)..][..l].copy_from_slice(&tmp[o..][..l]);
bits_read += len;
} else {
unimplemented!("Target rectangle not aligned on byte boundaries")
}
}
current_scanline += 1;
position += scanline_bytes * 8;
progress_callback(Progress {current: bits_read, total: total_bits});
}
Ok(())
};
        if x + width > dimensions.0 || y + height > dimensions.1
            || width == 0 || height == 0 {
return Err(ImageError::DimensionError);
}
if scanline_bytes > usize::max_value() as u64 {
return Err(ImageError::InsufficientMemory);
}
progress_callback(Progress {current: 0, total: total_bits});
if x == 0 && width == dimensions.0 {
let start = x * bits_per_pixel + y * row_bytes * 8;
let end = (x + width) * bits_per_pixel + (y + height - 1) * row_bytes * 8;
read_image_range(start, end)?;
} else {
for row in y..(y+height) {
let start = x * bits_per_pixel + row * row_bytes * 8;
let end = (x + width) * bits_per_pixel + row * row_bytes * 8;
read_image_range(start, end)?;
}
}
}
// Seek back to the start
Ok(seek_scanline(decoder, 0)?)
}
/// Represents the progress of an image operation.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Progress {
current: u64,
total: u64,
}
/// The trait that all decoders implement
pub trait ImageDecoder: Sized {
/// The type of reader produced by `into_reader`.
type Reader: Read;
/// Returns a tuple containing the width and height of the image
fn dimensions(&self) -> (u64, u64);
/// Returns the color type of the image e.g. RGB(8) (8bit RGB)
fn colortype(&self) -> ColorType;
/// Returns a reader that can be used to obtain the bytes of the image. For the best
/// performance, always try to read at least `scanline_bytes` from the reader at a time. Reading
/// fewer bytes will cause the reader to perform internal buffering.
fn into_reader(self) -> ImageResult<Self::Reader>;
/// Returns the number of bytes in a single row of the image. All decoders will pad image rows
/// to a byte boundary.
fn row_bytes(&self) -> u64 {
(self.dimensions().0 * color::bits_per_pixel(self.colortype()) as u64 + 7) / 8
}
/// Returns the total number of bytes in the image.
fn total_bytes(&self) -> u64 {
self.dimensions().1 * self.row_bytes()
}
/// Returns the minimum number of bytes that can be efficiently read from this decoder. This may
/// be as few as 1 or as many as `total_bytes()`.
fn scanline_bytes(&self) -> u64 {
self.total_bytes()
}
/// Returns all the bytes in the image.
fn read_image(self) -> ImageResult<Vec<u8>> {
self.read_image_with_progress(|_| {})
}
/// Same as `read_image` but periodically calls the provided callback to give updates on loading
/// progress.
fn read_image_with_progress<F: Fn(Progress)>(
self,
progress_callback: F,
) -> ImageResult<Vec<u8>> {
let total_bytes = self.total_bytes();
if total_bytes > usize::max_value() as u64 {
return Err(ImageError::InsufficientMemory);
}
let total_bytes = total_bytes as usize;
let scanline_bytes = self.scanline_bytes() as usize;
let target_read_size = if scanline_bytes < 4096 {
(4096 / scanline_bytes) * scanline_bytes
} else {
scanline_bytes
};
let mut reader = self.into_reader()?;
let mut bytes_read = 0;
let mut contents = vec![0; total_bytes];
while bytes_read < total_bytes {
let read_size = target_read_size.min(total_bytes - bytes_read);
reader.read_exact(&mut contents[bytes_read..][..read_size])?;
bytes_read += read_size;
progress_callback(Progress {
current: bytes_read as u64,
total: total_bytes as u64,
});
}
Ok(contents)
}
}
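// Sketch of a trivial in-memory decoder, added for illustration only; the
// `GrayDecoder` type is hypothetical and not part of the crate. Everything
// beyond `dimensions`, `colortype` and `into_reader` comes from the trait's
// default methods, so `read_image` simply drains the reader.
#[cfg(test)]
mod decoder_defaults_sketch {
    use super::*;

    struct GrayDecoder(Vec<u8>);

    impl ImageDecoder for GrayDecoder {
        type Reader = ::std::io::Cursor<Vec<u8>>;
        fn dimensions(&self) -> (u64, u64) {
            (2, 2)
        }
        fn colortype(&self) -> ColorType {
            ColorType::Gray(8)
        }
        fn into_reader(self) -> ImageResult<Self::Reader> {
            Ok(::std::io::Cursor::new(self.0))
        }
    }

    #[test]
    fn read_image_uses_defaults() {
        let decoder = GrayDecoder(vec![1, 2, 3, 4]);
        // 2x2 gray image at 8 bits per pixel => 2 bytes per row, 4 in total.
        assert_eq!(decoder.row_bytes(), 2);
        assert_eq!(decoder.total_bytes(), 4);
        assert_eq!(decoder.read_image().unwrap(), vec![1, 2, 3, 4]);
    }
}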
/// ImageDecoderExt trait
pub trait ImageDecoderExt: ImageDecoder + Sized {
/// Read a rectangular section of the image.
fn read_rect(
&mut self,
x: u64,
y: u64,
width: u64,
height: u64,
buf: &mut [u8],
) -> ImageResult<()> {
self.read_rect_with_progress(x, y, width, height, buf, |_|{})
}
/// Read a rectangular section of the image, periodically reporting progress.
fn read_rect_with_progress<F: Fn(Progress)>(
&mut self,
x: u64,
y: u64,
width: u64,
height: u64,
buf: &mut [u8],
progress_callback: F,
) -> ImageResult<()>;
}
/// AnimationDecoder trait
pub trait AnimationDecoder {
/// Consume the decoder producing a series of frames.
fn into_frames(self) -> ImageResult<Frames>;
}
/// Immutable pixel iterator
pub struct Pixels<'a, I: ?Sized + 'a> {
image: &'a I,
x: u32,
y: u32,
width: u32,
height: u32,
}
impl<'a, I: GenericImageView> Iterator for Pixels<'a, I> {
type Item = (u32, u32, I::Pixel);
fn next(&mut self) -> Option<(u32, u32, I::Pixel)> {
if self.x >= self.width {
self.x = 0;
self.y += 1;
}
if self.y >= self.height {
None
} else {
let pixel = self.image.get_pixel(self.x, self.y);
let p = (self.x, self.y, pixel);
self.x += 1;
Some(p)
}
}
}
/// Mutable pixel iterator
///
/// DEPRECATED: It is currently not possible to create a safe iterator for this in Rust. You have to use an iterator over the image buffer instead.
pub struct MutPixels<'a, I: ?Sized + 'a> {
image: &'a mut I,
x: u32,
y: u32,
width: u32,
height: u32,
}
impl<'a, I: GenericImage + 'a> Iterator for MutPixels<'a, I>
where
I::Pixel: 'a,
<I::Pixel as Pixel>::Subpixel: 'a,
{
type Item = (u32, u32, &'a mut I::Pixel);
fn next(&mut self) -> Option<(u32, u32, &'a mut I::Pixel)> {
if self.x >= self.width {
self.x = 0;
self.y += 1;
}
if self.y >= self.height {
None
} else {
let tmp = self.image.get_pixel_mut(self.x, self.y);
// NOTE: This is potentially dangerous. It would require the signature fn next(&'a mut self) to be safe.
// error: lifetime of `self` is too short to guarantee its contents can be safely reborrowed...
let ptr = unsafe { mem::transmute(tmp) };
let p = (self.x, self.y, ptr);
self.x += 1;
Some(p)
}
}
}
/// Trait to inspect an image.
pub trait GenericImageView {
/// The type of pixel.
type Pixel: Pixel;
/// Underlying image type. This is mainly used by SubImages in order to
    /// always have a reference to the original image. This allows for fewer
    /// indirections and eases the use of nested SubImages.
type InnerImageView: GenericImageView<Pixel = Self::Pixel>;
/// The width and height of this image.
fn dimensions(&self) -> (u32, u32);
/// The width of this image.
fn width(&self) -> u32 {
let (w, _) = self.dimensions();
w
}
/// The height of this image.
fn height(&self) -> u32 {
let (_, h) = self.dimensions();
h
}
/// The bounding rectangle of this image.
fn bounds(&self) -> (u32, u32, u32, u32);
/// Returns true if this x, y coordinate is contained inside the image.
fn in_bounds(&self, x: u32, y: u32) -> bool {
let (ix, iy, iw, ih) = self.bounds();
x >= ix && x < ix + iw && y >= iy && y < iy + ih
}
/// Returns the pixel located at (x, y)
///
/// # Panics
///
/// Panics if `(x, y)` is out of bounds.
///
/// TODO: change this signature to &P
fn get_pixel(&self, x: u32, y: u32) -> Self::Pixel;
/// Returns the pixel located at (x, y)
///
/// This function can be implemented in a way that ignores bounds checking.
unsafe fn unsafe_get_pixel(&self, x: u32, y: u32) -> Self::Pixel {
self.get_pixel(x, y)
}
/// Returns an Iterator over the pixels of this image.
/// The iterator yields the coordinates of each pixel
/// along with their value
fn pixels(&self) -> Pixels<Self> {
let (width, height) = self.dimensions();
Pixels {
image: self,
x: 0,
y: 0,
width,
height,
}
}
/// Returns a reference to the underlying image.
fn inner(&self) -> &Self::InnerImageView;
    /// Returns a subimage that is an immutable view into this image.
fn view(&self, x: u32, y: u32, width: u32, height: u32) -> SubImage<&Self::InnerImageView> {
SubImage::new(self.inner(), x, y, width, height)
}
}
/// A trait for manipulating images.
pub trait GenericImage: GenericImageView {
/// Underlying image type. This is mainly used by SubImages in order to
    /// always have a reference to the original image. This allows for fewer
    /// indirections and eases the use of nested SubImages.
type InnerImage: GenericImage<Pixel = Self::Pixel>;
/// Gets a reference to the mutable pixel at location `(x, y)`
///
/// # Panics
///
/// Panics if `(x, y)` is out of bounds.
fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut Self::Pixel;
/// Put a pixel at location (x, y)
///
/// # Panics
///
/// Panics if `(x, y)` is out of bounds.
fn put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel);
/// Puts a pixel at location (x, y)
///
/// This function can be implemented in a way that ignores bounds checking.
unsafe fn unsafe_put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
self.put_pixel(x, y, pixel);
}
/// Put a pixel at location (x, y), taking into account alpha channels
///
/// DEPRECATED: This method will be removed. Blend the pixel directly instead.
fn blend_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel);
/// Returns an Iterator over mutable pixels of this image.
/// The iterator yields the coordinates of each pixel
/// along with a mutable reference to them.
#[deprecated(
note = "This cannot be implemented safely in Rust. Please use the image buffer directly."
)]
fn pixels_mut(&mut self) -> MutPixels<Self> {
let (width, height) = self.dimensions();
MutPixels {
image: self,
x: 0,
y: 0,
width,
height,
}
}
/// Copies all of the pixels from another image into this image.
///
/// The other image is copied with the top-left corner of the
/// other image placed at (x, y).
///
/// In order to copy only a piece of the other image, use `sub_image`.
///
/// # Returns
/// `true` if the copy was successful, `false` if the image could not
/// be copied due to size constraints.
fn copy_from<O>(&mut self, other: &O, x: u32, y: u32) -> bool
where
O: GenericImageView<Pixel = Self::Pixel>,
{
// Do bounds checking here so we can use the non-bounds-checking
// functions to copy pixels.
if self.width() < other.width() + x || self.height() < other.height() + y {
return false;
}
for i in 0..other.width() {
for k in 0..other.height() {
unsafe {
let p = other.unsafe_get_pixel(i, k);
self.unsafe_put_pixel(i + x, k + y, p);
}
}
}
true
}
/// Returns a mutable reference to the underlying image.
fn inner_mut(&mut self) -> &mut Self::InnerImage;
/// Returns a subimage that is a view into this image.
fn sub_image(
&mut self,
x: u32,
y: u32,
width: u32,
height: u32,
) -> SubImage<&mut Self::InnerImage> {
SubImage::new(self.inner_mut(), x, y, width, height)
}
}
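// Illustrative in-crate sketch (not part of the original file), mirroring the
// tests at the bottom of this module: `copy_from` pastes the other image at
// the given offset and reports `false`, rather than panicking, when the
// source does not fit inside the target.
#[cfg(test)]
mod copy_from_sketch {
    use super::GenericImage;
    use buffer::ImageBuffer;
    use color::Rgba;

    #[test]
    fn copy_and_reject_oversized_source() {
        let mut target = ImageBuffer::from_pixel(2, 2, Rgba([0u8, 0, 0, 255]));
        let source = ImageBuffer::from_pixel(1, 1, Rgba([255u8, 0, 0, 255]));
        assert!(target.copy_from(&source, 1, 1));
        assert_eq!(*target.get_pixel(1, 1), Rgba([255u8, 0, 0, 255]));
        // A 2x2 source cannot be placed at (1, 1) inside a 2x2 target.
        let big = ImageBuffer::from_pixel(2, 2, Rgba([0u8, 255, 0, 255]));
        assert!(!target.copy_from(&big, 1, 1));
    }
}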
/// A View into another image
pub struct SubImage<I> {
image: I,
xoffset: u32,
yoffset: u32,
xstride: u32,
ystride: u32,
}
/// Alias to access Pixel behind a reference
type DerefPixel<I> = <<I as Deref>::Target as GenericImageView>::Pixel;
/// Alias to access Subpixel behind a reference
type DerefSubpixel<I> = <DerefPixel<I> as Pixel>::Subpixel;
impl<I> SubImage<I> {
/// Construct a new subimage
pub fn new(image: I, x: u32, y: u32, width: u32, height: u32) -> SubImage<I> {
SubImage {
image,
xoffset: x,
yoffset: y,
xstride: width,
ystride: height,
}
}
/// Change the coordinates of this subimage.
pub fn change_bounds(&mut self, x: u32, y: u32, width: u32, height: u32) {
self.xoffset = x;
self.yoffset = y;
self.xstride = width;
self.ystride = height;
}
/// Convert this subimage to an ImageBuffer
pub fn to_image(&self) -> ImageBuffer<DerefPixel<I>, Vec<DerefSubpixel<I>>>
where
I: Deref,
I::Target: GenericImage + 'static,
{
let mut out = ImageBuffer::new(self.xstride, self.ystride);
let borrowed = self.image.deref();
for y in 0..self.ystride {
for x in 0..self.xstride {
let p = borrowed.get_pixel(x + self.xoffset, y + self.yoffset);
out.put_pixel(x, y, p);
}
}
out
}
}
#[allow(deprecated)]
impl<I> GenericImageView for SubImage<I>
where
I: Deref,
I::Target: GenericImageView + Sized,
{
type Pixel = DerefPixel<I>;
type InnerImageView = I::Target;
fn dimensions(&self) -> (u32, u32) {
(self.xstride, self.ystride)
}
fn bounds(&self) -> (u32, u32, u32, u32) {
(self.xoffset, self.yoffset, self.xstride, self.ystride)
}
fn get_pixel(&self, x: u32, y: u32) -> Self::Pixel {
self.image.get_pixel(x + self.xoffset, y + self.yoffset)
}
fn view(&self, x: u32, y: u32, width: u32, height: u32) -> SubImage<&Self::InnerImageView> {
let x = self.xoffset + x;
let y = self.yoffset + y;
SubImage::new(self.inner(), x, y, width, height)
}
fn inner(&self) -> &Self::InnerImageView {
&self.image
}
}
#[allow(deprecated)]
impl<I> GenericImage for SubImage<I>
where
I: DerefMut,
I::Target: GenericImage + Sized,
{
type InnerImage = I::Target;
fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut Self::Pixel {
self.image.get_pixel_mut(x + self.xoffset, y + self.yoffset)
}
fn put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
self.image
.put_pixel(x + self.xoffset, y + self.yoffset, pixel)
}
/// DEPRECATED: This method will be removed. Blend the pixel directly instead.
fn blend_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
self.image
.blend_pixel(x + self.xoffset, y + self.yoffset, pixel)
}
fn sub_image(
&mut self,
x: u32,
y: u32,
width: u32,
height: u32,
) -> SubImage<&mut Self::InnerImage> {
let x = self.xoffset + x;
let y = self.yoffset + y;
SubImage::new(self.inner_mut(), x, y, width, height)
}
fn inner_mut(&mut self) -> &mut Self::InnerImage {
&mut self.image
}
}
#[cfg(test)]
mod tests {
use super::{GenericImage, GenericImageView};
use buffer::ImageBuffer;
use color::Rgba;
#[test]
/// Test that alpha blending works as expected
fn test_image_alpha_blending() {
let mut target = ImageBuffer::new(1, 1);
target.put_pixel(0, 0, Rgba([255u8, 0, 0, 255]));
assert!(*target.get_pixel(0, 0) == Rgba([255, 0, 0, 255]));
target.blend_pixel(0, 0, Rgba([0, 255, 0, 255]));
assert!(*target.get_pixel(0, 0) == Rgba([0, 255, 0, 255]));
// Blending an alpha channel onto a solid background
target.blend_pixel(0, 0, Rgba([255, 0, 0, 127]));
assert!(*target.get_pixel(0, 0) == Rgba([127, 127, 0, 255]));
// Blending two alpha channels
target.put_pixel(0, 0, Rgba([0, 255, 0, 127]));
target.blend_pixel(0, 0, Rgba([255, 0, 0, 127]));
assert!(*target.get_pixel(0, 0) == Rgba([169, 85, 0, 190]));
}
#[test]
fn test_in_bounds() {
let mut target = ImageBuffer::new(2, 2);
target.put_pixel(0, 0, Rgba([255u8, 0, 0, 255]));
assert!(target.in_bounds(0, 0));
assert!(target.in_bounds(1, 0));
assert!(target.in_bounds(0, 1));
assert!(target.in_bounds(1, 1));
assert!(!target.in_bounds(2, 0));
assert!(!target.in_bounds(0, 2));
assert!(!target.in_bounds(2, 2));
}
#[test]
fn test_can_subimage_clone_nonmut() {
let mut source = ImageBuffer::new(3, 3);
source.put_pixel(1, 1, Rgba([255u8, 0, 0, 255]));
// A non-mutable copy of the source image
let source = source.clone();
// Clone a view into non-mutable to a separate buffer
let cloned = source.view(1, 1, 1, 1).to_image();
assert!(cloned.get_pixel(0, 0) == source.get_pixel(1, 1));
}
#[test]
fn test_can_nest_views() {
let mut source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
{
let mut sub1 = source.sub_image(0, 0, 2, 2);
let mut sub2 = sub1.sub_image(1, 1, 1, 1);
sub2.put_pixel(0, 0, Rgba([0, 0, 0, 0]));
}
assert_eq!(*source.get_pixel(1, 1), Rgba([0, 0, 0, 0]));
let view1 = source.view(0, 0, 2, 2);
assert_eq!(*source.get_pixel(1, 1), view1.get_pixel(1, 1));
let view2 = view1.view(1, 1, 1, 1);
assert_eq!(*source.get_pixel(1, 1), view2.get_pixel(0, 0));
}
#[test]
fn test_load_rect() {
use super::*;
struct MockDecoder {scanline_number: u64, scanline_bytes: u64}
impl ImageDecoder for MockDecoder {
type Reader = Box<::std::io::Read>;
fn dimensions(&self) -> (u64, u64) {(5, 5)}
fn colortype(&self) -> ColorType { ColorType::Gray(8) }
fn into_reader(self) -> ImageResult<Self::Reader> {unimplemented!()}
fn scanline_bytes(&self) -> u64 { self.scanline_bytes }
}
const DATA: [u8; 25] = [0, 1, 2, 3, 4,
5, 6, 7, 8, 9,
10, 11, 12, 13, 14,
15, 16, 17, 18, 19,
20, 21, 22, 23, 24];
fn seek_scanline(m: &mut MockDecoder, n: u64) -> io::Result<()> {
m.scanline_number = n;
Ok(())
}
fn read_scanline(m: &mut MockDecoder, buf: &mut [u8]) -> io::Result<usize> {
let bytes_read = m.scanline_number * m.scanline_bytes;
if bytes_read >= 25 { return Ok(0); }
let len = m.scanline_bytes.min(25 - bytes_read);
buf[..(len as usize)].copy_from_slice(&DATA[(bytes_read as usize)..][..(len as usize)]);
m.scanline_number += 1;
Ok(len as usize)
}
for scanline_bytes in 1..30 {
let mut output = [0u8; 26];
load_rect(0, 0, 5, 5, &mut output, |_|{},
&mut MockDecoder{scanline_number:0, scanline_bytes},
seek_scanline, read_scanline).unwrap();
assert_eq!(output[0..25], DATA);
assert_eq!(output[25], 0);
output = [0u8; 26];
load_rect(3, 2, 1, 1, &mut output, |_|{},
&mut MockDecoder{scanline_number:0, scanline_bytes},
seek_scanline, read_scanline).unwrap();
assert_eq!(output[0..2], [13, 0]);
output = [0u8; 26];
load_rect(3, 2, 2, 2, &mut output, |_|{},
&mut MockDecoder{scanline_number:0, scanline_bytes},
seek_scanline, read_scanline).unwrap();
assert_eq!(output[0..5], [13, 14, 18, 19, 0]);
output = [0u8; 26];
load_rect(1, 1, 2, 4, &mut output, |_|{},
&mut MockDecoder{scanline_number:0, scanline_bytes},
seek_scanline, read_scanline).unwrap();
assert_eq!(output[0..9], [6, 7, 11, 12, 16, 17, 21, 22, 0]);
}
}
}
| 31.848671 | 147 | 0.550758 |
18171e7567ae9e78bf6b8b266a3c575669fdd65b
| 2,749 |
use super::{super::state::*, state::*};
use std::rc::Rc;
use utils::math::BoundsF64;
use web_sys::HtmlElement;
impl CardDrag {
pub fn on_release(&self) {
if let Some(current) = self.game.get_current() {
let choice = current.top.iter().find(|choice| choice.is_drag_over());
let mut found_match = false;
if let Some(choice) = choice {
if choice.pair_id == self.pair_id {
found_match = true;
choice.phase.set(TopPhase::Landed);
} else {
                    // released over a specific wrong answer
}
} else {
                // released over an empty area (not over any choice)
}
if !found_match {
if let Some(target) = current
.bottom
.iter()
.find(|choice| choice.pair_id == self.pair_id)
{
target.phase.set(BottomPhase::Show);
}
} else if current.top.iter().all(|choice| choice.is_landed()) {
Game::next(self.game.clone());
}
current.drag.set(None);
}
}
    // Picks the first hit in `Vec` order, not the choice with the greatest visual overlap.
pub fn evaluate_drag_over(&self) {
match (self.game.get_current(), self.get_bounds()) {
(Some(current), Some(src_bounds)) => {
let mut found_drag_over = false;
for choice in current.top.iter() {
if found_drag_over {
choice.set_drag_over(false);
} else {
let is_drag_over = {
if let Some(elem) = choice.elem.borrow().as_ref() {
let target_bounds: BoundsF64 =
elem.get_bounding_client_rect().into();
if src_bounds.intersects(target_bounds) {
found_drag_over = true;
true
} else {
false
}
} else {
false
}
};
choice.set_drag_over(is_drag_over);
}
}
}
_ => {}
}
}
}
pub fn start_drag(state: Rc<CardBottom>, elem: HtmlElement, x: i32, y: i32) {
state.phase.set(BottomPhase::Remove);
if let Some(current) = state.game.get_current() {
current
.drag
.set(Some(Rc::new(CardDrag::new((*state).clone(), elem, x, y))));
}
}
| 33.52439 | 81 | 0.419789 |
26b49bb973eb55632325ea0649df9335e8d091c7
| 5,759 |
// Copyright 2017 The UNIC Project Developers.
//
// See the COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![warn(
bad_style,
missing_debug_implementations,
missing_docs,
unconditional_recursion
)]
#![forbid(unsafe_code)]
//! # UNIC: Unicode and Internationalization Crates for Rust
//!
//! The `unic` super-crate (this) is a collection of all UNIC components. It provides
//! easy access to all of their functionality when all or many components are needed,
//! instead of importing them one-by-one, and it ensures that all imported components
//! are compatible in their algorithms and consistent in their data.
//!
//! ## Major Components
//!
//! - [`char`](/unic-char): Unicode Character utilities.
//!
//! - [`ucd`](/unic-ucd): Unicode Character Database. (UAX\#44).
//!
//! - [`bidi`](/unic-bidi): Unicode Bidirectional Algorithm (UAX\#9).
//!
//! - [`normal`](/unic-normal): Unicode Normalization Forms (UAX\#15).
//!
//! - [`segment`](/unic-segment): Unicode Text Segmentation (UAX\#29).
//!
//! - [`idna`](/unic-idna): Unicode IDNA Compatibility Processing (UTS\#46).
//!
//!
//! ## A Basic Example
//!
//! ```rust
//! use unic::ucd::common::is_alphanumeric;
//! use unic::bidi::BidiInfo;
//! use unic::normal::StrNormalForm;
//! use unic::segment::{GraphemeIndices, Graphemes, WordBoundIndices, WordBounds, Words};
//! use unic::ucd::normal::compose;
//! use unic::ucd::{is_cased, Age, BidiClass, CharAge, CharBidiClass, StrBidiClass, UnicodeVersion};
//!
//! #[rustfmt::skip]
//! #[test]
//! fn test_sample() {
//!
//! // Age
//!
//! assert_eq!(Age::of('A').unwrap().actual(), UnicodeVersion { major: 1, minor: 1, micro: 0 });
//! assert_eq!(Age::of('\u{A0000}'), None);
//! assert_eq!(
//! Age::of('\u{10FFFF}').unwrap().actual(),
//! UnicodeVersion { major: 2, minor: 0, micro: 0 }
//! );
//!
//! if let Some(age) = '🦊'.age() {
//! assert_eq!(age.actual().major, 9);
//! assert_eq!(age.actual().minor, 0);
//! assert_eq!(age.actual().micro, 0);
//! }
//!
//! // Bidi
//!
//! let text = concat![
//! "א",
//! "ב",
//! "ג",
//! "a",
//! "b",
//! "c",
//! ];
//!
//! assert!(!text.has_bidi_explicit());
//! assert!(text.has_rtl());
//! assert!(text.has_ltr());
//!
//! assert_eq!(text.chars().nth(0).unwrap().bidi_class(), BidiClass::RightToLeft);
//! assert!(!text.chars().nth(0).unwrap().is_ltr());
//! assert!(text.chars().nth(0).unwrap().is_rtl());
//!
//! assert_eq!(text.chars().nth(3).unwrap().bidi_class(), BidiClass::LeftToRight);
//! assert!(text.chars().nth(3).unwrap().is_ltr());
//! assert!(!text.chars().nth(3).unwrap().is_rtl());
//!
//! let bidi_info = BidiInfo::new(text, None);
//! assert_eq!(bidi_info.paragraphs.len(), 1);
//!
//! let para = &bidi_info.paragraphs[0];
//! assert_eq!(para.level.number(), 1);
//! assert_eq!(para.level.is_rtl(), true);
//!
//! let line = para.range.clone();
//! let display = bidi_info.reorder_line(para, line);
//! assert_eq!(
//! display,
//! concat![
//! "a",
//! "b",
//! "c",
//! "ג",
//! "ב",
//! "א",
//! ]
//! );
//!
//! // Case
//!
//! assert_eq!(is_cased('A'), true);
//! assert_eq!(is_cased('א'), false);
//!
//! // Normalization
//!
//! assert_eq!(compose('A', '\u{030A}'), Some('Å'));
//!
//! let s = "ÅΩ";
//! let c = s.nfc().collect::<String>();
//! assert_eq!(c, "ÅΩ");
//!
//! // Segmentation
//!
//! assert_eq!(
//! Graphemes::new("a\u{310}e\u{301}o\u{308}\u{332}").collect::<Vec<&str>>(),
//! &["a\u{310}", "e\u{301}", "o\u{308}\u{332}"]
//! );
//!
//! assert_eq!(
//! Graphemes::new("a\r\nb🇺🇳🇮🇨").collect::<Vec<&str>>(),
//! &["a", "\r\n", "b", "🇺🇳", "🇮🇨"]
//! );
//!
//! assert_eq!(
//! GraphemeIndices::new("a̐éö̲\r\n").collect::<Vec<(usize, &str)>>(),
//! &[(0, "a̐"), (3, "é"), (6, "ö̲"), (11, "\r\n")]
//! );
//!
//! assert_eq!(
//! Words::new(
//! "The quick (\"brown\") fox can't jump 32.3 feet, right?",
//! |s: &&str| s.chars().any(is_alphanumeric),
//! ).collect::<Vec<&str>>(),
//! &["The", "quick", "brown", "fox", "can't", "jump", "32.3", "feet", "right"]
//! );
//!
//! assert_eq!(
//! WordBounds::new("The quick (\"brown\") fox").collect::<Vec<&str>>(),
//! &["The", " ", "quick", " ", "(", "\"", "brown", "\"", ")", " ", " ", "fox"]
//! );
//!
//! assert_eq!(
//! WordBoundIndices::new("Brr, it's 29.3°F!").collect::<Vec<(usize, &str)>>(),
//! &[
//! (0, "Brr"),
//! (3, ","),
//! (4, " "),
//! (5, "it's"),
//! (9, " "),
//! (10, "29.3"),
//! (14, "°"),
//! (16, "F"),
//! (17, "!")
//! ]
//! );
//! }
//! ```
pub use unic_bidi as bidi;
pub use unic_char as char;
pub use unic_emoji as emoji;
pub use unic_idna as idna;
pub use unic_normal as normal;
pub use unic_segment as segment;
pub use unic_ucd as ucd;
/// The [Unicode version](https://www.unicode.org/versions/) of data
pub use crate::ucd::UNICODE_VERSION;
mod pkg_info;
pub use crate::pkg_info::{PKG_DESCRIPTION, PKG_NAME, PKG_VERSION};
| 30.796791 | 100 | 0.511374 |
3318cfa7f7cd19e0505385d4a4bc81afb94545a4
| 2,945 |
use byteorder::LittleEndian;
use zerocopy::U32;
/// A struct representing duration that hides the details of endianness and conversion between
/// platform-native u32 and byte arrays.
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Duration {
months: Months,
days: Days,
millis: Millis,
}
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Months(U32<LittleEndian>);
impl Months {
pub fn new(months: u32) -> Self {
Self(U32::new(months))
}
}
impl From<Months> for u32 {
    fn from(months: Months) -> Self {
        months.0.get()
}
}
impl From<[u8; 4]> for Months {
fn from(bytes: [u8; 4]) -> Self {
Self(U32::from(bytes))
}
}
impl AsRef<[u8; 4]> for Months {
fn as_ref(&self) -> &[u8; 4] {
self.0.as_ref()
}
}
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Days(U32<LittleEndian>);
impl Days {
pub fn new(days: u32) -> Self {
Self(U32::new(days))
}
}
impl From<Days> for u32 {
fn from(days: Days) -> Self {
days.0.get()
}
}
impl From<[u8; 4]> for Days {
fn from(bytes: [u8; 4]) -> Self {
Self(U32::from(bytes))
}
}
impl AsRef<[u8; 4]> for Days {
fn as_ref(&self) -> &[u8; 4] {
self.0.as_ref()
}
}
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Millis(U32<LittleEndian>);
impl Millis {
pub fn new(millis: u32) -> Self {
Self(U32::new(millis))
}
}
impl From<Millis> for u32 {
    fn from(millis: Millis) -> Self {
        millis.0.get()
}
}
impl From<[u8; 4]> for Millis {
fn from(bytes: [u8; 4]) -> Self {
Self(U32::from(bytes))
}
}
impl AsRef<[u8; 4]> for Millis {
fn as_ref(&self) -> &[u8; 4] {
self.0.as_ref()
}
}
impl Duration {
/// Construct a new `Duration`.
pub fn new(months: Months, days: Days, millis: Millis) -> Self {
Self {
months,
days,
millis,
}
}
/// Return the number of months in this duration.
pub fn months(&self) -> Months {
self.months
}
/// Return the number of days in this duration.
pub fn days(&self) -> Days {
self.days
}
/// Return the number of milliseconds in this duration.
pub fn millis(&self) -> Millis {
self.millis
}
}
impl From<Duration> for [u8; 12] {
fn from(duration: Duration) -> Self {
let mut bytes = [0u8; 12];
bytes[0..4].copy_from_slice(duration.months.as_ref());
bytes[4..8].copy_from_slice(duration.days.as_ref());
bytes[8..12].copy_from_slice(duration.millis.as_ref());
bytes
}
}
impl From<[u8; 12]> for Duration {
fn from(bytes: [u8; 12]) -> Self {
Self {
months: Months::from([bytes[0], bytes[1], bytes[2], bytes[3]]),
days: Days::from([bytes[4], bytes[5], bytes[6], bytes[7]]),
millis: Millis::from([bytes[8], bytes[9], bytes[10], bytes[11]]),
}
}
}
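// --- Hedged usage sketch (illustrative addition, not part of the original file) ---
// A minimal round-trip check showing how `Duration` hides endianness behind plain
// `u32` accessors and a fixed 12-byte little-endian layout; it only uses the APIs
// defined above.
#[cfg(test)]
mod duration_roundtrip_example {
    use super::*;

    #[test]
    fn bytes_round_trip() {
        let duration = Duration::new(Months::new(1), Days::new(2), Millis::new(3));
        // Little-endian layout: months in bytes 0..4, days in 4..8, millis in 8..12.
        let bytes: [u8; 12] = duration.into();
        assert_eq!(bytes[0], 1);
        assert_eq!(bytes[4], 2);
        assert_eq!(bytes[8], 3);
        // Converting back yields an equal `Duration`.
        assert_eq!(Duration::from(bytes), duration);
    }
}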
| 20.886525 | 94 | 0.55382 |
14225c4e6fd88123975216381de64abbde3c8f35
| 7,224 |
// Copyright 2020 The Druid Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Types for working with images
#[cfg(feature = "image")]
use std::error::Error;
#[cfg(feature = "image")]
use std::path::Path;
use std::sync::Arc;
use crate::kurbo::Size;
use crate::util::unpremul;
use crate::{Color, ImageFormat, RenderContext};
/// A trait for a backend's bitmap image type.
pub trait Image {
/// The size of the image
fn size(&self) -> Size;
}
/// An in-memory pixel buffer.
///
/// Contains raw bytes, dimensions, and image format ([`piet::ImageFormat`]).
///
/// [`piet::ImageFormat`]: ../piet/enum.ImageFormat.html
#[derive(Clone)]
pub struct ImageBuf {
pixels: Arc<[u8]>,
width: usize,
height: usize,
format: ImageFormat,
}
impl ImageBuf {
/// Create an empty image buffer.
pub fn empty() -> Self {
ImageBuf {
pixels: Arc::new([]),
width: 0,
height: 0,
format: ImageFormat::RgbaSeparate,
}
}
/// Creates a new image buffer from an array of bytes.
///
/// `format` specifies the pixel format of the pixel data, which must have length
/// `width * height * format.bytes_per_pixel()`.
///
/// # Panics
///
/// Panics if the pixel data has the wrong length.
pub fn from_raw(
pixels: impl Into<Arc<[u8]>>,
format: ImageFormat,
width: usize,
height: usize,
) -> ImageBuf {
let pixels = pixels.into();
assert_eq!(pixels.len(), width * height * format.bytes_per_pixel());
ImageBuf {
pixels,
format,
width,
height,
}
}
/// Returns the raw pixel data of this image buffer.
pub fn raw_pixels(&self) -> &[u8] {
&self.pixels[..]
}
/// Returns a shared reference to the raw pixel data of this image buffer.
pub fn raw_pixels_shared(&self) -> Arc<[u8]> {
Arc::clone(&self.pixels)
}
/// Returns the format of the raw pixel data.
pub fn format(&self) -> ImageFormat {
self.format
}
/// The width, in pixels, of this image.
pub fn width(&self) -> usize {
self.width
}
/// The height, in pixels, of this image.
pub fn height(&self) -> usize {
self.height
}
/// The size of this image, in pixels.
pub fn size(&self) -> Size {
Size::new(self.width() as f64, self.height() as f64)
}
/// Returns an iterator over the pixels in this image.
///
/// The return value is an iterator over "rows", where each "row" is an iterator
/// over the color of the pixels in that row.
pub fn pixel_colors(&self) -> impl Iterator<Item = impl Iterator<Item = Color> + '_> {
let format = self.format;
let bytes_per_pixel = format.bytes_per_pixel();
self.pixels
.chunks_exact(self.width * bytes_per_pixel)
.map(move |row| {
row.chunks_exact(bytes_per_pixel)
.map(move |p| match format {
ImageFormat::Grayscale => Color::grey8(p[0]),
ImageFormat::Rgb => Color::rgb8(p[0], p[1], p[2]),
ImageFormat::RgbaSeparate => Color::rgba8(p[0], p[1], p[2], p[3]),
ImageFormat::RgbaPremul => {
let a = p[3];
Color::rgba8(unpremul(p[0], a), unpremul(p[1], a), unpremul(p[2], a), a)
}
})
})
}
    /// Converts this buffer into an image that is optimized for drawing into a [`RenderContext`].
///
/// [`RenderContext`]: ../piet/trait.RenderContext.html
pub fn to_image<Ctx: RenderContext>(&self, ctx: &mut Ctx) -> Ctx::Image {
ctx.make_image(self.width(), self.height(), &self.pixels, self.format)
.unwrap()
}
/// Returns `true` if the two `ImageBuf`s refer to the same memory location.
pub fn ptr_eq(&self, other: &ImageBuf) -> bool {
        Arc::ptr_eq(&self.pixels, &other.pixels)
}
}
impl Default for ImageBuf {
fn default() -> Self {
ImageBuf::empty()
}
}
#[cfg(feature = "image")]
#[cfg_attr(docsrs, doc(cfg(feature = "image")))]
impl ImageBuf {
/// Load an image from a DynamicImage from the image crate
pub fn from_dynamic_image(image_data: image::DynamicImage) -> ImageBuf {
fn has_alpha_channel(color: image::ColorType) -> bool {
use image::ColorType::*;
matches!(color, La8 | Rgba8 | La16 | Rgba16 | Bgra8)
}
if has_alpha_channel(image_data.color()) {
ImageBuf::from_dynamic_image_with_alpha(image_data)
} else {
ImageBuf::from_dynamic_image_without_alpha(image_data)
}
}
/// Load an image from a DynamicImage with alpha
pub fn from_dynamic_image_with_alpha(image_data: image::DynamicImage) -> ImageBuf {
let rgba_image = image_data.to_rgba8();
let sizeofimage = rgba_image.dimensions();
ImageBuf::from_raw(
rgba_image.to_vec(),
ImageFormat::RgbaSeparate,
sizeofimage.0 as usize,
sizeofimage.1 as usize,
)
}
/// Load an image from a DynamicImage without alpha
pub fn from_dynamic_image_without_alpha(image_data: image::DynamicImage) -> ImageBuf {
let rgb_image = image_data.to_rgb8();
let sizeofimage = rgb_image.dimensions();
ImageBuf::from_raw(
rgb_image.to_vec(),
ImageFormat::Rgb,
sizeofimage.0 as usize,
sizeofimage.1 as usize,
)
}
/// Attempt to load an image from raw bytes.
///
/// If the image crate can't decode an image from the data an error will be returned.
pub fn from_data(raw_image: &[u8]) -> Result<ImageBuf, Box<dyn Error + Send + Sync>> {
        let image_data = image::load_from_memory(raw_image)?;
Ok(ImageBuf::from_dynamic_image(image_data))
}
/// Attempt to load an image from the file at the provided path.
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<ImageBuf, Box<dyn Error + Send + Sync>> {
        let image_data = image::open(path)?;
Ok(ImageBuf::from_dynamic_image(image_data))
}
}
impl std::fmt::Debug for ImageBuf {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("ImageBuf")
.field("size", &self.pixels.len())
.field("width", &self.width)
.field("height", &self.height)
.field("format", &format_args!("{:?}", self.format))
.finish()
}
}
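// --- Hedged usage sketch (illustrative addition, not part of the original file) ---
// A small check of the `from_raw` length invariant and the row-major ordering that
// `pixel_colors` walks: a 2x1 RGB image yields one row of two colors. Only the APIs
// defined above are used.
#[cfg(test)]
mod image_buf_example {
    use super::*;

    #[test]
    fn from_raw_and_pixel_colors() {
        // 2 pixels wide, 1 pixel high, 3 bytes per RGB pixel => 6 bytes of data.
        let buf = ImageBuf::from_raw(vec![255u8, 0, 0, 0, 255, 0], ImageFormat::Rgb, 2, 1);
        assert_eq!(buf.size(), Size::new(2.0, 1.0));
        let rows: Vec<Vec<Color>> = buf.pixel_colors().map(|row| row.collect()).collect();
        assert_eq!(rows.len(), 1);
        assert_eq!(rows[0].len(), 2);
    }
}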
| 32.836364 | 100 | 0.587348 |
89c007858bff5c190a1cf48db2b86cc0f73a9019
| 9,393 |
// Copyright (C) 2019 Tom Steu <[email protected]>
// SPDX-License-Identifier: MIT
use gdk::enums::key;
use glib::object::{ObjectExt, WeakRef};
use glib::{clone, SignalHandlerId};
use gtk::prelude::*;
use gtk::{Inhibit, Notebook, Widget};
use log::debug;
use std::cell::RefCell;
use std::rc::Rc;
/// Records history of visited tabs
pub struct ViewHistory {
/// weak reference to notebook
parent: WeakRef<Notebook>,
/// list of visited page widgets
book: Vec<WeakRef<Widget>>,
/// current index of the history
index: usize,
/// callback handler for tab switching
switch_handler: Option<SignalHandlerId>,
/// callback handler to stop cycling
stop_handler: Option<SignalHandlerId>,
/// callback handler for adding child
add_handler: Option<SignalHandlerId>,
/// callback handler for removing child
remove_handler: Option<SignalHandlerId>,
}
/// Extension trait for `ViewHistory`
pub trait ViewHistoryExt {
/// Update history when not cycling through it
/// # Panics
/// Panics if `self` is already borrowed
fn update(&self, child: &Widget);
/// Set up event handlers when creating the history
/// # Panics
/// Panics if `self` is already borrowed
fn connect_events(&self);
/// Prepare for history being recorded
/// # Panics
/// Panics if `self` is already borrowed
fn listen(&self);
/// Consolidate history after cycling finished
/// # Panics
/// Panics if `self` is already borrowed
fn write_history(&self);
/// Cycle to previous tab in history
/// # Panics
/// Panics if `self` is already borrowed
fn cycle_backward(&self);
/// Cycle to next tab in history
/// # Panics
/// Panics if `self` is already borrowed
fn cycle_forward(&self);
/// Cycle towards the position of `index`
/// # Panics
/// Panics if `self` is already borrowed
fn cycle_to_index(&self, index: i32);
}
impl ViewHistory {
/// Create new history
pub fn new(notebook: &Notebook) -> Rc<RefCell<Self>> {
let book = notebook
.get_children()
.drain(..)
.map(|w| w.downgrade())
.collect();
let history = Rc::new(RefCell::new(Self {
book,
parent: notebook.downgrade(),
index: 0,
switch_handler: None,
stop_handler: None,
add_handler: None,
remove_handler: None,
}));
history.connect_events();
history
}
}
impl ViewHistoryExt for Rc<RefCell<ViewHistory>> {
fn connect_events(&self) {
{
let mut view_history = self.borrow_mut();
if let Some(notebook) = view_history.parent.upgrade() {
if view_history.add_handler.is_none() {
view_history.add_handler = Some(notebook.connect_page_added(
clone!(@weak self as vh => @default-panic, move |_, w, _| {
vh.borrow_mut().book.push((*w).downgrade());
}),
));
}
                if view_history.remove_handler.is_none() {
                    // store the handler so it is only registered once and can be
                    // disconnected again in `Drop`
                    view_history.remove_handler = Some(notebook.connect_page_removed(
                        clone!(@weak self as vh => @default-panic, move |_, w, _| {
                            vh.borrow_mut().book.retain(|x| {
                                if let Some(listed_widget) = x.upgrade() {
                                    listed_widget != *w
                                } else {
                                    false
                                }
                            });
                        }),
                    ));
                }
}
}
self.listen();
}
fn listen(&self) {
let mut view_history = self.borrow_mut();
if let Some(notebook) = view_history.parent.upgrade() {
// unregister stop handler
if let Some(handler) = view_history.stop_handler.take() {
notebook.disconnect(handler);
}
// register switch handler
view_history.switch_handler = Some(notebook.connect_switch_page(
clone!(@weak self as vh => @default-panic, move |_, w, _| {
vh.update(w);
}),
));
}
}
fn update(&self, child: &Widget) {
let mut view_history = self.borrow_mut();
// check if any tabs are listed
if let Some(first_ref) = view_history.book.first() {
if let Some(first_widget) = first_ref.upgrade() {
if first_widget != *child {
view_history.book = {
let mut new_history =
vec![child.clone().downgrade(), first_widget.downgrade()];
new_history.extend(view_history.book.drain(1..).filter(|weak_ref| {
if let Some(w) = weak_ref.upgrade() {
w != *child
} else {
false
}
}));
new_history
}
}
} else {
view_history.book = {
let mut new_history = vec![child.downgrade()];
new_history.extend(view_history.book.drain(1..).filter(|weak_ref| {
if let Some(w) = weak_ref.upgrade() {
w != *child
} else {
false
}
}));
new_history
}
}
} else {
view_history.book.push(child.downgrade());
}
}
fn write_history(&self) {
let (len, index) = {
let view_history = self.borrow();
(view_history.book.len(), view_history.index)
};
if len > 0 && index != 0 {
debug!("Write tab history");
{
let mut view_history = self.borrow_mut();
view_history.book.swap(0, index);
view_history.index = 0;
}
self.listen();
}
}
fn cycle_to_index(&self, index: i32) {
let mut view_history = self.borrow_mut();
let len = view_history.book.len() as i32;
if len > 0 {
if let Some(notebook) = view_history.parent.upgrade() {
// make sure index is in bounds
let mut checked_index = index % len;
if checked_index < 0 {
checked_index += len;
}
view_history.index = checked_index as usize;
// remove switch handler if necessary
if let Some(handler_id) = view_history.switch_handler.take() {
notebook.disconnect(handler_id);
}
// register stop handler
                if view_history.stop_handler.is_none() {
view_history.stop_handler = Some(notebook.connect_key_release_event(
clone!(@weak self as vh => @default-panic, move |_, ek| {
match ek.get_keyval() {
key::Control_L | key::Control_R => {
vh.write_history();
}
_ => {}
}
Inhibit(false)
}),
));
}
// cycle to indexed tab
if let Some(notebook) = view_history.parent.upgrade() {
if let Some(Some(w)) = view_history
.book
.get(view_history.index)
.map(|wr| wr.upgrade())
{
if let Some(page_num) = notebook.page_num(&w) {
notebook.set_property_page(page_num as i32);
}
}
}
}
}
}
fn cycle_backward(&self) {
debug!("Cycle tab history backward");
let index = { self.borrow().index as i32 + 1 };
self.cycle_to_index(index);
}
fn cycle_forward(&self) {
debug!("Cycle tab history forward");
let index = { self.borrow().index as i32 - 1 };
self.cycle_to_index(index);
}
}
impl Drop for ViewHistory {
/// unregister all active handlers when dropping
fn drop(&mut self) {
if let Some(notebook) = self.parent.upgrade() {
if let Some(switch_handler) = self.switch_handler.take() {
notebook.disconnect(switch_handler);
}
if let Some(stop_handler) = self.stop_handler.take() {
notebook.disconnect(stop_handler);
}
if let Some(add_handler) = self.add_handler.take() {
notebook.disconnect(add_handler);
}
if let Some(remove_handler) = self.remove_handler.take() {
notebook.disconnect(remove_handler);
}
}
}
}
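// --- Hedged usage sketch (illustrative addition, not part of the original module) ---
// Roughly how a caller might drive the history from Ctrl+Tab style shortcuts; the
// key handling below is an assumption for illustration, not code from this crate:
//
// let history = ViewHistory::new(&notebook);
// // while Ctrl is held, in the application's key-press handler:
// //   history.cycle_backward();   // e.g. Ctrl+Tab: previously used tab
// //   history.cycle_forward();    // e.g. Ctrl+Shift+Tab: the other direction
// // Releasing Ctrl fires the release handler registered in `cycle_to_index`,
// // which calls `write_history` and consolidates the new tab order.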
| 34.406593 | 91 | 0.482487 |
239b060497c02ad9d2dfabd8cb89610ed6786a5f
| 1,190 |
/*
* Ory APIs
*
* Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers.
*
* The version of the OpenAPI document: v0.0.1-alpha.15
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecoveryAddress {
/// CreatedAt is a helper struct field for gobuffalo.pop.
#[serde(rename = "created_at", skip_serializing_if = "Option::is_none")]
pub created_at: Option<String>,
#[serde(rename = "id")]
pub id: String,
/// UpdatedAt is a helper struct field for gobuffalo.pop.
#[serde(rename = "updated_at", skip_serializing_if = "Option::is_none")]
pub updated_at: Option<String>,
#[serde(rename = "value")]
pub value: String,
#[serde(rename = "via")]
pub via: String,
}
impl RecoveryAddress {
pub fn new(id: String, value: String, via: String) -> RecoveryAddress {
RecoveryAddress {
created_at: None,
id,
updated_at: None,
value,
via,
}
}
}
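// --- Hedged usage sketch (illustrative addition, not part of the generated code) ---
// Shows the JSON shape produced by the serde attributes above; assumes `serde_json`
// is available, as it is for the rest of this generated client.
#[cfg(test)]
mod recovery_address_example {
    use super::*;

    #[test]
    fn serializes_without_unset_optional_fields() {
        let address = RecoveryAddress::new("id-1".into(), "user@example.org".into(), "email".into());
        let json = serde_json::to_string(&address).unwrap();
        // `created_at` and `updated_at` are skipped while they are `None`.
        assert_eq!(json, r#"{"id":"id-1","value":"user@example.org","via":"email"}"#);
    }
}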
| 27.674419 | 179 | 0.642857 |
ac810606a93285eb0ea953c6b856799d8a93bac8
| 660 |
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
trait MyTrait {
fn f(&self) -> Self;
}
struct S {
x: isize
}
impl MyTrait for S {
fn f(&self) -> S {
S { x: 3 }
}
}
pub fn main() {}
| 23.571429 | 68 | 0.671212 |
fbc7925d9dd32dd923e3cdfeab21ba9210b20b68
| 1,206 |
/// Returns every permutation of `s`, including duplicate permutations when `s`
/// contains repeated characters (for example, "abb" yields six strings, not three).
fn permutations_with_dups(s: &str) -> Vec<String> {
if s.is_empty() {
vec![]
} else if s.len() == 1 {
vec![s.to_string()]
} else if s.len() == 2 {
vec![s.to_string(), s.chars().rev().collect()]
} else {
let subperms = permutations_with_dups(&s[1..]);
let item = s.chars().next().unwrap();
subperms
.iter()
.flat_map(|substr| {
let mut new_subperms: Vec<String> = vec![];
for (idx, _) in substr.chars().enumerate() {
let mut s = substr.clone();
s.insert(idx, item);
new_subperms.push(s);
}
let mut s = substr.clone();
s.push(item);
new_subperms.push(s);
new_subperms
})
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_permutations_with_dups() {
let s1 = "abb";
let permutations = vec!["abb", "bab", "bba", "abb", "bab", "bba"];
assert_eq!(permutations_with_dups(&s1), permutations);
}
}
fn main() {
let s1 = "abc";
permutations_with_dups(&s1);
}
| 26.8 | 74 | 0.461857 |
ab803c89f6d31b11f1a83bbe02a351ac80074e9c
| 140 |
use structopt::StructOpt;
use wasi_worker_cli::Cli;
fn main() {
let opt = Cli::from_args();
opt.exec().expect("command failed");
}
| 17.5 | 40 | 0.657143 |
23b709815f274da552829003ebc9fbad2cde3bb1
| 40,685 |
// This file is copy-paste from
// https://github.com/contain-rs/linked-hash-map/blob/master/src/lib.rs
// rev df65b33f8a9dbd06b95b0a6af7521f0d47233545.
// to avoid conflicting versions in dependent crates.
// Should be used as an external dependency when private dependencies implemented:
// https://github.com/rust-lang/rust/issues/44663
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A `HashMap` wrapper that holds key-value pairs in insertion order.
//!
//! # Examples
//!
//! ```
//! use linked_hash_map::LinkedHashMap;
//!
//! let mut map = LinkedHashMap::new();
//! map.insert(2, 20);
//! map.insert(1, 10);
//! map.insert(3, 30);
//! assert_eq!(map[&1], 10);
//! assert_eq!(map[&2], 20);
//! assert_eq!(map[&3], 30);
//!
//! let items: Vec<(i32, i32)> = map.iter().map(|t| (*t.0, *t.1)).collect();
//! assert_eq!(items, [(2, 20), (1, 10), (3, 30)]);
//! ```
#![forbid(missing_docs)]
#![cfg_attr(all(feature = "nightly", test), feature(test))]
#![cfg_attr(feature = "clippy", feature(plugin))]
#![cfg_attr(feature = "clippy", plugin(clippy))]
#![cfg_attr(feature = "clippy", deny(clippy))]
// Optional Serde support
// #[cfg(feature = "serde_impl")]
// pub mod serde;
// Optional Heapsize support
// #[cfg(feature = "heapsize_impl")]
// mod heapsize;
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::collections::hash_map::{self, HashMap};
use std::fmt;
use std::hash::{BuildHasher, Hash, Hasher};
use std::iter;
use std::marker;
use std::mem;
use std::ops::{Index, IndexMut};
use std::ptr;
struct KeyRef<K> {
k: *const K,
}
struct Node<K, V> {
next: *mut Node<K, V>,
prev: *mut Node<K, V>,
key: K,
value: V,
}
/// A linked hash map.
pub struct LinkedHashMap<K, V, S = hash_map::RandomState> {
map: HashMap<KeyRef<K>, *mut Node<K, V>, S>,
head: *mut Node<K, V>,
free: *mut Node<K, V>,
}
impl<K: Hash> Hash for KeyRef<K> {
fn hash<H: Hasher>(&self, state: &mut H) {
unsafe { (*self.k).hash(state) }
}
}
impl<K: PartialEq> PartialEq for KeyRef<K> {
fn eq(&self, other: &Self) -> bool {
unsafe { (*self.k).eq(&*other.k) }
}
}
impl<K: Eq> Eq for KeyRef<K> {}
// This type exists only to support borrowing `KeyRef`s, which cannot be borrowed to `Q` directly
// due to conflicting implementations of `Borrow`. The layout of `&Qey<Q>` must be identical to
// `&Q` in order to support transmuting in the `Qey::from_ref` method.
#[derive(Hash, PartialEq, Eq)]
struct Qey<Q: ?Sized>(Q);
impl<Q: ?Sized> Qey<Q> {
fn from_ref(q: &Q) -> &Self {
unsafe { mem::transmute(q) }
}
}
impl<K, Q: ?Sized> Borrow<Qey<Q>> for KeyRef<K>
where
K: Borrow<Q>,
{
fn borrow(&self) -> &Qey<Q> {
Qey::from_ref(unsafe { (*self.k).borrow() })
}
}
impl<K, V> Node<K, V> {
fn new(k: K, v: V) -> Self {
Node {
key: k,
value: v,
next: ptr::null_mut(),
prev: ptr::null_mut(),
}
}
}
unsafe fn drop_empty_node<K, V>(the_box: *mut Node<K, V>) {
// Prevent compiler from trying to drop the un-initialized key and values in the node.
let Node { key, value, .. } = *Box::from_raw(the_box);
mem::forget(key);
mem::forget(value);
}
impl<K: Hash + Eq, V> LinkedHashMap<K, V> {
/// Creates a linked hash map.
pub fn new() -> Self {
Self::with_map(HashMap::new())
}
/// Creates an empty linked hash map with the given initial capacity.
pub fn with_capacity(capacity: usize) -> Self {
Self::with_map(HashMap::with_capacity(capacity))
}
}
impl<K, V, S> LinkedHashMap<K, V, S> {
#[inline]
fn detach(&mut self, node: *mut Node<K, V>) {
unsafe {
(*(*node).prev).next = (*node).next;
(*(*node).next).prev = (*node).prev;
}
}
#[inline]
fn attach(&mut self, node: *mut Node<K, V>) {
unsafe {
(*node).next = (*self.head).next;
(*node).prev = self.head;
(*self.head).next = node;
(*(*node).next).prev = node;
}
}
// Caller must check `!self.head.is_null()`
unsafe fn drop_entries(&mut self) {
let mut cur = (*self.head).next;
while cur != self.head {
let next = (*cur).next;
Box::from_raw(cur);
cur = next;
}
}
fn clear_free_list(&mut self) {
unsafe {
let mut free = self.free;
while !free.is_null() {
let next_free = (*free).next;
drop_empty_node(free);
free = next_free;
}
self.free = ptr::null_mut();
}
}
fn ensure_guard_node(&mut self) {
if self.head.is_null() {
// allocate the guard node if not present
unsafe {
let node_layout = std::alloc::Layout::new::<Node<K, V>>();
self.head = std::alloc::alloc(node_layout) as *mut Node<K, V>;
(*self.head).next = self.head;
(*self.head).prev = self.head;
}
}
}
}
impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
fn with_map(map: HashMap<KeyRef<K>, *mut Node<K, V>, S>) -> Self {
LinkedHashMap {
map: map,
head: ptr::null_mut(),
free: ptr::null_mut(),
}
}
/// Creates an empty linked hash map with the given initial hash builder.
pub fn with_hasher(hash_builder: S) -> Self {
Self::with_map(HashMap::with_hasher(hash_builder))
}
/// Creates an empty linked hash map with the given initial capacity and hash builder.
pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self {
Self::with_map(HashMap::with_capacity_and_hasher(capacity, hash_builder))
}
/// Reserves capacity for at least `additional` more elements to be inserted into the map. The
/// map may reserve more space to avoid frequent allocations.
///
/// # Panics
///
    /// Panics if the new allocation size overflows `usize`.
pub fn reserve(&mut self, additional: usize) {
self.map.reserve(additional);
}
/// Shrinks the capacity of the map as much as possible. It will drop down as much as possible
/// while maintaining the internal rules and possibly leaving some space in accordance with the
/// resize policy.
pub fn shrink_to_fit(&mut self) {
self.map.shrink_to_fit();
self.clear_free_list();
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// ```
/// use linked_hash_map::LinkedHashMap;
///
/// let mut letters = LinkedHashMap::new();
///
/// for ch in "a short treatise on fungi".chars() {
/// let counter = letters.entry(ch).or_insert(0);
/// *counter += 1;
/// }
///
/// assert_eq!(letters[&'s'], 2);
/// assert_eq!(letters[&'t'], 3);
/// assert_eq!(letters[&'u'], 1);
/// assert_eq!(letters.get(&'y'), None);
/// ```
pub fn entry(&mut self, k: K) -> Entry<K, V, S> {
let self_ptr: *mut Self = self;
if let Some(entry) = self.map.get_mut(&KeyRef { k: &k }) {
return Entry::Occupied(OccupiedEntry {
entry: *entry,
map: self_ptr,
marker: marker::PhantomData,
});
}
Entry::Vacant(VacantEntry { key: k, map: self })
}
/// Returns an iterator visiting all entries in insertion order.
/// Iterator element type is `OccupiedEntry<K, V, S>`. Allows for removal
/// as well as replacing the entry.
///
/// # Examples
/// ```
/// use linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
/// map.insert("a", 10);
/// map.insert("c", 30);
/// map.insert("b", 20);
///
/// {
/// let mut iter = map.entries();
/// let mut entry = iter.next().unwrap();
/// assert_eq!(&"a", entry.key());
/// *entry.get_mut() = 17;
/// }
///
/// assert_eq!(&17, map.get(&"a").unwrap());
/// ```
pub fn entries(&mut self) -> Entries<K, V, S> {
let head = if !self.head.is_null() {
unsafe { (*self.head).prev }
} else {
ptr::null_mut()
};
Entries {
map: self,
head: head,
remaining: self.len(),
marker: marker::PhantomData,
}
}
/// Inserts a key-value pair into the map. If the key already existed, the old value is
/// returned.
///
/// # Examples
///
/// ```
/// use linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(1, "a");
/// map.insert(2, "b");
/// assert_eq!(map[&1], "a");
/// assert_eq!(map[&2], "b");
/// ```
pub fn insert(&mut self, k: K, v: V) -> Option<V> {
self.ensure_guard_node();
let (node, old_val) = match self.map.get(&KeyRef { k: &k }) {
Some(node) => {
let old_val = unsafe { ptr::replace(&mut (**node).value, v) };
(*node, Some(old_val))
}
None => {
let node = if self.free.is_null() {
Box::into_raw(Box::new(Node::new(k, v)))
} else {
// use a recycled box
unsafe {
let free = self.free;
self.free = (*free).next;
ptr::write(free, Node::new(k, v));
free
}
};
(node, None)
}
};
match old_val {
Some(_) => {
// Existing node, just update LRU position
self.detach(node);
self.attach(node);
}
None => {
let keyref = unsafe { &(*node).key };
self.map.insert(KeyRef { k: keyref }, node);
self.attach(node);
}
}
old_val
}
/// Checks if the map contains the given key.
pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
where
K: Borrow<Q>,
Q: Eq + Hash,
{
self.map.contains_key(Qey::from_ref(k))
}
/// Returns the value corresponding to the key in the map.
///
/// # Examples
///
/// ```
/// use linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(1, "a");
/// map.insert(2, "b");
/// map.insert(2, "c");
/// map.insert(3, "d");
///
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), Some(&"c"));
/// ```
pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
where
K: Borrow<Q>,
Q: Eq + Hash,
{
self.map
.get(Qey::from_ref(k))
.map(|e| unsafe { &(**e).value })
}
/// Returns the mutable reference corresponding to the key in the map.
///
/// # Examples
///
/// ```
/// use linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(1, "a");
/// map.insert(2, "b");
///
/// *map.get_mut(&1).unwrap() = "c";
/// assert_eq!(map.get(&1), Some(&"c"));
/// ```
pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
where
K: Borrow<Q>,
Q: Eq + Hash,
{
self.map
.get(Qey::from_ref(k))
.map(|e| unsafe { &mut (**e).value })
}
/// Returns the value corresponding to the key in the map.
///
/// If value is found, it is moved to the end of the list.
    /// This operation can be used in the implementation of an LRU cache.
///
/// # Examples
///
/// ```
/// use linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(1, "a");
/// map.insert(2, "b");
/// map.insert(3, "d");
///
/// assert_eq!(map.get_refresh(&2), Some(&mut "b"));
///
/// assert_eq!((&2, &"b"), map.iter().rev().next().unwrap());
/// ```
pub fn get_refresh<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
where
K: Borrow<Q>,
Q: Eq + Hash,
{
let (value, node_ptr_opt) = match self.map.get(Qey::from_ref(k)) {
None => (None, None),
Some(node) => (Some(unsafe { &mut (**node).value }), Some(*node)),
};
if let Some(node_ptr) = node_ptr_opt {
self.detach(node_ptr);
self.attach(node_ptr);
}
value
}
/// Removes and returns the value corresponding to the key from the map.
///
/// # Examples
///
/// ```
/// use linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(2, "a");
///
/// assert_eq!(map.remove(&1), None);
/// assert_eq!(map.remove(&2), Some("a"));
/// assert_eq!(map.remove(&2), None);
/// assert_eq!(map.len(), 0);
/// ```
pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V>
where
K: Borrow<Q>,
Q: Eq + Hash,
{
let removed = self.map.remove(Qey::from_ref(k));
removed.map(|node| {
self.detach(node);
unsafe {
// add to free list
(*node).next = self.free;
self.free = node;
// drop the key and return the value
drop(ptr::read(&(*node).key));
ptr::read(&(*node).value)
}
})
}
/// Returns the maximum number of key-value pairs the map can hold without reallocating.
///
/// # Examples
///
/// ```
/// use linked_hash_map::LinkedHashMap;
/// let mut map: LinkedHashMap<i32, &str> = LinkedHashMap::new();
/// let capacity = map.capacity();
/// ```
pub fn capacity(&self) -> usize {
self.map.capacity()
}
/// Removes the first entry.
///
    /// Can be used in the implementation of an LRU cache.
///
/// # Examples
///
/// ```
/// use linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
/// map.insert(1, 10);
/// map.insert(2, 20);
/// map.pop_front();
/// assert_eq!(map.get(&1), None);
/// assert_eq!(map.get(&2), Some(&20));
/// ```
#[inline]
pub fn pop_front(&mut self) -> Option<(K, V)> {
if self.is_empty() {
return None;
}
let lru = unsafe { (*self.head).prev };
self.detach(lru);
self.map
.remove(&KeyRef {
k: unsafe { &(*lru).key },
})
.map(|e| {
let e = *unsafe { Box::from_raw(e) };
(e.key, e.value)
})
}
/// Gets the first entry.
///
/// # Examples
///
/// ```
/// use linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
/// map.insert(1, 10);
/// map.insert(2, 20);
/// assert_eq!(map.front(), Some((&1, &10)));
/// ```
#[inline]
pub fn front(&self) -> Option<(&K, &V)> {
if self.is_empty() {
return None;
}
let lru = unsafe { (*self.head).prev };
self.map
.get(&KeyRef {
k: unsafe { &(*lru).key },
})
.map(|e| unsafe { (&(**e).key, &(**e).value) })
}
/// Removes the last entry.
///
/// # Examples
///
/// ```
/// use linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
/// map.insert(1, 10);
/// map.insert(2, 20);
/// map.pop_back();
/// assert_eq!(map.get(&1), Some(&10));
/// assert_eq!(map.get(&2), None);
/// ```
#[inline]
pub fn pop_back(&mut self) -> Option<(K, V)> {
if self.is_empty() {
return None;
}
let mru = unsafe { (*self.head).next };
self.detach(mru);
self.map
.remove(&KeyRef {
k: unsafe { &(*mru).key },
})
.map(|e| {
let e = *unsafe { Box::from_raw(e) };
(e.key, e.value)
})
}
/// Gets the last entry.
///
/// # Examples
///
/// ```
/// use linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
/// map.insert(1, 10);
/// map.insert(2, 20);
/// assert_eq!(map.back(), Some((&2, &20)));
/// ```
#[inline]
pub fn back(&mut self) -> Option<(&K, &V)> {
if self.is_empty() {
return None;
}
let mru = unsafe { (*self.head).next };
self.map
.get(&KeyRef {
k: unsafe { &(*mru).key },
})
.map(|e| unsafe { (&(**e).key, &(**e).value) })
}
/// Returns the number of key-value pairs in the map.
pub fn len(&self) -> usize {
self.map.len()
}
/// Returns whether the map is currently empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns a reference to the map's hasher.
pub fn hasher(&self) -> &S {
self.map.hasher()
}
/// Clears the map of all key-value pairs.
pub fn clear(&mut self) {
self.map.clear();
// update the guard node if present
if !self.head.is_null() {
unsafe {
self.drop_entries();
(*self.head).prev = self.head;
(*self.head).next = self.head;
}
}
}
/// Returns a double-ended iterator visiting all key-value pairs in order of insertion.
/// Iterator element type is `(&'a K, &'a V)`
///
/// # Examples
/// ```
/// use linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
/// map.insert("a", 10);
/// map.insert("c", 30);
/// map.insert("b", 20);
///
/// let mut iter = map.iter();
/// assert_eq!((&"a", &10), iter.next().unwrap());
/// assert_eq!((&"c", &30), iter.next().unwrap());
/// assert_eq!((&"b", &20), iter.next().unwrap());
/// assert_eq!(None, iter.next());
/// ```
pub fn iter(&self) -> Iter<K, V> {
let head = if self.head.is_null() {
ptr::null_mut()
} else {
unsafe { (*self.head).prev }
};
Iter {
head: head,
tail: self.head,
remaining: self.len(),
marker: marker::PhantomData,
}
}
/// Returns a double-ended iterator visiting all key-value pairs in order of insertion.
/// Iterator element type is `(&'a K, &'a mut V)`
/// # Examples
/// ```
/// use linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
/// map.insert("a", 10);
/// map.insert("c", 30);
/// map.insert("b", 20);
///
/// {
/// let mut iter = map.iter_mut();
/// let mut entry = iter.next().unwrap();
/// assert_eq!(&"a", entry.0);
/// *entry.1 = 17;
/// }
///
/// assert_eq!(&17, map.get(&"a").unwrap());
/// ```
pub fn iter_mut(&mut self) -> IterMut<K, V> {
let head = if self.head.is_null() {
ptr::null_mut()
} else {
unsafe { (*self.head).prev }
};
IterMut {
head: head,
tail: self.head,
remaining: self.len(),
marker: marker::PhantomData,
}
}
/// Returns a double-ended iterator visiting all key in order of insertion.
///
/// # Examples
/// ```
/// use linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
/// map.insert('a', 10);
/// map.insert('c', 30);
/// map.insert('b', 20);
///
/// let mut keys = map.keys();
/// assert_eq!(&'a', keys.next().unwrap());
/// assert_eq!(&'c', keys.next().unwrap());
/// assert_eq!(&'b', keys.next().unwrap());
/// assert_eq!(None, keys.next());
/// ```
pub fn keys(&self) -> Keys<K, V> {
Keys { inner: self.iter() }
}
/// Returns a double-ended iterator visiting all values in order of insertion.
///
/// # Examples
/// ```
/// use linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
/// map.insert('a', 10);
/// map.insert('c', 30);
/// map.insert('b', 20);
///
/// let mut values = map.values();
/// assert_eq!(&10, values.next().unwrap());
/// assert_eq!(&30, values.next().unwrap());
/// assert_eq!(&20, values.next().unwrap());
/// assert_eq!(None, values.next());
/// ```
pub fn values(&self) -> Values<K, V> {
Values { inner: self.iter() }
}
}
impl<'a, K, V, S, Q: ?Sized> Index<&'a Q> for LinkedHashMap<K, V, S>
where
K: Hash + Eq + Borrow<Q>,
S: BuildHasher,
Q: Eq + Hash,
{
type Output = V;
fn index(&self, index: &'a Q) -> &V {
self.get(index).expect("no entry found for key")
}
}
impl<'a, K, V, S, Q: ?Sized> IndexMut<&'a Q> for LinkedHashMap<K, V, S>
where
K: Hash + Eq + Borrow<Q>,
S: BuildHasher,
Q: Eq + Hash,
{
fn index_mut(&mut self, index: &'a Q) -> &mut V {
self.get_mut(index).expect("no entry found for key")
}
}
impl<K: Hash + Eq + Clone, V: Clone, S: BuildHasher + Clone> Clone for LinkedHashMap<K, V, S> {
fn clone(&self) -> Self {
let mut map = Self::with_hasher(self.map.hasher().clone());
map.extend(self.iter().map(|(k, v)| (k.clone(), v.clone())));
map
}
}
impl<K: Hash + Eq, V, S: BuildHasher + Default> Default for LinkedHashMap<K, V, S> {
fn default() -> Self {
Self::with_hasher(S::default())
}
}
impl<K: Hash + Eq, V, S: BuildHasher> Extend<(K, V)> for LinkedHashMap<K, V, S> {
fn extend<I: IntoIterator<Item = (K, V)>>(&mut self, iter: I) {
for (k, v) in iter {
self.insert(k, v);
}
}
}
impl<'a, K, V, S> Extend<(&'a K, &'a V)> for LinkedHashMap<K, V, S>
where
K: 'a + Hash + Eq + Copy,
V: 'a + Copy,
S: BuildHasher,
{
fn extend<I: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: I) {
for (&k, &v) in iter {
self.insert(k, v);
}
}
}
impl<K: Hash + Eq, V, S: BuildHasher + Default> iter::FromIterator<(K, V)>
for LinkedHashMap<K, V, S>
{
fn from_iter<I: IntoIterator<Item = (K, V)>>(iter: I) -> Self {
let iter = iter.into_iter();
let mut map = Self::with_capacity_and_hasher(iter.size_hint().0, S::default());
map.extend(iter);
map
}
}
impl<A: fmt::Debug + Hash + Eq, B: fmt::Debug, S: BuildHasher> fmt::Debug
for LinkedHashMap<A, B, S>
{
/// Returns a string that lists the key-value pairs in insertion order.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_map().entries(self).finish()
}
}
impl<K: Hash + Eq, V: PartialEq, S: BuildHasher> PartialEq for LinkedHashMap<K, V, S> {
fn eq(&self, other: &Self) -> bool {
self.len() == other.len() && self.iter().eq(other)
}
}
impl<K: Hash + Eq, V: Eq, S: BuildHasher> Eq for LinkedHashMap<K, V, S> {}
impl<K: Hash + Eq + PartialOrd, V: PartialOrd, S: BuildHasher> PartialOrd
for LinkedHashMap<K, V, S>
{
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.iter().partial_cmp(other)
}
fn lt(&self, other: &Self) -> bool {
self.iter().lt(other)
}
fn le(&self, other: &Self) -> bool {
self.iter().le(other)
}
fn ge(&self, other: &Self) -> bool {
self.iter().ge(other)
}
fn gt(&self, other: &Self) -> bool {
self.iter().gt(other)
}
}
impl<K: Hash + Eq + Ord, V: Ord, S: BuildHasher> Ord for LinkedHashMap<K, V, S> {
fn cmp(&self, other: &Self) -> Ordering {
self.iter().cmp(other)
}
}
impl<K: Hash + Eq, V: Hash, S: BuildHasher> Hash for LinkedHashMap<K, V, S> {
fn hash<H: Hasher>(&self, h: &mut H) {
for e in self.iter() {
e.hash(h);
}
}
}
unsafe impl<K: Send, V: Send, S: Send> Send for LinkedHashMap<K, V, S> {}
unsafe impl<K: Sync, V: Sync, S: Sync> Sync for LinkedHashMap<K, V, S> {}
impl<K, V, S> Drop for LinkedHashMap<K, V, S> {
fn drop(&mut self) {
if !self.head.is_null() {
unsafe {
self.drop_entries();
drop_empty_node(self.head);
}
}
self.clear_free_list();
}
}
/// An insertion-order iterator over a `LinkedHashMap`'s entries, with immutable references to the
/// values.
pub struct Iter<'a, K: 'a, V: 'a> {
head: *const Node<K, V>,
tail: *const Node<K, V>,
remaining: usize,
marker: marker::PhantomData<(&'a K, &'a V)>,
}
/// An insertion-order iterator over a `LinkedHashMap`'s entries, with mutable references to the
/// values.
pub struct IterMut<'a, K: 'a, V: 'a> {
head: *mut Node<K, V>,
tail: *mut Node<K, V>,
remaining: usize,
marker: marker::PhantomData<(&'a K, &'a mut V)>,
}
/// A consuming insertion-order iterator over a `LinkedHashMap`'s entries.
pub struct IntoIter<K, V> {
head: *mut Node<K, V>,
tail: *mut Node<K, V>,
remaining: usize,
marker: marker::PhantomData<(K, V)>,
}
/// An insertion-order iterator over a `LinkedHashMap`'s entries represented as
/// an `OccupiedEntry`.
pub struct Entries<'a, K: 'a, V: 'a, S: 'a = hash_map::RandomState> {
map: *mut LinkedHashMap<K, V, S>,
head: *mut Node<K, V>,
remaining: usize,
marker: marker::PhantomData<(&'a K, &'a mut V, &'a S)>,
}
unsafe impl<'a, K, V> Send for Iter<'a, K, V>
where
K: Send,
V: Send,
{
}
unsafe impl<'a, K, V> Send for IterMut<'a, K, V>
where
K: Send,
V: Send,
{
}
unsafe impl<K, V> Send for IntoIter<K, V>
where
K: Send,
V: Send,
{
}
unsafe impl<'a, K, V, S> Send for Entries<'a, K, V, S>
where
K: Send,
V: Send,
S: Send,
{
}
unsafe impl<'a, K, V> Sync for Iter<'a, K, V>
where
K: Sync,
V: Sync,
{
}
unsafe impl<'a, K, V> Sync for IterMut<'a, K, V>
where
K: Sync,
V: Sync,
{
}
unsafe impl<K, V> Sync for IntoIter<K, V>
where
K: Sync,
V: Sync,
{
}
unsafe impl<'a, K, V, S> Sync for Entries<'a, K, V, S>
where
K: Sync,
V: Sync,
S: Sync,
{
}
impl<'a, K, V> Clone for Iter<'a, K, V> {
fn clone(&self) -> Self {
Iter { ..*self }
}
}
impl<K, V> Clone for IntoIter<K, V>
where
K: Clone,
V: Clone,
{
fn clone(&self) -> Self {
if self.remaining == 0 {
return IntoIter { ..*self };
}
fn clone_node<K, V>(e: *mut Node<K, V>) -> *mut Node<K, V>
where
K: Clone,
V: Clone,
{
Box::into_raw(Box::new(Node::new(unsafe { (*e).key.clone() }, unsafe {
(*e).value.clone()
})))
}
let mut cur = self.head;
let head = clone_node(cur);
let mut tail = head;
for _ in 1..self.remaining {
unsafe {
(*tail).prev = clone_node((*cur).prev);
(*(*tail).prev).next = tail;
tail = (*tail).prev;
cur = (*cur).prev;
}
}
IntoIter {
head: head,
tail: tail,
remaining: self.remaining,
marker: marker::PhantomData,
}
}
}
impl<'a, K, V> Iterator for Iter<'a, K, V> {
type Item = (&'a K, &'a V);
fn next(&mut self) -> Option<(&'a K, &'a V)> {
if self.head == self.tail {
None
} else {
self.remaining -= 1;
unsafe {
let r = Some((&(*self.head).key, &(*self.head).value));
self.head = (*self.head).prev;
r
}
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.remaining, Some(self.remaining))
}
}
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
if self.head == self.tail {
None
} else {
self.remaining -= 1;
unsafe {
let r = Some((&(*self.head).key, &mut (*self.head).value));
self.head = (*self.head).prev;
r
}
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.remaining, Some(self.remaining))
}
}
impl<K, V> Iterator for IntoIter<K, V> {
type Item = (K, V);
fn next(&mut self) -> Option<(K, V)> {
if self.remaining == 0 {
return None;
}
self.remaining -= 1;
unsafe {
let prev = (*self.head).prev;
let e = *Box::from_raw(self.head);
self.head = prev;
Some((e.key, e.value))
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.remaining, Some(self.remaining))
}
}
impl<'a, K, V, S: BuildHasher> Iterator for Entries<'a, K, V, S> {
type Item = OccupiedEntry<'a, K, V, S>;
fn next(&mut self) -> Option<OccupiedEntry<'a, K, V, S>> {
if self.remaining == 0 {
None
} else {
self.remaining -= 1;
unsafe {
let r = Some(OccupiedEntry {
map: self.map,
entry: self.head,
marker: marker::PhantomData,
});
self.head = (*self.head).prev;
r
}
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.remaining, Some(self.remaining))
}
}
impl<'a, K, V> DoubleEndedIterator for Iter<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
if self.head == self.tail {
None
} else {
self.remaining -= 1;
unsafe {
self.tail = (*self.tail).next;
Some((&(*self.tail).key, &(*self.tail).value))
}
}
}
}
impl<'a, K, V> DoubleEndedIterator for IterMut<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> {
if self.head == self.tail {
None
} else {
self.remaining -= 1;
unsafe {
self.tail = (*self.tail).next;
Some((&(*self.tail).key, &mut (*self.tail).value))
}
}
}
}
impl<K, V> DoubleEndedIterator for IntoIter<K, V> {
fn next_back(&mut self) -> Option<(K, V)> {
if self.remaining == 0 {
return None;
}
self.remaining -= 1;
unsafe {
let next = (*self.tail).next;
let e = *Box::from_raw(self.tail);
self.tail = next;
Some((e.key, e.value))
}
}
}
impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {
fn len(&self) -> usize {
self.remaining
}
}
impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
fn len(&self) -> usize {
self.remaining
}
}
impl<K, V> ExactSizeIterator for IntoIter<K, V> {
fn len(&self) -> usize {
self.remaining
}
}
impl<K, V> Drop for IntoIter<K, V> {
fn drop(&mut self) {
for _ in 0..self.remaining {
unsafe {
let next = (*self.tail).next;
Box::from_raw(self.tail);
self.tail = next;
}
}
}
}
/// An insertion-order iterator over a `LinkedHashMap`'s keys.
pub struct Keys<'a, K: 'a, V: 'a> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Clone for Keys<'a, K, V> {
fn clone(&self) -> Self {
Keys {
inner: self.inner.clone(),
}
}
}
impl<'a, K, V> Iterator for Keys<'a, K, V> {
type Item = &'a K;
#[inline]
fn next(&mut self) -> Option<&'a K> {
self.inner.next().map(|e| e.0)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
impl<'a, K, V> DoubleEndedIterator for Keys<'a, K, V> {
#[inline]
fn next_back(&mut self) -> Option<&'a K> {
self.inner.next_back().map(|e| e.0)
}
}
impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> {
fn len(&self) -> usize {
self.inner.len()
}
}
/// An insertion-order iterator over a `LinkedHashMap`'s values.
pub struct Values<'a, K: 'a, V: 'a> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Clone for Values<'a, K, V> {
fn clone(&self) -> Self {
Values {
inner: self.inner.clone(),
}
}
}
impl<'a, K, V> Iterator for Values<'a, K, V> {
type Item = &'a V;
#[inline]
fn next(&mut self) -> Option<&'a V> {
self.inner.next().map(|e| e.1)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
impl<'a, K, V> DoubleEndedIterator for Values<'a, K, V> {
#[inline]
fn next_back(&mut self) -> Option<&'a V> {
self.inner.next_back().map(|e| e.1)
}
}
impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> {
fn len(&self) -> usize {
self.inner.len()
}
}
impl<'a, K: Hash + Eq, V, S: BuildHasher> IntoIterator for &'a LinkedHashMap<K, V, S> {
type Item = (&'a K, &'a V);
type IntoIter = Iter<'a, K, V>;
fn into_iter(self) -> Iter<'a, K, V> {
self.iter()
}
}
impl<'a, K: Hash + Eq, V, S: BuildHasher> IntoIterator for &'a mut LinkedHashMap<K, V, S> {
type Item = (&'a K, &'a mut V);
type IntoIter = IterMut<'a, K, V>;
fn into_iter(self) -> IterMut<'a, K, V> {
self.iter_mut()
}
}
impl<K: Hash + Eq, V, S: BuildHasher> IntoIterator for LinkedHashMap<K, V, S> {
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
fn into_iter(mut self) -> IntoIter<K, V> {
let (head, tail) = if !self.head.is_null() {
unsafe { ((*self.head).prev, (*self.head).next) }
} else {
(ptr::null_mut(), ptr::null_mut())
};
let len = self.len();
if !self.head.is_null() {
unsafe { drop_empty_node(self.head) }
}
self.clear_free_list();
// drop the HashMap but not the LinkedHashMap
unsafe {
ptr::drop_in_place(&mut self.map);
}
mem::forget(self);
IntoIter {
head: head,
tail: tail,
remaining: len,
marker: marker::PhantomData,
}
}
}
/// A view into a single location in a map, which may be vacant or occupied.
pub enum Entry<'a, K: 'a, V: 'a, S: 'a = hash_map::RandomState> {
/// An occupied Entry.
Occupied(OccupiedEntry<'a, K, V, S>),
/// A vacant Entry.
Vacant(VacantEntry<'a, K, V, S>),
}
/// A view into a single occupied location in a `LinkedHashMap`.
pub struct OccupiedEntry<'a, K: 'a, V: 'a, S: 'a = hash_map::RandomState> {
entry: *mut Node<K, V>,
map: *mut LinkedHashMap<K, V, S>,
marker: marker::PhantomData<&'a K>,
}
/// A view into a single empty location in a `LinkedHashMap`.
pub struct VacantEntry<'a, K: 'a, V: 'a, S: 'a = hash_map::RandomState> {
key: K,
map: &'a mut LinkedHashMap<K, V, S>,
}
impl<'a, K: Hash + Eq, V, S: BuildHasher> Entry<'a, K, V, S> {
/// Returns the entry key
///
/// # Examples
///
/// ```
/// use linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::<String, u32>::new();
///
/// assert_eq!("hello", map.entry("hello".to_string()).key());
/// ```
pub fn key(&self) -> &K {
match *self {
Entry::Occupied(ref e) => e.key(),
Entry::Vacant(ref e) => e.key(),
}
}
/// Ensures a value is in the entry by inserting the default if empty, and returns
/// a mutable reference to the value in the entry.
pub fn or_insert(self, default: V) -> &'a mut V {
match self {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => entry.insert(default),
}
}
/// Ensures a value is in the entry by inserting the result of the default function if empty,
/// and returns a mutable reference to the value in the entry.
pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V {
match self {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => entry.insert(default()),
}
}
}
impl<'a, K: Hash + Eq, V, S: BuildHasher> OccupiedEntry<'a, K, V, S> {
/// Gets a reference to the entry key
///
/// # Examples
///
/// ```
/// use linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
///
/// map.insert("foo".to_string(), 1);
/// assert_eq!("foo", map.entry("foo".to_string()).key());
/// ```
pub fn key(&self) -> &K {
unsafe { &(*self.entry).key }
}
/// Gets a reference to the value in the entry.
pub fn get(&self) -> &V {
unsafe { &(*self.entry).value }
}
/// Gets a mutable reference to the value in the entry.
pub fn get_mut(&mut self) -> &mut V {
unsafe { &mut (*self.entry).value }
}
/// Converts the OccupiedEntry into a mutable reference to the value in the entry
/// with a lifetime bound to the map itself
pub fn into_mut(self) -> &'a mut V {
unsafe { &mut (*self.entry).value }
}
/// Sets the value of the entry, and returns the entry's old value
pub fn insert(&mut self, value: V) -> V {
unsafe {
(*self.map).ensure_guard_node();
let old_val = mem::replace(&mut (*self.entry).value, value);
let node_ptr: *mut Node<K, V> = self.entry;
// Existing node, just update LRU position
(*self.map).detach(node_ptr);
(*self.map).attach(node_ptr);
old_val
}
}
/// Takes the value out of the entry, and returns it
pub fn remove(self) -> V {
unsafe { (*self.map).remove(&(*self.entry).key) }.unwrap()
}
}
impl<'a, K: 'a + Hash + Eq, V: 'a, S: BuildHasher> VacantEntry<'a, K, V, S> {
/// Gets a reference to the entry key
///
/// # Examples
///
/// ```
/// use linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::<String, u32>::new();
///
/// assert_eq!("foo", map.entry("foo".to_string()).key());
/// ```
pub fn key(&self) -> &K {
&self.key
}
/// Sets the value of the entry with the VacantEntry's key,
/// and returns a mutable reference to it
pub fn insert(self, value: V) -> &'a mut V {
self.map.ensure_guard_node();
let node = if self.map.free.is_null() {
Box::into_raw(Box::new(Node::new(self.key, value)))
} else {
// use a recycled box
unsafe {
let free = self.map.free;
self.map.free = (*free).next;
ptr::write(free, Node::new(self.key, value));
free
}
};
let keyref = unsafe { &(*node).key };
self.map.attach(node);
let ret = self.map.map.entry(KeyRef { k: keyref }).or_insert(node);
unsafe { &mut (**ret).value }
}
}
#[cfg(all(feature = "nightly", test))]
mod bench {
extern crate test;
use super::LinkedHashMap;
#[bench]
fn not_recycled_cycling(b: &mut test::Bencher) {
let mut hash_map = LinkedHashMap::with_capacity(1000);
for i in 0usize..1000 {
hash_map.insert(i, i);
}
b.iter(|| {
for i in 0usize..1000 {
hash_map.remove(&i);
}
hash_map.clear_free_list();
for i in 0usize..1000 {
hash_map.insert(i, i);
}
})
}
#[bench]
fn recycled_cycling(b: &mut test::Bencher) {
let mut hash_map = LinkedHashMap::with_capacity(1000);
for i in 0usize..1000 {
hash_map.insert(i, i);
}
b.iter(|| {
for i in 0usize..1000 {
hash_map.remove(&i);
}
for i in 0usize..1000 {
hash_map.insert(i, i);
}
})
}
}
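// --- Hedged usage sketch (illustrative addition, not part of the vendored crate) ---
// Demonstrates the LRU pattern referred to in the `get_refresh`/`pop_front` docs:
// refreshing an entry moves it to the back, and `pop_front` evicts the least
// recently used entry first.
#[cfg(test)]
mod lru_usage_example {
    use super::LinkedHashMap;

    #[test]
    fn lru_eviction_order() {
        let mut cache = LinkedHashMap::new();
        cache.insert("a", 1);
        cache.insert("b", 2);
        cache.insert("c", 3);
        // Touch "a" so it becomes the most recently used entry.
        cache.get_refresh(&"a");
        // "b" is now the least recently used entry, so it is evicted first.
        assert_eq!(cache.pop_front(), Some(("b", 2)));
        assert_eq!(cache.pop_front(), Some(("c", 3)));
        assert_eq!(cache.pop_front(), Some(("a", 1)));
    }
}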
| 27.015272 | 99 | 0.502102 |
5d67ba1844d0e49d62f949d3437720e97a57ffa9
| 14,018 |
use core::fmt;
use std::collections::BTreeMap;
use std::str::FromStr;
use anyhow::{Error, Result};
use chrono::Utc;
use hashbrown::HashMap;
use serde::de::Visitor;
use serde::{Deserialize, Deserializer, Serialize};
use tantivy::schema::{
Cardinality,
Document as InternalDocument,
Field,
FieldType,
IntOptions,
Schema as InternalSchema,
SchemaBuilder as InternalSchemaBuilder,
STORED,
STRING,
TEXT,
};
use tantivy::{DateTime, Score};
use crate::helpers::hash;
/// A declared schema field type.
///
/// Each field has a set of relevant options as specified
/// by the tantivy docs.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
#[serde(tag = "type")]
pub enum FieldDeclaration {
/// A f64 field with given options
F64(IntOptions),
/// A u64 field with given options.
U64(IntOptions),
/// A I64 field with given options.
I64(IntOptions),
/// A Datetime<Utc> field with given options.
///
/// This is treated as a u64 integer timestamp.
Date(IntOptions),
/// A string field with given options.
///
/// This will be tokenized.
Text { stored: bool },
/// A string field with given options.
///
/// This wont be tokenized.
String { stored: bool },
}
/// The storage backend to store index documents in.
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum IndexStorageType {
/// Creates a temporary file.
TempDir,
/// Creates the index in memory (generally only for debugging)
Memory,
    /// Store the index persistently in a given directory.
FileSystem,
}
#[derive(Clone, Serialize, Deserialize)]
pub struct IndexDeclaration {
pub(crate) name: String,
writer_buffer: usize,
writer_threads: Option<usize>,
max_concurrency: u32,
reader_threads: Option<u32>,
search_fields: Vec<String>,
#[serde(default)]
boost_fields: HashMap<String, tantivy::Score>,
storage_type: IndexStorageType,
fields: HashMap<String, FieldDeclaration>,
#[serde(default)]
set_conjunction_by_default: bool,
#[serde(default)]
use_fast_fuzzy: bool,
#[serde(default)]
strip_stop_words: bool,
}
impl IndexDeclaration {
pub(crate) fn into_schema(self) -> LoadedIndex {
let mut indexed_text_fields = vec![];
let mut fuzzy_search_fields = vec![];
let mut schema = InternalSchemaBuilder::new();
let opts = IntOptions::default()
.set_fast(Cardinality::SingleValue)
.set_stored()
.set_indexed();
schema.add_u64_field("_id", opts);
for (name, field) in self.fields {
if name == "_id" {
continue;
}
match field {
FieldDeclaration::F64(opts) => {
schema.add_f64_field(&name, opts);
},
FieldDeclaration::U64(opts) => {
schema.add_u64_field(&name, opts);
},
FieldDeclaration::I64(opts) => {
                    schema.add_i64_field(&name, opts);
},
FieldDeclaration::Date(opts) => {
schema.add_date_field(&name, opts);
},
FieldDeclaration::String { stored } => {
let mut opts = STRING;
if stored {
opts = opts | STORED;
}
schema.add_text_field(&name, opts);
},
FieldDeclaration::Text { stored } => {
let field = if !(self.use_fast_fuzzy && crate::correction::enabled()) {
let mut opts = TEXT;
if stored {
opts = opts | STORED;
}
schema.add_text_field(&name, opts)
} else {
if stored {
schema.add_text_field(&name, STORED);
}
indexed_text_fields.push(name.clone());
let id = hash(&name);
schema.add_text_field(&format!("_{}", id), TEXT)
};
let boost = match self.boost_fields.get(&name) {
Some(b) => *b,
None => 0f32,
};
fuzzy_search_fields.push((field, boost));
},
};
}
LoadedIndex {
name: self.name.into(),
writer_buffer: self.writer_buffer,
writer_threads: self.writer_threads.unwrap_or_else(|| num_cpus::get()),
max_concurrency: self.max_concurrency,
reader_threads: self.reader_threads.unwrap_or(1),
search_fields: self.search_fields,
storage_type: self.storage_type,
schema: schema.build(),
boost_fields: self.boost_fields,
set_conjunction_by_default: self.set_conjunction_by_default,
indexed_text_fields,
fuzzy_search_fields,
use_fast_fuzzy: self.use_fast_fuzzy,
strip_stop_words: self.strip_stop_words,
}
}
}
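// --- Hedged configuration sketch (illustrative addition, not from the original project) ---
// A payload of roughly this shape would deserialize into `IndexDeclaration` given the
// serde attributes above; the index name, buffer size and field names are assumptions,
// and numeric field types would additionally take tantivy `IntOptions` keys.
//
// {
//   "name": "products",
//   "writer_buffer": 6000000,
//   "max_concurrency": 4,
//   "search_fields": ["title", "description"],
//   "storage_type": "filesystem",
//   "fields": {
//     "title":       { "type": "text", "stored": true },
//     "description": { "type": "text", "stored": false }
//   }
// }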
/// The loaded and processed index declaration.
///
/// This is used for controlling the behaviour of the
/// generated indexes, thread pools and writers.
pub struct LoadedIndex {
/// The name of the index.
pub(crate) name: String,
/// The amount of bytes to allocate to the writer buffer.
pub(crate) writer_buffer: usize,
/// The amount of worker threads to dedicate to a writer.
pub(crate) writer_threads: usize,
/// The maximum searches that can be done at any one time.
pub(crate) max_concurrency: u32,
/// The number of reader threads to use.
///
    /// The current implementation is rather naive: it multithreads by splitting a search
    /// into as many tasks as there are segments.
    /// It cannot make a search faster if your index consists of one large segment.
    /// Also, keep in mind that multithreading a single query across several threads will
    /// not improve your throughput; it can actually hurt it.
    /// It will, however, decrease the average response time.
pub(crate) reader_threads: u32,
    /// The fields that are actually searched via tantivy.
///
/// These values need to either be a fast field (ints) or TEXT.
pub(crate) search_fields: Vec<String>,
/// The storage type for the index backend.
pub(crate) storage_type: IndexStorageType,
/// The defined tantivy schema.
pub(crate) schema: InternalSchema,
/// A set of fields to boost by a given factor.
pub(crate) boost_fields: HashMap<String, Score>,
/// If set to true, this switches Tantivy's default query parser
/// behaviour to use AND instead of OR.
pub(crate) set_conjunction_by_default: bool,
/// The set of fields which are indexed.
pub(crate) indexed_text_fields: Vec<String>,
    /// The set of fields (and their boost factors) searched by the fuzzy query system.
pub(crate) fuzzy_search_fields: Vec<(Field, Score)>,
/// Whether or not to use the fast fuzzy system or not.
///
/// The fast fuzzy system must be enabled on the server overall
/// for this feature.
pub(crate) use_fast_fuzzy: bool,
/// Whether or not to strip out stop words in fuzzy queries.
///
/// This only applies to the fast-fuzzy query system.
pub(crate) strip_stop_words: bool,
}
/// The mode of the query.
///
/// This can change how the system parses and handles the query.
#[derive(Debug, Copy, Clone, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum QueryMode {
/// This uses the tantivy query parser.
Normal,
/// Processes the query via the FuzzyQuery system. (Default)
Fuzzy,
/// Gets documents similar to the reference document.
MoreLikeThis,
}
impl Default for QueryMode {
fn default() -> Self {
Self::Fuzzy
}
}
#[derive(Debug, Deserialize)]
pub struct QueryPayload {
/// A query string for `QueryMode::Fuzzy` and `QueryMode::Normal` queries.
pub(crate) query: Option<String>,
/// A reference document for `QueryMode::MoreLikeThis`.
pub(crate) document: Option<u64>,
/// A map of fields to query strings.
#[serde(default)]
pub map: HashMap<String, String>,
/// The query mode which determines which query system will be
/// used.
#[serde(default)]
pub(crate) mode: QueryMode,
    /// The number of results to return; the default is 20.
#[serde(default = "default_query_data::default_limit")]
pub(crate) limit: usize,
    /// The number of results to skip before returning results; the default is 0.
#[serde(default = "default_query_data::default_offset")]
pub(crate) offset: usize,
/// The field to order content by, this has to be a fast field if
/// not `None`.
pub(crate) order_by: Option<String>,
}
mod default_query_data {
pub fn default_limit() -> usize {
20
}
pub fn default_offset() -> usize {
0
}
}
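// Illustrative sketch (assumes `serde_json` is available, e.g. as a dev-dependency):
// a minimal fuzzy query payload only needs `query`; `mode`, `limit` and `offset`
// fall back to the defaults declared above. The query text here is just an example.
#[cfg(test)]
mod query_payload_example {
    use super::{QueryMode, QueryPayload};

    #[test]
    fn deserializes_with_defaults() {
        let payload: QueryPayload =
            serde_json::from_str(r#"{ "query": "hello world" }"#).unwrap();

        // Only `query` was supplied; everything else takes its default.
        assert_eq!(payload.query.as_deref(), Some("hello world"));
        assert!(matches!(payload.mode, QueryMode::Fuzzy));
        assert_eq!(payload.limit, 20);
        assert_eq!(payload.offset, 0);
        assert!(payload.order_by.is_none());
    }
}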
/// A tantivy document representation.
///
/// This is checked against the schema and validated before being
/// converted into a direct tantivy type.
#[derive(Debug, Deserialize)]
pub struct Document(pub BTreeMap<String, DocumentItem>);
#[derive(Debug, Deserialize)]
#[serde(untagged)]
pub enum DocumentItem {
Single(DocumentValue),
Multi(Vec<DocumentValue>)
}
/// A document value that can be processed by tantivy.
#[derive(Debug)]
pub enum DocumentValue {
/// A signed 64 bit integer.
I64(i64),
/// A 64 bit floating point number.
F64(f64),
    /// An unsigned 64 bit integer.
U64(u64),
    /// A datetime field, parsed from a datetime string (integer timestamps are converted in `add_value`).
Datetime(DateTime),
/// A text field.
Text(String),
}
impl<'de> Deserialize<'de> for DocumentValue {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor;
impl<'de> Visitor<'de> for ValueVisitor {
type Value = DocumentValue;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a string or u32")
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E> {
Ok(DocumentValue::I64(v))
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> {
Ok(DocumentValue::U64(v))
}
fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E> {
Ok(DocumentValue::F64(v))
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> {
if let Ok(dt) = tantivy::DateTime::from_str(&v) {
return Ok(DocumentValue::Datetime(dt));
}
Ok(DocumentValue::Text(v.to_owned()))
}
fn visit_string<E>(self, v: String) -> Result<Self::Value, E> {
if let Ok(dt) = tantivy::DateTime::from_str(&v) {
return Ok(DocumentValue::Datetime(dt));
}
Ok(DocumentValue::Text(v))
}
}
deserializer.deserialize_any(ValueVisitor)
}
}
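// Illustrative sketch (assumes `serde_json` is available, e.g. as a dev-dependency):
// plain JSON primitives map onto the variants above; serde_json hands positive
// integers to `visit_u64`, so `123` becomes `U64` rather than `I64`, and strings
// that do not parse as datetimes fall through to `Text`.
#[cfg(test)]
mod document_value_example {
    use super::DocumentValue;

    #[test]
    fn maps_json_primitives_onto_variants() {
        let v: DocumentValue = serde_json::from_str("123").unwrap();
        assert!(matches!(v, DocumentValue::U64(123)));

        let v: DocumentValue = serde_json::from_str("-4").unwrap();
        assert!(matches!(v, DocumentValue::I64(-4)));

        let v: DocumentValue = serde_json::from_str("3.5").unwrap();
        assert!(matches!(v, DocumentValue::F64(_)));

        let v: DocumentValue = serde_json::from_str("\"hello\"").unwrap();
        assert!(matches!(v, DocumentValue::Text(ref s) if s == "hello"));
    }
}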
impl Document {
pub(crate) fn parse_into_document(self, schema: &InternalSchema) -> Result<InternalDocument> {
let mut doc = InternalDocument::new();
for (key, values) in self.0 {
let field = schema
.get_field(&key)
.ok_or_else(|| Error::msg(format!("field {:?} does not exist in schema", &key)))?;
let entry = schema.get_field_entry(field);
let field_type = entry.field_type();
match values {
DocumentItem::Single(value) =>
add_value(&key, field, field_type, value, &mut doc)?,
DocumentItem::Multi(values) => {
for value in values {
add_value(&key, field, field_type, value, &mut doc)?;
}
},
};
}
Ok(doc)
}
}
fn add_value(
    key: &str,
    field: Field,
    field_type: &FieldType,
    value: DocumentValue,
    doc: &mut InternalDocument,
) -> Result<()> {
match (value, field_type) {
(DocumentValue::I64(v), FieldType::I64(_)) => doc.add_i64(field, v),
(DocumentValue::U64(v), FieldType::U64(_)) => doc.add_u64(field, v),
(DocumentValue::F64(v), FieldType::F64(_)) => doc.add_f64(field, v),
(DocumentValue::Text(v), FieldType::Str(_)) => doc.add_text(field, v),
(DocumentValue::Datetime(v), FieldType::Str(_)) => doc.add_text(field, v.to_string()),
(DocumentValue::Datetime(v), FieldType::Date(_)) => doc.add_date(field, &v),
(DocumentValue::I64(v), FieldType::Date(_)) => {
match chrono::NaiveDateTime::from_timestamp_opt(v, 0) {
Some(dt) => {
let dt = chrono::DateTime::from_utc(dt, Utc);
doc.add_date(field, &dt)
},
None =>
return Err(Error::msg(format!("filed {:?} is type {:?} in schema but did not get a valid value (invalid timestamp)", &key, field_type))),
}
},
(DocumentValue::U64(v), FieldType::Date(_)) => {
match chrono::NaiveDateTime::from_timestamp_opt(v as i64, 0) {
Some(dt) => {
let dt = chrono::DateTime::from_utc(dt, Utc);
doc.add_date(field, &dt)
},
None =>
return Err(Error::msg(format!("filed {:?} is type {:?} in schema but did not get a valid value (invalid timestamp)", &key, field_type))),
}
},
_ => return Err(Error::msg(format!("filed {:?} is type {:?} in schema but did not get a valid value", &key, field_type)))
}
Ok(())
}
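// Illustrative sketch (not compiled here): assuming `InternalSchema` and
// `InternalDocument` alias tantivy's schema and document types, a JSON payload is
// converted roughly like this; `json!` and the field names are examples only.
//
//     let payload: Document = serde_json::from_value(json!({
//         "title": "Hello, world",
//         "count": 3,
//     }))?;
//     let doc = payload.parse_into_document(&schema)?;
//
// A value whose type does not match the schema entry for its field is rejected by
// `add_value` with a descriptive error rather than being silently coerced.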
| 31.643341 | 157 | 0.574761 |
216af0dff20115c1f3bf140a29fffb0452b94f8d
| 18,164 |
#[allow(dead_code)]
pub static MIME_TYPE: &[&str] = &[
"x-world/x-3dmf",
"application/octet-stream",
"application/x-authorware-bin",
"application/x-authorware-map",
"application/x-authorware-seg",
"text/vnd.abc",
"text/html",
"video/animaflex",
"application/postscript",
"audio/aiff",
"audio/x-aiff",
"audio/aiff",
"audio/x-aiff",
"audio/aiff",
"audio/x-aiff",
"application/x-aim",
"text/x-audiosoft-intra",
"application/x-navi-animation",
"application/x-nokia-9000-communicator-add-on-software",
"application/mime",
"application/octet-stream",
"application/arj",
"application/octet-stream",
"image/x-jg",
"video/x-ms-asf",
"text/x-asm",
"text/asp",
"application/x-mplayer2",
"video/x-ms-asf",
"video/x-ms-asf-plugin",
"audio/basic",
"audio/x-au",
"application/x-troff-msvideo",
"video/avi",
"video/msvideo",
"video/x-msvideo",
"video/avs-video",
"application/x-bcpio",
"application/mac-binary",
"application/macbinary",
"application/octet-stream",
"application/x-binary",
"application/x-macbinary",
"image/bmp",
"image/bmp",
"image/x-windows-bmp",
"application/book",
"application/book",
"application/x-bzip2",
"application/x-bsh",
"application/x-bzip",
"application/x-bzip2",
"text/plain",
"text/x-c",
"text/plain",
"application/vnd.ms-pki.seccat",
"text/plain",
"text/x-c",
"application/clariscad",
"application/x-cocoa",
"application/cdf",
"application/x-cdf",
"application/x-netcdf",
"application/pkix-cert",
"application/x-x509-ca-cert",
"application/x-chat",
"application/x-chat",
"application/java",
"application/java-byte-code",
"application/x-java-class",
"application/octet-stream",
"text/plain",
"text/plain",
"application/x-cpio",
"text/x-c",
"application/mac-compactpro",
"application/x-compactpro",
"application/x-cpt",
"application/pkcs-crl",
"application/pkix-crl",
"application/pkix-cert",
"application/x-x509-ca-cert",
"application/x-x509-user-cert",
"application/x-csh",
"text/x-script.csh",
"application/x-pointplus",
"text/css",
"text/plain",
"application/x-director",
"application/x-deepv",
"text/plain",
"application/x-x509-ca-cert",
"video/x-dv",
"application/x-director",
"video/dl",
"video/x-dl",
"application/msword",
"application/msword",
"application/commonground",
"application/drafting",
"application/octet-stream",
"video/x-dv",
"application/x-dvi",
"drawing/x-dwf (old)",
"model/vnd.dwf",
"application/acad",
"image/vnd.dwg",
"image/x-dwg",
"application/dxf",
"image/vnd.dwg",
"image/x-dwg",
"application/x-director",
"text/x-script.elisp",
"application/x-bytecode.elisp (compiled elisp)",
"application/x-elc",
"application/x-envoy",
"application/postscript",
"application/x-esrehber",
"text/x-setext",
"application/envoy",
"application/x-envoy",
"application/octet-stream",
"text/plain",
"text/x-fortran",
"text/x-fortran",
"text/plain",
"text/x-fortran",
"application/vnd.fdf",
"application/fractals",
"image/fif",
"video/fli",
"video/x-fli",
"image/florian",
"text/vnd.fmi.flexstor",
"video/x-atomic3d-feature",
"text/plain",
"text/x-fortran",
"image/vnd.fpx",
"image/vnd.net-fpx",
"application/freeloader",
"audio/make",
"text/plain",
"image/g3fax",
"image/gif",
"video/gl",
"video/x-gl",
"audio/x-gsm",
"audio/x-gsm",
"application/x-gsp",
"application/x-gss",
"application/x-gtar",
"application/x-compressed",
"application/x-gzip",
"application/x-gzip",
"multipart/x-gzip",
"text/plain",
"text/x-h",
"application/x-hdf",
"application/x-helpfile",
"application/vnd.hp-hpgl",
"text/plain",
"text/x-h",
"text/x-script",
"application/hlp",
"application/x-helpfile",
"application/x-winhelp",
"application/vnd.hp-hpgl",
"application/vnd.hp-hpgl",
"application/binhex",
"application/binhex4",
"application/mac-binhex",
"application/mac-binhex40",
"application/x-binhex40",
"application/x-mac-binhex40",
"application/hta",
"text/x-component",
"text/html",
"text/html",
"text/html",
"text/webviewhtml",
"text/html",
"x-conference/x-cooltalk",
"image/x-icon",
"text/plain",
"image/ief",
"image/ief",
"application/iges",
"model/iges",
"application/iges",
"model/iges",
"application/x-ima",
"application/x-httpd-imap",
"application/inf",
"application/x-internett-signup",
"application/x-ip2",
"video/x-isvideo",
"audio/it",
"application/x-inventor",
"i-world/i-vrml",
"application/x-livescreen",
"audio/x-jam",
"text/plain",
"text/x-java-source",
"text/plain",
"text/x-java-source",
"application/x-java-commerce",
"image/jpeg",
"image/pjpeg",
"image/jpeg",
"image/jpeg",
"image/pjpeg",
"image/jpeg",
"image/pjpeg",
"image/jpeg",
"image/pjpeg",
"image/x-jps",
"application/x-javascript",
"image/jutvision",
"audio/midi",
"music/x-karaoke",
"application/x-ksh",
"text/x-script.ksh",
"audio/nspaudio",
"audio/x-nspaudio",
"audio/x-liveaudio",
"application/x-latex",
"application/lha",
"application/octet-stream",
"application/x-lha",
"application/octet-stream",
"text/plain",
"audio/nspaudio",
"audio/x-nspaudio",
"text/plain",
"application/x-lisp",
"text/x-script.lisp",
"text/plain",
"text/x-la-asf",
"application/x-latex",
"application/octet-stream",
"application/x-lzh",
"application/lzx",
"application/octet-stream",
"application/x-lzx",
"text/plain",
"text/x-m",
"video/mpeg",
"audio/mpeg",
"video/mpeg",
"audio/x-mpequrl",
"application/x-troff-man",
"application/x-navimap",
"text/plain",
"application/mbedlet",
"application/mcad",
"application/x-mathcad",
"image/vasa",
"text/mcf",
"application/netmc",
"application/x-troff-me",
"message/rfc822",
"message/rfc822",
"application/x-midi",
"audio/midi",
"audio/x-mid",
"audio/x-midi",
"music/crescendo",
"x-music/x-midi",
"application/x-midi",
"audio/midi",
"audio/x-mid",
"audio/x-midi",
"music/crescendo",
"x-music/x-midi",
"application/x-frame",
"application/x-mif",
"message/rfc822",
"www/mime",
"video/x-motion-jpeg",
"application/base64",
"application/x-meme",
"application/base64",
"audio/mod",
"audio/x-mod",
"video/quicktime",
"video/quicktime",
"video/x-sgi-movie",
"audio/mpeg",
"audio/x-mpeg",
"video/mpeg",
"video/x-mpeg",
"video/x-mpeq2a",
"audio/mpeg3",
"audio/x-mpeg-3",
"video/mpeg",
"video/x-mpeg",
"audio/mpeg",
"video/mpeg",
"application/x-project",
"video/mpeg",
"video/mpeg",
"audio/mpeg",
"video/mpeg",
"audio/mpeg",
"application/vnd.ms-project",
"application/x-project",
"application/x-project",
"application/x-project",
"application/marc",
"application/x-troff-ms",
"video/x-sgi-movie",
"audio/make",
"application/x-vnd.audioexplosion.mzz",
"image/naplps",
"image/naplps",
"application/x-netcdf",
"application/vnd.nokia.configuration-message",
"image/x-niff",
"image/x-niff",
"application/x-mix-transfer",
"application/x-conference",
"application/x-navidoc",
"application/octet-stream",
"application/oda",
"application/x-omc",
"application/x-omcdatamaker",
"application/x-omcregerator",
"text/x-pascal",
"application/pkcs10",
"application/x-pkcs10",
"application/pkcs-12",
"application/x-pkcs12",
"application/x-pkcs7-signature",
"application/pkcs7-mime",
"application/x-pkcs7-mime",
"application/pkcs7-mime",
"application/x-pkcs7-mime",
"application/x-pkcs7-certreqresp",
"application/pkcs7-signature",
"application/pro_eng",
"text/pascal",
"image/x-portable-bitmap",
"application/vnd.hp-pcl",
"application/x-pcl",
"image/x-pict",
"image/x-pcx",
"chemical/x-pdb",
"application/pdf",
"audio/make",
"audio/make.my.funk",
"image/x-portable-graymap",
"image/x-portable-greymap",
"image/pict",
"image/pict",
"application/x-newton-compatible-pkg",
"application/vnd.ms-pki.pko",
"text/plain",
"text/x-script.perl",
"application/x-pixclscript",
"image/x-xpixmap",
"text/x-script.perl-module",
"application/x-pagemaker",
"application/x-pagemaker",
"image/png",
"application/x-portable-anymap",
"image/x-portable-anymap",
"application/mspowerpoint",
"application/vnd.ms-powerpoint",
"model/x-pov",
"application/vnd.ms-powerpoint",
"image/x-portable-pixmap",
"application/mspowerpoint",
"application/vnd.ms-powerpoint",
"application/mspowerpoint",
"application/powerpoint",
"application/vnd.ms-powerpoint",
"application/x-mspowerpoint",
"application/mspowerpoint",
"application/x-freelance",
"application/pro_eng",
"application/postscript",
"application/octet-stream",
"paleovu/x-pv",
"application/vnd.ms-powerpoint",
"text/x-script.phyton",
"application/x-bytecode.python",
"audio/vnd.qcelp",
"x-world/x-3dmf",
"x-world/x-3dmf",
"image/x-quicktime",
"video/quicktime",
"video/x-qtc",
"image/x-quicktime",
"image/x-quicktime",
"audio/x-pn-realaudio",
"audio/x-pn-realaudio-plugin",
"audio/x-realaudio",
"audio/x-pn-realaudio",
"application/x-cmu-raster",
"image/cmu-raster",
"image/x-cmu-raster",
"image/cmu-raster",
"text/x-script.rexx",
"image/vnd.rn-realflash",
"image/x-rgb",
"application/vnd.rn-realmedia",
"audio/x-pn-realaudio",
"audio/mid",
"audio/x-pn-realaudio",
"audio/x-pn-realaudio",
"audio/x-pn-realaudio-plugin",
"application/ringing-tones",
"application/vnd.nokia.ringing-tone",
"application/vnd.rn-realplayer",
"application/x-troff",
"image/vnd.rn-realpix",
"audio/x-pn-realaudio-plugin",
"text/richtext",
"text/vnd.rn-realtext",
"application/rtf",
"application/x-rtf",
"text/richtext",
"application/rtf",
"text/richtext",
"video/vnd.rn-realvideo",
"text/x-asm",
"audio/s3m",
"application/octet-stream",
"application/x-tbook",
"application/x-lotusscreencam",
"text/x-script.guile",
"text/x-script.scheme",
"video/x-scm",
"text/plain",
"application/sdp",
"application/x-sdp",
"application/sounder",
"application/sea",
"application/x-sea",
"application/set",
"text/sgml",
"text/x-sgml",
"text/sgml",
"text/x-sgml",
"application/x-bsh",
"application/x-sh",
"application/x-shar",
"text/x-script.sh",
"application/x-bsh",
"application/x-shar",
"text/html",
"text/x-server-parsed-html",
"audio/x-psid",
"application/x-sit",
"application/x-stuffit",
"application/x-koan",
"application/x-koan",
"application/x-koan",
"application/x-koan",
"application/x-seelogo",
"application/smil",
"application/smil",
"audio/basic",
"audio/x-adpcm",
"application/solids",
"application/x-pkcs7-certificates",
"text/x-speech",
"application/futuresplash",
"application/x-sprite",
"application/x-sprite",
"application/x-wais-source",
"text/x-server-parsed-html",
"application/streamingmedia",
"application/vnd.ms-pki.certstore",
"application/step",
"application/sla",
"application/vnd.ms-pki.stl",
"application/x-navistyle",
"application/step",
"application/x-sv4cpio",
"application/x-sv4crc",
"image/vnd.dwg",
"image/x-dwg",
"application/x-world",
"x-world/x-svr",
"application/x-shockwave-flash",
"application/x-troff",
"text/x-speech",
"application/x-tar",
"application/toolbook",
"application/x-tbook",
"application/x-tcl",
"text/x-script.tcl",
"text/x-script.tcsh",
"application/x-tex",
"application/x-texinfo",
"application/x-texinfo",
"application/plain",
"text/plain",
"application/gnutar",
"application/x-compressed",
"image/tiff",
"image/x-tiff",
"image/tiff",
"image/x-tiff",
"application/x-troff",
"audio/tsp-audio",
"application/dsptype",
"audio/tsplayer",
"text/tab-separated-values",
"image/florian",
"text/plain",
"text/x-uil",
"text/uri-list",
"text/uri-list",
"application/i-deas",
"text/uri-list",
"text/uri-list",
"application/x-ustar",
"multipart/x-ustar",
"application/octet-stream",
"text/x-uuencode",
"text/x-uuencode",
"application/x-cdlink",
"text/x-vcalendar",
"application/vda",
"video/vdo",
"application/groupwise",
"video/vivo",
"video/vnd.vivo",
"video/vivo",
"video/vnd.vivo",
"application/vocaltec-media-desc",
"application/vocaltec-media-file",
"audio/voc",
"audio/x-voc",
"video/vosaic",
"audio/voxware",
"audio/x-twinvq-plugin",
"audio/x-twinvq",
"audio/x-twinvq-plugin",
"application/x-vrml",
"model/vrml",
"x-world/x-vrml",
"x-world/x-vrt",
"application/x-visio",
"application/x-visio",
"application/x-visio",
"application/wordperfect6.0",
"application/wordperfect6.1",
"application/msword",
"audio/wav",
"audio/x-wav",
"application/x-qpro",
"image/vnd.wap.wbmp",
"application/vnd.xara",
"application/msword",
"application/x-123",
"windows/metafile",
"text/vnd.wap.wml",
"application/vnd.wap.wmlc",
"text/vnd.wap.wmlscript",
"application/vnd.wap.wmlscriptc",
"application/msword",
"application/wordperfect",
"application/wordperfect",
"application/wordperfect6.0",
"application/wordperfect",
"application/wordperfect",
"application/x-wpwin",
"application/x-lotus",
"application/mswrite",
"application/x-wri",
"application/x-world",
"model/vrml",
"x-world/x-vrml",
"model/vrml",
"x-world/x-vrml",
"text/scriplet",
"application/x-wais-source",
"application/x-wintalk",
"image/x-xbitmap",
"image/x-xbm",
"image/xbm",
"video/x-amt-demorun",
"xgl/drawing",
"image/vnd.xiff",
"application/excel",
"application/excel",
"application/x-excel",
"application/x-msexcel",
"application/excel",
"application/vnd.ms-excel",
"application/x-excel",
"application/excel",
"application/vnd.ms-excel",
"application/x-excel",
"application/excel",
"application/x-excel",
"application/excel",
"application/x-excel",
"application/excel",
"application/vnd.ms-excel",
"application/x-excel",
"application/excel",
"application/vnd.ms-excel",
"application/x-excel",
"application/excel",
"application/vnd.ms-excel",
"application/x-excel",
"application/x-msexcel",
"application/excel",
"application/x-excel",
"application/excel",
"application/x-excel",
"application/excel",
"application/vnd.ms-excel",
"application/x-excel",
"application/x-msexcel",
"audio/xm",
"application/xml",
"text/xml",
"xgl/movie",
"application/x-vnd.ls-xpix",
"image/x-xpixmap",
"image/xpm",
"image/png",
"video/x-amt-showrun",
"image/x-xwd",
"image/x-xwindowdump",
"chemical/x-pdb",
"application/x-compress",
"application/x-compressed",
"application/x-compressed",
"application/x-zip-compressed",
"application/zip",
"multipart/x-zip",
"application/octet-stream",
"text/x-script.zsh",
];
#[allow(dead_code)]
pub static EXTENSION: &[&str] = &[
"doc",
"docx",
"log",
"msg",
"odt",
"pages",
"rtf",
"tex",
"txt",
"wpd",
"wps",
"csv",
"dat",
"gbr",
"ged",
"key",
"keychain",
"pps",
"ppt",
"pptx",
"sdf",
"tar",
"vcf",
"xml",
"aif",
"iff",
"mid",
"mpa",
"ra",
"wav",
"wma",
"asf",
"asx",
"avi",
"flv",
"mov",
"mpg",
"rm",
"srt",
"swf",
"vob",
"wmv",
"max",
"obj",
"bmp",
"dds",
"gif",
"jpg",
"png",
"psd",
"pspimage",
"tga",
"thm",
"tif",
"tiff",
"yuv",
"ai",
"eps",
"ps",
"svg",
"indd",
"pct",
"pdf",
"xlr",
"xls",
"xlsx",
"accdb",
"db",
"dbf",
"mdb",
"pdb",
"sql",
"apk",
"app",
"bat",
"cgi",
"com",
"exe",
"gadget",
"jar",
"pif",
"vb",
"wsf",
"dem",
"gam",
"nes",
"rom",
"sav",
"dwg",
"dxf",
"gpx",
"kml",
"kmz",
"asp",
"aspx",
"cer",
"cfm",
"csr",
"css",
"htm",
"html",
"js",
"jsp",
"php",
"rss",
"xhtml",
"crx",
"plugin",
"fnt",
"fon",
"otf",
"ttf",
"cab",
"cpl",
"cur",
"deskthemepack",
"dll",
"dmp",
"drv",
"icns",
"ico",
"lnk",
"sys",
"cfg",
"ini",
"prf",
"hqx",
"mim",
"uue",
"cbr",
"deb",
"gz",
"pkg",
"rar",
"rpm",
"sitx",
"gz",
"zip",
"zipx",
"bin",
"cue",
"dmg",
"iso",
"mdf",
"toast",
"vcd",
"class",
"cpp",
"cs",
"dtd",
"fla",
"java",
"lua",
"pl",
"py",
"sh",
"sln",
"swift",
"vcxproj",
"xcodeproj",
"bak",
"tmp",
"crdownload",
"ics",
"msi",
"part",
"torrent",
];
| 22.314496 | 60 | 0.575259 |
67dab47be80e6485088211e222cd1578a56dd45d
| 1,912 |
#[macro_export]
macro_rules! assert_query {
($runner:expr, $q:expr, $result:expr) => {
let result = $runner.query($q).await?;
assert_eq!(result.to_string(), $result);
};
}
#[macro_export]
macro_rules! assert_query_many {
($runner:expr, $q:expr, $potential_results:expr) => {
let query_result = $runner.query($q).await?.to_string();
        assert!(
            $potential_results.contains(&query_result.as_str()),
            "Query result: {} is not part of the expected results: {:?}",
            query_result,
            $potential_results
        );
};
}
#[macro_export]
macro_rules! is_one_of {
($result:expr, $potential_results:expr) => {
        assert!(
            $potential_results.contains(&$result.as_str()),
            "Query result: {} is not part of the expected results: {:?}",
            $result,
            $potential_results
        );
};
}
#[macro_export]
macro_rules! run_query {
($runner:expr, $q:expr) => {{
let res = $runner.query($q).await?;
res.assert_success();
res.to_string()
}};
}
#[macro_export]
macro_rules! run_query_json {
($runner:expr, $q:expr) => {
serde_json::from_str::<serde_json::Value>($runner.query($q).await?.to_string().as_str()).unwrap()
};
($runner:expr, $q:expr, $path: expr) => {
query_tests_setup::walk_json(
&serde_json::from_str::<serde_json::Value>($runner.query($q).await?.to_string().as_str()).unwrap(),
$path,
)
.unwrap()
.to_owned()
};
}
#[macro_export]
macro_rules! assert_error {
($runner:expr, $q:expr, $code:expr) => {
$runner.query($q).await?.assert_failure($code, None);
};
($runner:expr, $q:expr, $code:expr, $msg:expr) => {
$runner.query($q).await?.assert_failure($code, Some($msg.to_string()));
};
}
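// Illustrative usage (hypothetical `runner` and queries; these macros expect an
// async test context where `$runner.query(..)` returns a result that can be
// awaited and `?`-propagated):
//
//     assert_query!(
//         runner,
//         "query { findManyUser { id } }",
//         r#"{"data":{"findManyUser":[]}}"#
//     );
//     let json = run_query_json!(runner, "query { findManyUser { id } }");
//     assert_error!(runner, "mutation { brokenMutation }", 2009, "expected message");
//
// `assert_error!` checks only the error code in its three-argument form and also
// passes a message to `assert_failure` when a fourth argument is given.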
| 27.314286 | 111 | 0.560669 |
ebb5496482bc2f454af8ad445cff45a3b783931e
| 4,891 |
// Bitcoin Pro: Professional bitcoin accounts & assets management
// Written in 2020-2021 by
// Dr. Maxim Orlovsky <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the MIT License
// along with this software.
// If not, see <https://opensource.org/licenses/MIT>.
use gtk::prelude::GtkListStoreExtManual;
use std::cell::RefCell;
use std::collections::HashSet;
use std::ops::DerefMut;
use std::rc::Rc;
use bitcoin::Script;
use electrum_client::{
Client as ElectrumClient, ElectrumApi, Error as ElectrumError,
};
use wallet::bip32::{ChildIndex, UnhardenedIndex};
use wallet::descriptor;
use crate::model::{DescriptorAccount, UtxoEntry};
use crate::util::resolver_mode::ResolverModeType;
#[derive(Clone, PartialEq, Eq, Debug, Display, From, Error)]
#[display(doc_comments)]
pub enum Error {
/// Electrum error
#[display("{0}")]
#[from]
Electrum(String),
/// The actual value of the used index corresponds to a hardened index,
/// which can't be used in the current context
HardenedIndex,
/// Unable to generate key with index {0} for descriptor {1}: {2}
Descriptor(u32, String, descriptor::Error),
}
impl From<ElectrumError> for Error {
fn from(err: ElectrumError) -> Self {
Error::Electrum(format!("{:?}", err))
}
}
pub trait UtxoLookup {
fn utxo_lookup(
&self,
resolver: ElectrumClient,
lookup_type: ResolverModeType,
account: DescriptorAccount,
utxo_set: Rc<RefCell<HashSet<UtxoEntry>>>,
        uxto_store: Option<gtk::ListStore>,
) -> Result<usize, Error> {
struct LookupItem<'a> {
pub script_pubkey: Script,
pub descriptor_type: descriptor::Category,
pub descriptor_content: &'a descriptor::Template,
pub derivation_index: u32,
}
let mut total_found = 0usize;
let mut lookup_iter = lookup_type.into_iter();
loop {
let mut lookup: Vec<LookupItem> = Vec::with_capacity(
lookup_type.count() as usize
* account.pubkey_scripts_count() as usize,
);
for offset in lookup_iter.by_ref() {
let scripts = account
.pubkey_scripts(
UnhardenedIndex::from_index(offset)
.map_err(|_| Error::HardenedIndex)?,
)
.map_err(|err| {
Error::Descriptor(offset, account.descriptor(), err)
})?;
lookup.extend(scripts.into_iter().map(
|(descriptor_type, script_pubkey)| LookupItem {
script_pubkey,
descriptor_type,
descriptor_content: &account.generator.template,
derivation_index: offset,
},
));
}
let mut found = 0usize;
let request: Vec<_> = lookup
.iter()
.map(|item| item.script_pubkey.clone())
.collect();
println!("Requesting lookup for: {:#?}", request);
let response =
resolver.batch_script_list_unspent(request.iter())?;
println!("Response:\n{:#?}", response);
for utxo in response
.into_iter()
.zip(lookup)
.map(|(list, item)| {
list.into_iter().map(move |res| {
UtxoEntry::with(
&res,
item.descriptor_content.clone(),
item.descriptor_type,
item.derivation_index,
)
})
})
.flatten()
{
found += 1;
if utxo_set.borrow_mut().deref_mut().insert(utxo.clone()) {
if let Some(utxo_store) = uxto_store {
utxo_store.insert_with_values(
None,
&[0, 1, 2, 3],
&[
&utxo.outpoint.txid.to_string(),
&utxo.outpoint.vout,
&utxo.amount,
&utxo.height,
],
);
}
}
}
total_found += found;
if !lookup_type.is_while() || found == 0 {
break;
}
}
Ok(total_found)
}
}
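// Illustrative sketch (commented out; `account`, `mode` and the shared UTXO set
// come from the application state elsewhere in this crate): any type can opt in
// to the default lookup method with an empty impl.
//
//     struct Tracker;
//     impl UtxoLookup for Tracker {}
//
//     let resolver = ElectrumClient::new("ssl://electrum.blockstream.info:60002")?;
//     let utxo_set = Rc::new(RefCell::new(HashSet::new()));
//     let found = Tracker.utxo_lookup(resolver, mode, account, utxo_set, None)?;
//
// Passing `None` for the list store skips the GTK model updates and only fills
// the shared `utxo_set`.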
| 34.687943 | 76 | 0.506236 |